Example #1
 def test_event_hub(self, ssh_con, test_vars):  # noqa: F811
     log = logging.getLogger("test_event_hub")
     atd = test_vars["atd_obj"]
     atd.template = requests.get(
         url='https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/301-eventHub-create-authrule-namespace-and-eventHub/azuredeploy.json'
     ).json()
     eh_name = "eh-" + atd.deploy_id
     atd.deploy_params = {
         "namespaceName": eh_name + "-ns",
         "namespaceAuthorizationRuleName": eh_name + "-nsar",
         "eventHubName": eh_name,
         "eventhubAuthorizationRuleName": eh_name + "-ehar",
         "eventhubAuthorizationRuleName1": eh_name + "-ehar1",
         "consumerGroupName": eh_name + "-cgn",
     }
     atd.deploy_name = "test_event_hub"
     log.debug('Generated deploy parameters: \n{}'.format(
         json.dumps(atd.deploy_params, indent=4)))
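     # Start the deployment and block until the operation completes.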
     deploy_result = wait_for_op(atd.deploy())
     test_vars["deploy_eh_outputs"] = deploy_result.properties.outputs
     log.debug(test_vars["deploy_eh_outputs"])
     policy_primary_key = test_vars["deploy_eh_outputs"][
         "eventHubSharedAccessPolicyPrimaryKey"]["value"]
     log.debug("policy_primary_key = {}".format(policy_primary_key))
     commands = """
         export AZURE_EVENTHUB_SENDERKEYNAME="RootManageSharedAccessKey"
         export AZURE_EVENTHUB_SENDERKEY={0}
         export AZURE_EVENTHUB_NAMESPACENAME="edasimeventhub2"
         """.format(policy_primary_key).split("\n")
     run_ssh_commands(ssh_con, commands)
     test_vars["cmd2"] = (
         "AZURE_EVENTHUB_SENDERKEYNAME=\"RootManageSharedAccessKey\" "
         "AZURE_EVENTHUB_SENDERKEY=\"{}\" "
         "AZURE_EVENTHUB_NAMESPACENAME=\"edasimeventhub2\"".format(
             policy_primary_key))
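
The helper functions used throughout these examples (run_ssh_commands, wait_for_op, and so on) are not shown on this page. Judging only from the call sites, which pass a paramiko-style SSH client, a list of command strings, and optional timeout and ignore_nonzero_rc keywords, a minimal sketch might look like the following; the signatures and return shapes are assumptions, not the project's actual code.

# Hypothetical reconstructions inferred from the call sites above; not the real helpers.
import paramiko  # assumed: the examples pass paramiko-compatible clients


def run_ssh_commands(ssh_client, commands, timeout=None, ignore_nonzero_rc=False):
    """Run each non-empty command over SSH and collect per-command results."""
    results = []
    for command in (c.strip() for c in commands):
        if not command:
            continue  # the triple-quoted command strings yield blank entries
        _, stdout, stderr = ssh_client.exec_command(command, timeout=timeout)
        rc = stdout.channel.recv_exit_status()  # wait for the command to finish
        results.append({
            "command": command,
            "rc": rc,
            "stdout": stdout.read().decode(),
            "stderr": stderr.read().decode(),
        })
        if rc and not ignore_nonzero_rc:
            raise RuntimeError("command failed (rc={}): {}".format(rc, command))
    return results


def wait_for_op(op):
    """Block until an Azure long-running operation finishes, then return its result."""
    return op.result()  # assumes op is an LROPoller-style object

Example #13 below logs the return value with json.dumps, which is why this sketch returns plain dicts.
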
Example #2
 def test_vdbench_run(self, test_vars):  # noqa: F811
     log = logging.getLogger("test_vdbench_run")
     node_ip = test_vars["deploy_vd_outputs"]["node_0_ip_address"]["value"]
     with SSHTunnelForwarder(
             test_vars["public_ip"],
             ssh_username=test_vars["controller_user"],
             ssh_pkey=test_vars["ssh_priv_key"],
             remote_bind_address=(node_ip, 22),
     ) as ssh_tunnel:
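         # Give the tunnel a moment to establish before connecting through it.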
         sleep(1)
         try:
             ssh_client = create_ssh_client(
                 test_vars["controller_user"],
                 "127.0.0.1",
                 ssh_tunnel.local_bind_port,
                 key_filename=test_vars["ssh_priv_key"])
             scp_client = SCPClient(ssh_client.get_transport())
             try:
                 scp_client.put(test_vars["ssh_priv_key"], r"~/.ssh/id_rsa")
             finally:
                 scp_client.close()
             commands = """
                 ~/copy_idrsa.sh
                 cd
                 ./run_vdbench.sh inmem32node3.conf uniquestring1
                 """.split("\n")
             run_ssh_commands(ssh_client, commands)
         finally:
             ssh_client.close()
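
create_ssh_client is likewise not defined on this page. The call sites pass username, host, and port positionally plus a key_filename keyword, so a plausible paramiko-based sketch (again an assumption, not the project's code) is:

import paramiko  # assumed dependency


def create_ssh_client(username, hostname, port=22, password=None, key_filename=None):
    """Open a paramiko SSHClient; the argument order mirrors the calls above."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # test hosts are ephemeral
    client.connect(hostname, port=port, username=username,
                   password=password, key_filename=key_filename)
    return client
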
Example #3
def mnt_nodes(ssh_con, test_vars):
    if ("storage_account"
            not in test_vars) or (not test_vars["storage_account"]):
        return

    check = run_ssh_command(ssh_con,
                            "ls ~/STATUS.NODES_MOUNTED",
                            ignore_nonzero_rc=True,
                            timeout=30)
    if check['rc']:  # nodes were not already mounted
        # Update needed packages.
        commands = ["sudo apt-get update", "sudo apt-get install nfs-common"]
        run_ssh_commands(ssh_con, commands, timeout=600)

        # Set up mount points and /etc/fstab.
        commands = []
        for i, vs_ip in enumerate(test_vars["cluster_vs_ips"]):
            commands.append("sudo mkdir -p /nfs/node{}".format(i))
            commands.append("sudo chown nobody:nogroup /nfs/node{}".format(i))
            fstab_line = "{}:/msazure /nfs/node{} nfs ".format(vs_ip, i) + \
                         "hard,nointr,proto=tcp,mountproto=tcp,retry=30 0 0"
            commands.append(
                "sudo sh -c 'echo \"{}\" >> /etc/fstab'".format(fstab_line))
        run_ssh_commands(ssh_con, commands, timeout=30)

        # Mount the nodes.
        run_ssh_command(ssh_con, "sudo mount -a", timeout=300)
        run_ssh_command(ssh_con, "touch ~/STATUS.NODES_MOUNTED", timeout=30)
Example #4
 def test_download_go(self, ssh_con):  # noqa: F811
     commands = """
         sudo apt -y install golang-go
         mkdir -p ~/gopath
         echo "export GOPATH=$HOME/gopath" >> ~/.profile
         echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile
         source ~/.profile && cd $GOPATH && go get -v github.com/Azure/Avere/src/go/...
         """.split("\n")
     run_ssh_commands(ssh_con, commands)
Example #5
 def test_vdbench_setup(self, mnt_nodes, ssh_con):  # noqa: F811
     log = logging.getLogger("test_vdbench_setup")
     commands = """
         sudo mkdir -p /nfs/node0/bootstrap
         cd /nfs/node0/bootstrap
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.vdbench.sh https://raw.githubusercontent.com/Azure/Avere/master/src/clientapps/vdbench/bootstrap.vdbench.sh
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/vdbench50407.zip https://avereimageswestus.blob.core.windows.net/vdbench/vdbench50407.zip
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/vdbenchVerify.sh https://raw.githubusercontent.com/Azure/Avere/master/src/clientapps/vdbench/vdbenchVerify.sh
         sudo chmod +x /nfs/node0/bootstrap/vdbenchVerify.sh
         /nfs/node0/bootstrap/vdbenchVerify.sh
         """.split("\n")
     run_ssh_commands(ssh_con, commands)
Example #6
 def test_vdbench_setup(self, mnt_nodes, ssh_con):  # noqa: F811
     log = logging.getLogger("test_vdbench_setup")
     vdbench_url = os.environ.get("VDBENCH_URL", "http://localhost")
     commands = """
         sudo mkdir -p /nfs/node0/bootstrap
         cd /nfs/node0/bootstrap
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.vdbench.sh https://raw.githubusercontent.com/Azure/Avere/main/src/clientapps/vdbench/bootstrap.vdbench.sh
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/vdbench50407.zip '{0}'
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/vdbenchVerify.sh https://raw.githubusercontent.com/Azure/Avere/main/src/clientapps/vdbench/vdbenchVerify.sh
         sudo chmod +x /nfs/node0/bootstrap/vdbenchVerify.sh
         /nfs/node0/bootstrap/vdbenchVerify.sh
         """.format(vdbench_url).split("\n")
     run_ssh_commands(ssh_con, commands)
Example #7
 def test_storage_account(self, resource_group, ssh_con,
                          test_vars):  # noqa: F811, E501
     log = logging.getLogger("test_storage_account")
     atd = test_vars["atd_obj"]
     storage_account = test_vars["storage_account"]
     storage_keys = atd.st_client.storage_accounts.list_keys(
         resource_group.name, storage_account)
     storage_keys = {v.key_name: v.value for v in storage_keys.keys}
     key = storage_keys['key1']
     log.debug("storage_account = {}".format(storage_account))
     log.debug("key = {}".format(key))
     commands = """
         export AZURE_STORAGE_ACCOUNT={0}
         export AZURE_STORAGE_ACCOUNT_KEY={1}
         """.format(storage_account, key).split("\n")
     run_ssh_commands(ssh_con, commands)
     test_vars["cmd1"] = (
         "AZURE_STORAGE_ACCOUNT=\"{}\" "
         "AZURE_STORAGE_ACCOUNT_KEY=\"{}\" ".format(storage_account, key))
Example #8
    def test_basic_fileops(self, mnt_nodes, scp_con, ssh_con,
                           test_vars):  # noqa: E501, F811
        """
        Quick check of file operations.
        See check_node_basic_fileops.sh for more information.
        """
        if ("storage_account"
                not in test_vars) or (not test_vars["storage_account"]):
            pytest.skip("no storage account")

        script_name = "check_node_basic_fileops.sh"
        scp_con.put(
            "{0}/test/{1}".format(test_vars["build_root"], script_name),
            r"~/.",
        )
        commands = """
            chmod +x {0}
            ./{0}
            """.format(script_name).split("\n")
        run_ssh_commands(ssh_con, commands)
Example #9
 def test_edasim_setup(self, mnt_nodes, ssh_con):  # noqa: F811
     commands = """
         sudo mkdir -p /nfs/node0/bootstrap
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.jobsubmitter.sh https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/bootstrap.jobsubmitter.sh
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.orchestrator.sh https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/bootstrap.orchestrator.sh
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.onpremjobuploader.sh https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/bootstrap.onpremjobuploader.sh
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/bootstrap.worker.sh https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/bootstrap.worker.sh
         sudo mkdir -p /nfs/node0/bootstrap/edasim
         source ~/.profile && sudo cp $GOPATH/bin/* /nfs/node0/bootstrap/edasim
         sudo mkdir -p /nfs/node0/bootstrap/rsyslog
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/rsyslog/33-jobsubmitter.conf https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/rsyslog/33-jobsubmitter.conf
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/rsyslog/30-orchestrator.conf https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/rsyslog/30-orchestrator.conf
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/rsyslog/31-worker.conf https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/rsyslog/31-worker.conf
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/rsyslog/32-onpremjobuploader.conf https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/rsyslog/32-onpremjobuploader.conf
         sudo mkdir -p /nfs/node0/bootstrap/systemd
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/systemd/jobsubmitter.service https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/systemd/jobsubmitter.service
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/systemd/onpremjobuploader.service https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/systemd/onpremjobuploader.service
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/systemd/orchestrator.service https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/systemd/orchestrator.service
         sudo curl --retry 5 --retry-delay 5 -o /nfs/node0/bootstrap/systemd/worker.service https://raw.githubusercontent.com/Azure/Avere/main/src/go/cmd/edasim/deploymentartifacts/bootstrap/systemd/worker.service
         """.split("\n")
     run_ssh_commands(ssh_con, commands)
Example #10
    def test_client_docker_run(self, test_vars):  # noqa: F811
        log = logging.getLogger("test_client_docker_run")
        node_ip = test_vars["deploy_client_docker_outputs"]["node_0_ip_address"]["value"]
        atd = test_vars["atd_obj"]
        cluster_mgmt_ip = test_vars["cluster_mgmt_ip"]
        with SSHTunnelForwarder(
            test_vars["public_ip"],
            ssh_username=test_vars["controller_user"],
            ssh_pkey=test_vars["ssh_priv_key"],
            remote_bind_address=(node_ip, 22),
        ) as ssh_tunnel:
            sleep(1)
            try:
                ssh_client = create_ssh_client(
                    test_vars["controller_user"],
                    "127.0.0.1",
                    ssh_tunnel.local_bind_port,
                    key_filename=test_vars["ssh_priv_key"]
                )
                scp_client = SCPClient(ssh_client.get_transport())
                try:
                    scp_client.put(test_vars["ssh_priv_key"], r"~/.ssh/id_rsa")
                finally:
                    scp_client.close()
                commands = """
                    cd
                    curl -fsSL https://get.docker.com -o get-docker.sh
                    sudo sh get-docker.sh
                    sudo docker login https://{0} -u {1} -p {2}
                    sudo docker pull {0}/test

                    echo "export STORAGEACT='{3}'" >> ~/.bashrc
                    echo "export MGMIP='{4}'" >> ~/.bashrc
                    echo "export SA_KEY='{5}'" >> ~/.bashrc
                    echo "export CLUSTER_MGMT_IP='{6}'" >> ~/.bashrc
                    echo "export ADMIN_PW='{7}'" >> ~/.bashrc
                    """.format(os.environ["dockerRegistry"], os.environ["dockerUsername"], os.environ["dockerPassword"], atd.deploy_id + "sa", test_vars["public_ip"], os.environ["SA_KEY"], cluster_mgmt_ip, os.environ["AVERE_ADMIN_PW"]).split("\n")
                run_ssh_commands(ssh_client, commands)
            finally:
                ssh_client.close()
Example #11
def mnt_nodes(ssh_con, test_vars):
    if ("storage_account"
            not in test_vars) or (not test_vars["storage_account"]):
        return

    check = run_ssh_command(ssh_con,
                            "ls ~/STATUS.NODES_MOUNTED",
                            ignore_nonzero_rc=True)
    if check['rc']:  # nodes were not already mounted
        commands = """
            sudo apt-get update
            sudo apt-get -y install nfs-common
            """.split("\n")
        for i, vs_ip in enumerate(test_vars["cluster_vs_ips"]):
            commands.append("sudo mkdir -p /nfs/node{}".format(i))
            commands.append("sudo chown nobody:nogroup /nfs/node{}".format(i))
            fstab_line = "{}:/msazure /nfs/node{} nfs ".format(vs_ip, i) + \
                         "hard,nointr,proto=tcp,mountproto=tcp,retry=30 0 0"
            commands.append(
                "sudo sh -c 'echo \"{}\" >> /etc/fstab'".format(fstab_line))
        commands.append("sudo mount -a")
        commands.append("touch ~/STATUS.NODES_MOUNTED")
        run_ssh_commands(ssh_con, commands)
Example #12
 def test_ping_vservers(self, ssh_con, test_vars):  # noqa: F811
     """Ping all of the vserver IPs from the controller."""
     commands = []
     for vs_ip in test_vars["cluster_vs_ips"]:
         commands.append("ping -c 3 {}".format(vs_ip))
     run_ssh_commands(ssh_con, commands)
Example #13
def mnt_nodes(ssh_con, test_vars):
    if ("storage_account"
            not in test_vars) or (not test_vars["storage_account"]):
        return

    log = logging.getLogger("mnt_nodes")
    check = run_ssh_command(ssh_con,
                            "ls ~/STATUS.NODES_MOUNTED",
                            ignore_nonzero_rc=True,
                            timeout=30)
    if check['rc']:  # nodes were not already mounted
        # Update needed packages.
        commands = ["sudo apt-get update", "sudo apt-get install nfs-common"]
        run_ssh_commands(ssh_con, commands, timeout=600)

        # Set up mount points and /etc/fstab.
        commands = []
        for i, vs_ip in enumerate(test_vars["cluster_vs_ips"]):
            commands.append("sudo mkdir -p /nfs/node{}".format(i))
            commands.append("sudo chown nobody:nogroup /nfs/node{}".format(i))
            fstab_line = "{}:/msazure /nfs/node{} nfs ".format(vs_ip, i) + \
                         "hard,nointr,proto=tcp,mountproto=tcp,retry=5 0 0"
            commands.append(
                "sudo sh -c 'echo \"{}\" >> /etc/fstab'".format(fstab_line))
        run_ssh_commands(ssh_con, commands, timeout=30)

        # Mount the nodes.
        def _log_diag(in_str):
            log.info(json.dumps(in_str, indent=4).replace("\\n", "\n"))

        try:
            commands = """
                sudo mount -av
                touch ~/STATUS.NODES_MOUNTED
            """.split("\n")
            _log_diag(run_ssh_commands(ssh_con, commands, timeout=300))
        except Exception as e:
            # Show some diag info.
            log.info("Exception caught when attempting to mount. Diag info:")
            diag_commands = """
                cat /etc/mtab
                nfsstat
                sudo ufw status
                service portmap status
                sudo iptables -n -L -v
                netstat -rn
            """.split("\n")
            _log_diag(
                run_ssh_commands(ssh_con,
                                 diag_commands,
                                 ignore_nonzero_rc=True))
            for vs_ip in test_vars["cluster_vs_ips"]:
                _log_diag(
                    run_ssh_command(ssh_con,
                                    "rpcinfo -p " + vs_ip,
                                    ignore_nonzero_rc=True))

                run_ssh_command(ssh_con,
                                "sudo apt -y install traceroute nmap",
                                ignore_nonzero_rc=True)
                _log_diag(
                    run_ssh_command(ssh_con,
                                    "traceroute " + vs_ip,
                                    ignore_nonzero_rc=True))
                _log_diag(
                    run_ssh_command(ssh_con,
                                    "sudo nmap -sS " + vs_ip,
                                    ignore_nonzero_rc=True))
            raise
Example #14
 def test_ping_node_ips(self, node_ips, ssh_con, test_vars):  # noqa: F811
     """Ping the node IPs from the controller."""
     commands = []
     for node_ip in node_ips:
         commands.append("ping -c 3 {}".format(node_ip))
     run_ssh_commands(ssh_con, commands)
Example #15
    def test_update_reg_clients_hosts(self, test_vars):
        """
        Updates /etc/hosts on the STAF clients so they can contact the STAF
        server.
        """
        log = logging.getLogger("test_update_reg_clients_hosts")
        atd = test_vars["atd_obj"]
        commands = """
            cp /etc/hosts .
            echo ' '                >> hosts
            echo '# STAF server IP' >> hosts
            echo '{0} staf'         >> hosts
            sudo mv hosts /etc/hosts
            echo '#!/bin/bash' > ~/hostdb_entries.sh
            chmod 755 ~/hostdb_entries.sh
            echo "cd ~/Avere-sv" >> ~/hostdb_entries.sh
            echo "source /usr/sv/env/bin/activate" >> ~/hostdb_entries.sh
            echo "export PYTHONPATH=~/Avere-sv:~/Avere-sv/averesv:$PYTHONPATH:$PATH" >> ~/hostdb_entries.sh
            echo "averesv/hostdb.py -a vfxt -m {1} -p '{2}'" >> ~/hostdb_entries.sh
        """.format(test_vars["staf_server_priv_ip"],
                   test_vars["cluster_mgmt_ip"],
                   os.environ["AVERE_ADMIN_PW"]).split("\n")

        # Add hostdb entry calls for each regression client.
        for i, staf_client_ip in enumerate(test_vars["staf_client_priv_ips"]):
            commands.append(
                "echo 'averesv/hostdb.py -L regclient{0} -m {1}' >> ~/hostdb_entries.sh"
                .format(i, staf_client_ip))

        # Get the storage account's access key and add that hostdb entry, too.
        sa_key = atd.st_client.storage_accounts.list_keys(
            atd.resource_group, test_vars["storage_account"]).keys[0].value
        commands.append(
            "echo 'averesv/hostdb.py -s {0}.blob.core.windows.net -m {0}.blob.core.windows.net -M az --cloudCreds \"{0}::{1}\"' >> ~/hostdb_entries.sh"
            .format(test_vars["storage_account"], sa_key))

        last_error = None
        for staf_client_ip in test_vars["staf_client_priv_ips"]:
            for port_attempt in range(1, 11):
                tunnel_local_port = get_unused_local_port()
                with Connection(test_vars["public_ip"],
                                user=test_vars["controller_user"],
                                connect_kwargs={
                                    "key_filename": test_vars["ssh_priv_key"],
                                }).forward_local(local_port=tunnel_local_port,
                                                 remote_port=22,
                                                 remote_host=staf_client_ip):
                    node_c = Connection("127.0.0.1",
                                        user=test_vars["controller_user"],
                                        port=tunnel_local_port,
                                        connect_kwargs={
                                            "key_filename":
                                            test_vars["ssh_priv_key"],
                                        })
                    try:
                        node_c.open()

                        # If port_attempt > 1, last_error had the exception
                        # from the last iteration. Clear it.
                        last_error = None
                    except NoValidConnectionsError as ex:
                        last_error = ex
                        exp_err = "Unable to connect to port {} on 127.0.0.1".format(
                            tunnel_local_port)
                        if exp_err not in str(ex):
                            raise
                        else:
                            log.warning("{0} (attempt #{1}, retrying)".format(
                                exp_err, str(port_attempt)))
                            continue  # iterate

                    run_ssh_commands(node_c.client, commands)

                    # Copy SSH keys to the client.
                    scp_cli = SCPClient(node_c.transport)
                    scp_cli.put(test_vars["ssh_priv_key"], "~/.ssh/id_rsa")
                    scp_cli.put(test_vars["ssh_pub_key"], "~/.ssh/id_rsa.pub")
                    scp_cli.close()
                log.debug("Connection to {} closed".format(staf_client_ip))
                break  # no need to iterate again

            if last_error:
                log.error(
                    "See previous error(s) above. Raising last exception.")
                raise last_error
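
Example #15 also depends on get_unused_local_port, which is not shown here. A common implementation, sketched on the assumption that any free ephemeral TCP port will do, binds to port 0 and lets the OS pick:

import socket


def get_unused_local_port():
    """Bind to port 0 so the OS assigns a free TCP port, then release it."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("127.0.0.1", 0))
    port = sock.getsockname()[1]
    sock.close()
    return port

This approach is inherently racy: another process can grab the port between closing the socket and the tunnel binding it, which is presumably why the example retries up to ten times when the connection through the tunnel fails.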