Example 1
    def test_event_hub(self, ssh_con, test_vars):  # noqa: F811
        log = logging.getLogger("test_event_hub")
        atd = test_vars["atd_obj"]
        atd.template = requests.get(
            "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/301-eventHub-create-authrule-namespace-and-eventHub/azuredeploy.json"
        ).json()
        eh_name = "eh-" + atd.deploy_id
        atd.deploy_params = {
            "namespaceName": eh_name + "-ns",
            "namespaceAuthorizationRuleName": eh_name + "-nsar",
            "eventHubName": eh_name,
            "eventhubAuthorizationRuleName": eh_name + "-ehar",
            "eventhubAuthorizationRuleName1": eh_name + "-ehar1",
            "consumerGroupName": eh_name + "-cgn",
        }
        atd.deploy_name = "test_event_hub"
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        deploy_result = wait_for_op(atd.deploy())
        test_vars["deploy_eh_outputs"] = deploy_result.properties.outputs
        log.debug(test_vars["deploy_eh_outputs"])
        policy_primary_key = test_vars["deploy_eh_outputs"][
            "eventHubSharedAccessPolicyPrimaryKey"]["value"]
        log.debug("policy_primary_key = {}".format(policy_primary_key))
        commands = """
            export AZURE_EVENTHUB_SENDERKEYNAME="RootManageSharedAccessKey"
            export AZURE_EVENTHUB_SENDERKEY={0}
            export AZURE_EVENTHUB_NAMESPACENAME="edasimeventhub2"
            """.format(policy_primary_key).split("\n")
        run_ssh_commands(ssh_con, commands)
        test_vars["cmd2"] = (
            'AZURE_EVENTHUB_SENDERKEYNAME="RootManageSharedAccessKey" '
            'AZURE_EVENTHUB_SENDERKEY="{}" '
            'AZURE_EVENTHUB_NAMESPACENAME="edasimeventhub2"'
        ).format(policy_primary_key)
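The helpers used above, wait_for_op and run_ssh_commands, are defined in the shared test library rather than shown on this page. As a rough illustration only, a wait_for_op helper along the following lines would fit the usage here, assuming atd.deploy() returns an Azure SDK long-running-operation poller; the polling interval and log messages are assumptions, not the library's actual implementation.

import logging
import time


def wait_for_op(op, timeout_sec=60):
    """Block until the long-running operation completes; return its result.

    Minimal sketch: op is assumed to be an Azure SDK LROPoller, which
    exposes done(), wait(timeout=...) and result().
    """
    log = logging.getLogger("wait_for_op")
    time_start = time.time()
    while not op.done():
        op.wait(timeout=timeout_sec)
        log.info("waited {:0.0f} seconds so far".format(time.time() - time_start))
    return op.result()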
Example 2
def ext_vnet(test_vars):
    """
    Creates a resource group containing a new VNET, subnet, public IP, and
    jumpbox for use in other tests.
    """
    log = logging.getLogger("ext_vnet")
    vnet_atd = ArmTemplateDeploy(
        location=test_vars["location"],
        resource_group=test_vars["atd_obj"].deploy_id + "-rg-vnet")
    rg = vnet_atd.create_resource_group()
    log.info("Resource Group: {}".format(rg))

    vnet_atd.deploy_name = "ext_vnet"
    with open("{}/src/vfxt/azuredeploy.vnet.json".format(
            test_vars["build_root"])) as tfile:
        vnet_atd.template = json.load(tfile)

    with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
        ssh_pub_key = ssh_pub_f.read()

    vnet_atd.deploy_params = {
        "uniqueName": test_vars["atd_obj"].deploy_id,
        "jumpboxAdminUsername": "******",
        "jumpboxSSHKeyData": ssh_pub_key
    }
    test_vars["ext_vnet"] = wait_for_op(vnet_atd.deploy()).properties.outputs
    log.debug(test_vars["ext_vnet"])
    return test_vars["ext_vnet"]
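ArmTemplateDeploy belongs to the same test harness and its internals are not shown here. The sketch below is one plausible shape for what its deploy() call wraps, assuming the track-2 azure-mgmt-resource SDK; the function name start_arm_deployment and its parameter list are illustrative, not the harness's actual API.

from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import Deployment, DeploymentProperties


def start_arm_deployment(subscription_id, resource_group, deploy_name,
                         template, deploy_params):
    """Start an incremental ARM deployment and return its poller."""
    rm_client = ResourceManagementClient(DefaultAzureCredential(), subscription_id)
    # ARM expects each parameter value wrapped as {"value": ...}.
    wrapped_params = {k: {"value": v} for k, v in deploy_params.items()}
    return rm_client.deployments.begin_create_or_update(
        resource_group,
        deploy_name,
        Deployment(properties=DeploymentProperties(
            mode="Incremental",
            template=template,
            parameters=wrapped_params)),
    )

Waiting on the returned poller, whether with a wait_for_op helper like the one sketched above or simply with poller.result(), yields the deployment object whose properties.outputs the fixtures read.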
Example 3
    def test_no_storage_account_deploy(self, resource_group,
                                       test_vars):  # noqa: E501, F811
        """
        Deploy a vFXT cluster.
          - create a new VNET
          - do NOT use an Avere-backed storage account
        """
        log = logging.getLogger("test_no_storage_account_deploy")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "******",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            "createVirtualNetwork": True,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "useAvereBackedStorageAccount": False,
            "avereBackedStorageAccountName": atd.deploy_id + "sa",  # BUG
        }

        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ[
                "VFXT_CONTROLLER_IMG_REF_ID"]

        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params[
            "controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))

        atd.deploy_name = "test_no_storage_account_deploy"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(
                deploy_outputs["vserver_ips"]["value"])
            time.sleep(60)
        finally:
            # (c_priv_ip, c_pub_ip) = get_vm_ips(
            #     atd.nm_client, atd.resource_group, test_vars["controller_name"])
            # test_vars["controller_ip"] = c_pub_ip or c_priv_ip
            test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
                atd.resource_group,
                "publicip-" + test_vars["controller_name"]).ip_address
            test_vars["controller_ip"] = test_vars["public_ip"]
Example 4
    def test_client_docker_deploy(self, test_vars):  # noqa: F811
        log = logging.getLogger("test_client_docker_deploy")
        atd = test_vars["atd_obj"]
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        with open("{}/src/client/vmas/azuredeploy.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        atd.deploy_params = {
            "uniquename": atd.deploy_id,
            "sshKeyData": ssh_pub_key,
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "nfsCommaSeparatedAddresses": ",".join(test_vars["cluster_vs_ips"]),
            "vmCount": 1,
            "nfsExportPath": "/msazure",
            "bootstrapScriptPath": "/bootstrap/bootstrap.vdbench.sh",
        }
        atd.deploy_name = "test_client_docker"
        deploy_result = wait_for_op(atd.deploy())
        test_vars["deploy_client_docker_outputs"] = deploy_result.properties.outputs
Example 5
    def test_edasim_deploy(self, test_vars):  # noqa: F811
        atd = test_vars["atd_obj"]
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        with open(
                "{}/src/go/cmd/edasim/deploymentartifacts/template/azuredeploy.json"
                .format(test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        atd.deploy_params = {
            "secureAppEnvironmentVariables": test_vars["cmd1"] + test_vars["cmd2"],
            "uniquename": atd.deploy_id,
            "sshKeyData": ssh_pub_key,
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "nfsCommaSeparatedAddresses": ",".join(test_vars["cluster_vs_ips"]),
            "nfsExportPath": "/msazure",
        }
        atd.deploy_name = "test_edasim_deploy"
        deploy_result = wait_for_op(atd.deploy())
        test_vars["deploy_edasim_outputs"] = deploy_result.properties.outputs
Example 6
    def test_reg_clients_deploy(self, test_vars):  # noqa: F811
        """
        Deploys <num_vms> VM clients for vFXT regression testing.
        <num_vms> must be at least 1.
          * the first VM is a STAF server
          * the next (<num_vms> - 1) VMs are STAF clients
        All regression client VMs can run SV.
        """
        log = logging.getLogger("test_reg_clients_deploy")
        atd = test_vars["atd_obj"]

        num_vms = -1  # number of regression VMs (-1 initially, 1 by default)
        if "NUM_REG_VMS" in os.environ:
            num_vms = int(os.environ["NUM_REG_VMS"])
        if "num_reg_vms" in test_vars:
            num_vms = int(test_vars["num_reg_vms"])
        if num_vms < 1:
            if num_vms != -1:  # user set the value incorrectly; enforce min
                log.warning("Number of VMs must be > 0. Setting to 1.")
            num_vms = 1
        log.info("Deploying {} regression VMs".format(num_vms))

        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        with open("{}/src/client/vmas/azuredeploy.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)

        staf_server_unique_name = atd.deploy_id + "-rc-staf"
        atd.deploy_params = {
            "uniquename": staf_server_unique_name,
            "sshKeyData": ssh_pub_key,
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "nfsCommaSeparatedAddresses":
            ",".join(test_vars["cluster_vs_ips"]),
            "vmCount": 1,
            "nfsExportPath": "/msazure",
            "bootstrapScriptPath": "/bootstrap/bootstrap.reg_client.sh",
            "appEnvironmentVariables": " REG_CLIENT_TYPE=SERVER "
        }

        # The first regression client to be deployed is also a STAF server.
        atd.deploy_name = "deploy_reg_client_1"
        deploy_handle = atd.deploy()

        if num_vms > 1:
            # Remove the first entry of the "resources" section, which is an
            # empty deployment used for tracking purposes. This avoids a
            # collision when attempting to update the empty deployment.
            atd.template["resources"].pop(0)

            log.debug("Deploying {} more regression VM(s)".format(num_vms - 1))
            atd.deploy_params["uniquename"] = atd.deploy_id + "-rc"
            atd.deploy_params["vmCount"] = num_vms - 1
            atd.deploy_params["appEnvironmentVariables"] = " REG_CLIENT_TYPE=CLIENT "
            atd.deploy_name = "deploy_reg_clients_N"
            deploy_result_2 = wait_for_op(atd.deploy())

        # Wait for the result of the first deployment.
        deploy_result_1 = wait_for_op(deploy_handle)

        log.debug("deploy 1 outputs = {}".format(
            deploy_result_1.properties.outputs))

        log.debug("Get the IP for the first regression client (STAF server).")
        test_vars["staf_server_priv_ip"] = atd.nm_client.network_interfaces.get(
            atd.resource_group,
            "vmnic-" + staf_server_unique_name + "-0",
        ).ip_configurations[0].private_ip_address

        test_vars["staf_client_priv_ips"] = []
        if num_vms > 1:
            log.debug("deploy 2 outputs = {}".format(
                deploy_result_2.properties.outputs))
            log.debug("Get private IPs for the STAF clients.")
            for i in range(num_vms - 1):
                test_vars["staf_client_priv_ips"].append(
                    atd.nm_client.network_interfaces.get(
                        atd.resource_group,
                        "vmnic-{}-{}".format(atd.deploy_params["uniquename"], i),
                    ).ip_configurations[0].private_ip_address)
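The two IP lookups at the end follow the NIC naming convention vmnic-<uniquename>-<index> used by the client template. If the same pattern is needed elsewhere, a small helper along these lines (hypothetical, not part of the harness) would cover both the STAF server and the client loop.

def get_private_ips(nm_client, resource_group, unique_name, count):
    """Return the primary private IP of each NIC named vmnic-<unique_name>-<i>."""
    ips = []
    for i in range(count):
        nic = nm_client.network_interfaces.get(
            resource_group, "vmnic-{}-{}".format(unique_name, i))
        ips.append(nic.ip_configurations[0].private_ip_address)
    return ips

With it, the STAF server IP would be get_private_ips(atd.nm_client, atd.resource_group, staf_server_unique_name, 1)[0], and the client IPs would be the result of a second call with atd.deploy_params["uniquename"] and num_vms - 1.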