    def test_get_pc_uuid_header_over_5_8(self, get_nutanix_metadata,
                                         **kwargs):
        get_nutanix_metadata.return_value = NutanixMetadata(
            version="el7.3-release-euphrates-5.8-stable-"
                    "4c26d1af153833c54b67536fb0a4044e6e8c1b07")
        client = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        header = client._get_pc_uuid_header()
        self.assertEqual(header, {})

    def test_get_pc_uuid_header_master_version(self, get_nutanix_metadata,
                                               get_pc_uuid, **kwargs):
        get_nutanix_metadata.return_value = NutanixMetadata(
            version=TestPrismAPIVersions.VALID_AOS_MASTER_VERSION)
        client = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        header = client._get_pc_uuid_header()
        self.assertEqual(header, {})

    def test_get_pc_uuid_header_under_5_5(self, get_nutanix_metadata,
                                          **kwargs):
        get_nutanix_metadata.return_value = NutanixMetadata(
            version="el6-release-danube-4.6-stable-"
                    "ee1b1aab1ac3a630694d9fd45ac1c6b91c1d3dd5")
        client = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        header = client._get_pc_uuid_header()
        self.assertEqual(header, {})

    def test_get_pc_uuid_header_5_5(self, get_nutanix_metadata, get_pc_uuid,
                                    **kwargs):
        get_nutanix_metadata.return_value = NutanixMetadata(
            version="el7.3-release-euphrates-5.5.0.5-stable-"
                    "0f7655edaa04231239690f59cfb9fce39377ef89")
        get_pc_uuid.return_value = "UUID-UUID-UUID-UUID-UUID"
        client = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        header = client._get_pc_uuid_header()
        self.assertEqual(header,
                         {"X-NTNX-PC-UUID": "UUID-UUID-UUID-UUID-UUID"})
    def test_retry_on_proxy_error_genesis_failure(self, resp, **kwargs):
        post = kwargs["post"]
        ping_ip = kwargs["ping_ip"]
        ping_ip.return_value = True
        resp.json.return_value = self._DUMMY_RPC_PROXY_ERROR_RET
        resp.status_code = 200
        post.side_effect = [resp] * (self.GENESIS_MAX_RETRIES + 1)

        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")

        # Call genesis RPC method.
        with self.assertRaises(CurieException):
            cli.genesis_cluster_status()
    def test_get_pc_uuid_header_5_5_unsupported_version(
            self, get_nutanix_metadata, get_pc_uuid, **kwargs):
        get_nutanix_metadata.return_value = NutanixMetadata(
            version="el7.3-release-euphrates-5.5-stable-"
                    "cbd9acfdebb3c7fd70ff2b4c4061515daadb50b1")
        get_pc_uuid.return_value = "UUID-UUID-UUID-UUID-UUID"
        client = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        with self.assertRaises(CurieException) as ar:
            client._get_pc_uuid_header()
        self.assertEqual(str(ar.exception), (
            "Cluster is running a 5.5 version which is incompatible with X-Ray "
            "when connected to Prism Central. Please upgrade AOS to 5.5.0.2 or "
            "newer, or disconnect Prism Central."))
    def test_vms_get_no_retries(self, **_):
        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        expected = {
            "entities": [
                {"uuid": "uuid_0"},
                {"uuid": "uuid_1"},
            ]
        }
        with mock.patch.object(cli,
                               "_NutanixRestApiClient__get") as mock_cli_get:
            mock_cli_get.return_value = expected
            self.assertDictEqual(expected, cli.vms_get(vm_ip="123.45.67.89"))
Example #8
  def _update_cluster_version_info_prism(cluster_pb):
    """
    See 'DiscoveryUtil.update_cluster_version_info' for info.
    """
    mgmt_info = cluster_pb.cluster_management_server_info.prism_info
    software_info = cluster_pb.cluster_software_info.nutanix_info
    hyp_info = cluster_pb.cluster_hypervisor_info.ahv_info

    cli = NutanixRestApiClient.from_proto(mgmt_info, timeout_secs=10)

    DiscoveryUtil._update_cluster_version_info_nos(cli, cluster_pb)
    mgmt_info.prism_version = software_info.version

    for host in cli.hosts_get().get("entities", []):
      if host["clusterUuid"] != software_info.cluster_uuid:
        continue

      # We only support homogeneous AHV clusters via Prism.
      if host.get("hypervisorType") != "kKvm":
        raise CurieException(CurieError.kInvalidParameter,
                             "Specified cluster is mixed hypervisor")

      # Strip any "Nutanix " prefix from AHV version strings.
      curr_hyp_version = re.sub(
        "^Nutanix ", "", DiscoveryUtil._get_hyp_version_for_host(host))
      hyp_info.version.extend([curr_hyp_version])
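For reference, the regex above is anchored, so only a leading "Nutanix " prefix is stripped; a quick illustration (the version string here is made up):

import re

print(re.sub("^Nutanix ", "", "Nutanix 20170830.58"))  # -> 20170830.58
print(re.sub("^Nutanix ", "", "20170830.58"))          # unchanged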
    def test_vms_get_drop_duplicates_false(self, **_):
        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        expected = {
            "entities": [
                {"uuid": "uuid_0"},
                {"uuid": "uuid_0"},
            ]
        }
        with mock.patch.object(cli,
                               "_NutanixRestApiClient__get") as mock_cli_get:
            mock_cli_get.return_value = expected
            response = cli.vms_get(vm_ip="123.45.67.89", drop_duplicates=False)
        self.assertEqual(response, expected)
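The two `vms_get` tests above imply that duplicate entities are dropped by `uuid` by default and passed through when `drop_duplicates=False`. A sketch of that filtering step, purely illustrative (the client's real helper is not shown on this page):

def _drop_duplicate_entities(response):
    # Keep the first entity seen for each UUID, preserving response order.
    seen = set()
    unique = []
    for entity in response.get("entities", []):
        if entity["uuid"] not in seen:
            seen.add(entity["uuid"])
            unique.append(entity)
    response["entities"] = unique
    return response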
    def test_retry_success(self, resp, **kwargs):
        get = kwargs["get"]
        ping_ip = kwargs["ping_ip"]
        ping_ip.return_value = True
        resp.status_code = 200
        resp.json.return_value = self._DUMMY_CLUSTERS_GET_RET

        get.side_effect = ([self._make_conn_error_for_host(self.HOSTS[0])] * 2
                           + [resp])

        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        # Call standard REST API method.
        cli.clusters_get()

        # Verify calls cycled through available hosts.
        self._verify_url_in_call_args([
            self.HOST_URL_PREFIXES[0],
        ], get)
    def test_retry_on_proxy_error_genesis_success(self, resp, **kwargs):
        post = kwargs["post"]
        ping_ip = kwargs["ping_ip"]
        ping_ip.return_value = True
        resp.json.side_effect = ([self._DUMMY_RPC_PROXY_ERROR_RET] * 3 +
                                 [self._DUMMY_RPC_RET])
        resp.status_code = 200
        post.side_effect = [resp] * 4

        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")

        # Call genesis RPC method.
        cli.genesis_cluster_status()

        # Verify calls cycled through available hosts.
        self._verify_url_in_call_args([
            self.HOST_URL_PREFIXES[0],
        ], post)
    def test_retry_failure(self, **kwargs):
        get = kwargs["get"]
        ping_ip = kwargs["ping_ip"]
        ping_ip.return_value = True
        side_effect = ([self._make_conn_error_for_host(self.HOSTS[0])]
                       * REST_API_MAX_RETRIES)
        side_effect.append(
            ConnectionError("Last error, should raise exception"))
        get.side_effect = side_effect

        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        # Call standard REST API method.
        with self.assertRaises(CurieException):
            cli.clusters_get()

        # Verify calls cycled through available hosts.
        self._verify_url_in_call_args([
            self.HOST_URL_PREFIXES[0],
        ], get)
    def test_retry_failure_genesis(self, **kwargs):
        post = kwargs["post"]
        ping_ip = kwargs["ping_ip"]
        ping_ip.return_value = True
        side_effect = ([self._make_conn_error_for_host(self.HOSTS[0])]
                       * self.GENESIS_MAX_RETRIES)
        side_effect.append(
            ConnectionError("Last error, should trigger exception"))
        post.side_effect = side_effect

        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")

        # Call genesis RPC method.
        with self.assertRaises(CurieException):
            cli.genesis_cluster_status()

        # Verify calls cycled through available hosts.
        self._verify_url_in_call_args([
            self.HOST_URL_PREFIXES[0],
        ], post)
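The retry tests above all encode the same contract: transient `ConnectionError`s (and Genesis proxy errors) are retried up to a fixed budget, cycling through the available hosts, and only an exhausted budget surfaces as a `CurieException`. A generic sketch of such a loop, with the host-cycling detail omitted (this is an illustration, not the client's actual code):

from requests.exceptions import ConnectionError

def call_with_retries(func, max_retries):
    # max_retries + 1 total attempts; the final failure is converted into
    # a CurieException, matching what the tests above assert.
    for attempt in range(max_retries + 1):
        try:
            return func()
        except ConnectionError:
            if attempt == max_retries:
                raise CurieException(CurieError.kInternalError,
                                     "Retries exhausted")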
Example #14
    def cleanup(self, test_ids=()):
        """Remove all Curie templates and state from this cluster."""
        log.info("Cleaning up state on cluster %s",
                 self.metadata().cluster_name)

        with self._open_vcenter_connection() as vcenter:
            self._cleanup_datastore(vcenter)
            cluster_software_info = self._metadata.cluster_software_info
            if cluster_software_info.HasField("nutanix_info"):
                client = NutanixRestApiClient.from_proto(
                    cluster_software_info.nutanix_info)
                client.cleanup_nutanix_state(test_ids)
Example #15
    def __init__(self, node, rest_api_timeout_secs=60):
        # Full cluster metadata proto.
        self.__cluster_metadata = node.cluster().metadata()

        # Node object for which this util is used.
        self.__node = node

        software_info = self.__cluster_metadata.cluster_software_info
        CHECK(software_info.HasField("nutanix_info"))

        # NutanixRestApiClient instance to use.
        self.__api = NutanixRestApiClient.from_proto(
            software_info.nutanix_info, timeout_secs=rest_api_timeout_secs)
Example #16
    def __init__(self, cluster_metadata):
        super(NutanixClusterDPMixin, self).__init__(cluster_metadata)
        self.cluster_metadata = cluster_metadata
        # A map for keeping track of the snapshot count for a set of VMs.
        # Key = pd_name, Value = snapshot count
        self.__snapshot_count = {}

        # A map for keeping track of the protection domain for a set of VMs.
        # Key = vm_id_set, Value = pd_name
        self.__pd_name_map = {}

        # Prism client for issuing REST API calls.
        self._prism_client = NutanixRestApiClient.from_proto(
            self.cluster_metadata.cluster_software_info.nutanix_info)
Example #17
  def _update_cluster_version_info_vcenter(cluster_pb):
    """
    See 'DiscoveryUtil.update_cluster_version_info' for info.
    """
    mgmt_info = cluster_pb.cluster_management_server_info.vcenter_info
    hyp_info = cluster_pb.cluster_hypervisor_info.esx_info

    with VsphereVcenter.from_proto(mgmt_info) as vcenter:
      vim_dc = vcenter.lookup_datacenter(mgmt_info.vcenter_datacenter_name)
      vim_cluster = vcenter.lookup_cluster(vim_dc,
                                           mgmt_info.vcenter_cluster_name)
      if vim_cluster is None:
        raise CurieException(CurieError.kInvalidParameter,
                             "Cluster not found in specified vCenter")

      esx_version_pairs = vcenter.get_esx_versions(vim_cluster)
      hyp_info.version.extend(pair[0] for pair in esx_version_pairs)
      hyp_info.build.extend(pair[1] for pair in esx_version_pairs)

      mgmt_info.vcenter_version, mgmt_info.vcenter_build = \
          vcenter.get_vcenter_version_info()

      if cluster_pb.cluster_software_info.HasField("nutanix_info"):
        cvms = [vim_vm for vim_vm in vcenter.lookup_vms(vim_cluster)
                if vcenter.vim_vm_is_nutanix_cvm(vim_vm)]
        if not cvms:
          raise CurieException(
            CurieError.kInvalidParameter,
            "Unable to locate any CVMs on cluster. Is this a Nutanix cluster?")
        for cvm in cvms:
          ip = get_optional_vim_attr(cvm.guest, "ipAddress")
          if ip and CurieUtil.is_ipv4_address(ip):
            break
        else:
          raise CurieException(
            CurieError.kInvalidParameter,
            "Unable to locate any CVMs with IPv4 addresses on cluster")

        software_info = cluster_pb.cluster_software_info.nutanix_info
        cli = NutanixRestApiClient(
          ip,
          software_info.decrypt_field("prism_user"),
          software_info.decrypt_field("prism_password"))
        DiscoveryUtil._update_cluster_version_info_nos(cli, cluster_pb)
Example #18
  def discover_nodes_prism(arg, ret):
    """
    See 'DiscoveryUtil.handle_nodes_discovery_v2' for info.
    """
    cli = NutanixRestApiClient.from_proto(arg.mgmt_server.conn_params)
    # TODO (jklein): Optimize loops here. Currently OK as nothing is doing
    # discovery on more than one cluster.

    for target_cluster in arg.cluster_collection.cluster_vec:
      cluster_uuid = AcropolisCluster.identifier_to_cluster_uuid(
        cli, target_cluster.name)

      node_coll = ret.node_collection_vec.add()
      node_coll.cluster_id = cluster_uuid

      for host in cli.hosts_get().get("entities", []):
        if host["clusterUuid"] != cluster_uuid:
          continue
        node = node_coll.node_vec.add()
        node.id = host["uuid"]
        # Use UUID as name as this will remain constant. In failure
        # scenarios, hosts revert to an IP address from the name for some
        # reason. See XRAY-276.
        node.name = host["uuid"]

        # Already validated upstream that these fields are either both set
        # or both unset.
        if (arg.oob_info.conn_params.username and
            arg.oob_info.type != OobInterfaceType.kNone):
          node.oob_info.CopyFrom(arg.oob_info)
          node.oob_info.conn_params.address = host["ipmiAddress"]
          if host.get("bmcModel") in ["X9_ATEN", "X10_ATEN"]:
            node.oob_info.vendor = node.oob_info.kSupermicro
          else:
            node.oob_info.vendor = node.oob_info.kUnknownVendor
        # We only support homogeneous AHV clusters via Prism.
        if host.get("hypervisorType") != "kKvm":
          raise CurieException(
            CurieError.kInvalidParameter,
            "Provided cluster is mixed hypervisor")
        node.hypervisor.type = node.hypervisor.kAhv
        node.hypervisor.version = DiscoveryUtil._get_hyp_version_for_host(host)
Example #19
 def _run(self):
   node = self.scenario.cluster.nodes()[self.node_index]
   client = NutanixRestApiClient.from_proto(
     self.scenario.cluster.metadata().cluster_software_info.nutanix_info)
   for item in client.hosts_get()["entities"]:
     if item["hypervisorAddress"] == node.node_ip():
       break  # Success
   else:
     raise CurieTestException(
       cause=
       "Node '%s' with hypervisor address '%s' is not a member of the "
       "Nutanix cluster managed at '%s'." %
       (self.node_index, node.node_ip(), client.host),
       impact=
       "The configured nodes belong to multiple Nutanix clusters, which is "
       "not supported.",
       corrective_action=
       "Please choose a set of nodes that belong to a single Nutanix "
       "cluster. If the target is configured for metro availability, please "
       "choose nodes that all belong to a single site."
     )
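The `for`/`else` above is Python's search idiom: the `else` clause runs only when the loop finishes without hitting `break`, i.e. no entity matched the node's hypervisor address. In miniature:

hosts = [{"hypervisorAddress": "10.0.0.1"}]
for host in hosts:
    if host["hypervisorAddress"] == "10.0.0.2":
        break
else:
    print("no matching host")  # runs because the loop never broke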
Example #20
 def _run(self):
   node = self.scenario.cluster.nodes()[self.node_index]
   cluster_software_info = self.scenario.cluster.metadata().cluster_software_info
   # On a Nutanix cluster, check that the datastore is also visible on all
   # nodes in Prism.
   client = NutanixRestApiClient.from_proto(
     cluster_software_info.nutanix_info)
   for item in client.datastores_get():
     if item["hostIpAddress"] == node.node_ip():
       if item["datastoreName"] == self.datastore_name:
         break  # Success
   else:
     raise CurieTestException(
       cause=
       "Datastore '%s' not mounted to ESXi host '%s' with address '%s'" %
       (self.datastore_name, node.node_id(), node.node_ip()),
       impact=
       "The local path to the datastore is unavailable.",
       corrective_action=
       "Please choose a datastore that is mapped to all hosts, including the "
       "ESXi host '%s' with address '%s'" % (node.node_id(), node.node_ip()),
     )
Example #21
  def _update_cluster_version_info_vmm(cluster_pb):
    """
    See 'DiscoveryUtil.update_cluster_version_info' for info.
    """
    mgmt_info = cluster_pb.cluster_management_server_info.vmm_info
    hyp_info = cluster_pb.cluster_hypervisor_info.hyperv_info

    vmm_client = VmmClient(address=mgmt_info.vmm_server,
                           username=mgmt_info.vmm_user,
                           password=mgmt_info.vmm_password)
    with vmm_client:
      cluster = vmm_client.get_clusters(
        cluster_name=mgmt_info.vmm_cluster_name)[0]
      mgmt_info.vmm_version = vmm_client.get_vmm_version()
      nodes = vmm_client.get_nodes(mgmt_info.vmm_cluster_name)
      hyp_info.version.extend(node["version"] for node in nodes)
    if cluster_pb.cluster_software_info.HasField("nutanix_info"):
      software_info = cluster_pb.cluster_software_info.nutanix_info
      cli = NutanixRestApiClient(
        software_info.prism_host,
        software_info.decrypt_field("prism_user"),
        software_info.decrypt_field("prism_password"))
      DiscoveryUtil._update_cluster_version_info_nos(cli, cluster_pb)
Example #22
    def update_metadata(self, include_reporting_fields):
        with self._open_vcenter_connection() as vcenter:
            vim_cluster = self._lookup_vim_cluster(vcenter)
            vcenter.fill_cluster_metadata(vim_cluster, self._metadata,
                                          include_reporting_fields)
            vcenter.match_node_metadata_to_vcenter(vim_cluster, self._metadata)

            if (self._metadata.cluster_software_info.HasField("vsan_info")
                    or self._metadata.cluster_software_info.HasField(
                        "generic_info")):

                # TODO (jklein): Clean-up/properly abstract out these checks.
                mgmt_info = self._metadata.cluster_management_server_info
                if mgmt_info.HasField("vcenter_info"):
                    ds_name = mgmt_info.vcenter_info.vcenter_datastore_name
                else:
                    raise CurieTestException("Invalid metadata")
                vim_ds = vcenter.lookup_datastore(vim_cluster, ds_name)
                if vim_ds.summary.type == "vsan":
                    if self._metadata.cluster_software_info.HasField(
                            "generic_info"):
                        self._metadata.cluster_software_info.ClearField(
                            "generic_info")
                        self._metadata.cluster_software_info.vsan_info.SetInParent()
                else:
                    if self._metadata.cluster_software_info.HasField(
                            "vsan_info"):
                        raise CurieTestException(
                            "Target is not configured for VSAN")

            if include_reporting_fields:
                version_pairs = VsphereVcenter.get_esx_versions(
                    vim_cluster, self._metadata.cluster_nodes)

                esx_versions = [pair[0] for pair in version_pairs]
                esx_builds = [pair[1] for pair in version_pairs]
                self._metadata.cluster_hypervisor_info.esx_info.version.extend(
                    esx_versions)
                self._metadata.cluster_hypervisor_info.esx_info.build.extend(
                    esx_builds)
                cluster_software_info = self._metadata.cluster_software_info
                if cluster_software_info.HasField("nutanix_info"):
                    client = NutanixRestApiClient.from_proto(
                        cluster_software_info.nutanix_info)
                    nutanix_metadata = client.get_nutanix_metadata()
                    if nutanix_metadata.version is not None:
                        cluster_software_info.nutanix_info.version = \
                            nutanix_metadata.version
                    if nutanix_metadata.cluster_uuid is not None:
                        cluster_software_info.nutanix_info.cluster_uuid = \
                            nutanix_metadata.cluster_uuid
                    if nutanix_metadata.cluster_incarnation_id is not None:
                        cluster_software_info.nutanix_info.cluster_incarnation_id = \
                            nutanix_metadata.cluster_incarnation_id
                elif cluster_software_info.HasField("vsan_info"):
                    pass
                elif cluster_software_info.HasField("generic_info"):
                    pass
                else:
                    raise CurieException(
                        CurieError.kInternalError,
                        "Unsupported software on vSphere cluster, %s" %
                        self._metadata)
        self._node_id_metadata_map = dict([
            (node.id, node) for node in self._metadata.cluster_nodes
        ])
Example #23
  def discover_clusters_prism(address, username, password, ret):
    """
    Args:
      address (str): Address of the management server.
      username (str): Username for the management server.
      password (str): Password for the management server.
      ret (DiscoverClustersV2Ret): Return proto to be populated.
    """
    api_client = NutanixRestApiClient(host=address,
                                      api_user=username,
                                      api_password=password,
                                      timeout_secs=10)
    coll = ret.cluster_inventory.cluster_collection_vec.add()
    coll.name = "Prism"

    networks = api_client.networks_get()
    network_proto_list = []
    for net in networks.get("entities", []):
      net_info = LogicalNetwork()
      net_info.id = net["name"]  # Curie expects the network by name
      net_info.name = net["name"]
      network_proto_list.append(net_info)

    containers = api_client.containers_get()
    # NB: itertools.groupby only groups consecutive items, so sort by the
    # grouping key first; otherwise a cluster's containers could be split
    # across multiple groups and all but the last group would be dropped.
    cluster_uuid_container_map = dict(
      (cid, list(ctrs)) for (cid, ctrs) in
      itertools.groupby(sorted(containers.get("entities", []),
                               key=lambda ctr: ctr["clusterUuid"]),
                        lambda ctr: ctr["clusterUuid"]))

    clusters = api_client.clusters_get()

    cluster_json_list = clusters.get("entities", [])
    if len(cluster_json_list) == 1:
      # Assume that if we're only discovering one cluster, we are expecting
      # the Prism management address to match the cluster virtual IP.
      # TODO (jklein): This needs to change if we start supporting PC.
      cluster_json = cluster_json_list[0]
      cluster_vip = cluster_json.get("clusterExternalIPAddress")
      if cluster_vip is None:
        raise CurieException(
          CurieError.kInvalidParameter,
          "Cluster '%s' does not appear to be configured with a virtual IP"
          % cluster_json["name"])
      # NB: If the VIP does not match the provided Prism address, it will be
      # corrected during an UpdateAndValidateCluster RPC.

    for cluster_json in cluster_json_list:
      cluster_info = coll.cluster_vec.add()
      cluster_info.id = cluster_json["clusterUuid"]
      cluster_info.name = cluster_json["name"]
      cluster_info.clustering_software.type = \
          cluster_info.clustering_software.kNutanix
      cluster_info.clustering_software.version = cluster_json.get(
        "fullVersion", "")
      cluster_info.management_server.type = \
          cluster_info.management_server.kPrism
      cluster_info.management_server.version = \
          cluster_info.clustering_software.version

      for ctr in cluster_uuid_container_map[cluster_info.id]:
        storage_info = cluster_info.storage_info_vec.add()
        storage_info.name = ctr["name"]
        storage_info.id = ctr["containerUuid"]
        storage_info.kind = storage_info.kNutanixContainer
      cluster_info.network_info_vec.extend(network_proto_list)
Example #24
  def update_cluster_virtual_ip(cluster_pb):
    """
    Updates 'prism_host' to correspond to the cluster virtual IP.

    The 'prism_host' field is set for management and clustering software as
    appropriate for the target hypervisor.

    Returns:
      True if 'prism_host' was updated, else False.

    Raises:
      CurieException<kInvalidParameter>: 'cluster_pb' is a Nutanix cluster but
        does not have a Virtual IP.
    """
    if not cluster_pb.cluster_software_info.HasField("nutanix_info"):
      return False

    prism_proto = None
    ntnx_proto = cluster_pb.cluster_software_info.nutanix_info
    prism_user = ntnx_proto.decrypt_field("prism_user")
    prism_password = ntnx_proto.decrypt_field("prism_password")
    c_uuid = ntnx_proto.cluster_uuid

    if cluster_pb.cluster_management_server_info.HasField("prism_info"):
      prism_proto = cluster_pb.cluster_management_server_info.prism_info
      client = NutanixRestApiClient.from_proto(prism_proto, timeout_secs=10)
      cluster_json = client.clusters_get(cluster_id=c_uuid)
    else:
      cvm_addresses = []
      if cluster_pb.cluster_management_server_info.HasField("vmm_info"):
        vmm_info = cluster_pb.cluster_management_server_info.vmm_info
        vmm_client = VmmClient(address=vmm_info.vmm_server,
                               username=vmm_info.vmm_user,
                               password=vmm_info.vmm_password)
        with vmm_client:
          vms = vmm_client.get_vms(cluster_name=vmm_info.vmm_cluster_name)
          for vm in vms:
            if VmmClient.is_nutanix_cvm(vm):
              if VmmClient.is_powered_on(vm):
                log.debug("Found CVM '%s' with IPs: %s", vm["name"], vm["ips"])
                cvm_addresses.extend(vm["ips"])
              else:
                log.debug("Skipping CVM '%s' because it is not powered on.", vm["name"])
      else:
        node_ids = [node.id for node in cluster_pb.cluster_nodes]
        # NB: We currently have an asymmetrical input for Prism credentials
        # depending on whether they're considered as management software or
        # clustering software. In the latter case, which is when 'nutanix_info'
        # is set but not 'prism_info', the user is not asked for a Prism host.
        # In this case, we discover CVMs via vCenter, attempt to connect to Prism
        # on each in sequence until successful, and then query the virtual IP.
        mgmt_info = cluster_pb.cluster_management_server_info.vcenter_info
        with VsphereVcenter.from_proto(mgmt_info) as vcenter:
          vim_dc = vcenter.lookup_datacenter(mgmt_info.vcenter_datacenter_name)
          vim_cluster = vcenter.lookup_cluster(vim_dc,
                                               mgmt_info.vcenter_cluster_name)
          for vim_cvm in (vm for vm in vcenter.lookup_vms(vim_cluster)
                          if vcenter.vim_vm_is_nutanix_cvm(vm)):
            vim_host = get_optional_vim_attr(vim_cvm.runtime, "host")
            if vim_host:
              if vim_host.name in node_ids:
                cvm_address = vcenter.get_vim_vm_ip_address(vim_cvm)
                if cvm_address:
                  log.debug("Found CVM '%s' with address '%s'" %
                            (vim_cvm.name, cvm_address))
                  cvm_addresses.append(cvm_address)
              else:
                log.debug("Skipping CVM '%s'; Host '%s' is not in the "
                          "metadata" % (vim_cvm.name, vim_host.name))
      # We run Nutanix API only against powered on CVMs.
      if not cvm_addresses:
        raise CurieTestException(
          cause="No Nutanix CVMs found.",
          impact="The cluster virtual IP address can not be discovered.",
          corrective_action="Please verify that the cluster contains Nutanix "
                            "CVMs, and that they are powered on.",
        )
      for cvm_address in cvm_addresses:
        client = NutanixRestApiClient(cvm_address, prism_user, prism_password)
        try:
          cluster_json = client.clusters_get(cluster_id=c_uuid, max_retries=3)
        except BaseException:
          log.warning("Unable to query CVM with IP '%s'",
                      cvm_address, exc_info=True)
        else:
          break
      else:
        raise CurieTestException(
          cause="Failed to query Prism on any Nutanix CVM.",
          impact="The cluster virtual IP address can not be discovered.",
          corrective_action="Please verify that the Nutanix CVMs on the "
                            "cluster are powered on, and that the network "
                            "connectivity to the CVMs is correct.",
        )

    if "clusterExternalIPAddress" in cluster_json:
      cluster_name = cluster_json.get("name")
      cluster_vip = cluster_json["clusterExternalIPAddress"]
    elif "entities" in cluster_json:
      cluster_data = cluster_json["entities"][0]
      cluster_name = cluster_data.get("name")
      cluster_vip = cluster_data["clusterExternalIPAddress"]
    else:
      raise CurieException(
        CurieError.kInvalidParameter,
        "Unrecognized response from NutanixRestApiClient.clusters_get")
    if not cluster_vip:
      raise CurieException(
        CurieError.kInvalidParameter,
        "Cluster '%s' does not appear to be configured with a virtual IP "
        "(received '%s')" % (cluster_name, cluster_vip))
    else:
      log.debug("Identified Nutanix cluster virtual IP address: '%s'",
                cluster_vip)
    ntnx_proto.prism_host = cluster_vip
    if prism_proto:
      prism_proto.prism_host = cluster_vip
    return True
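For context, a hypothetical call site (assuming, as with the neighboring helpers, that this function lives on `DiscoveryUtil`):

if DiscoveryUtil.update_cluster_virtual_ip(cluster_pb):
  log.debug("prism_host now points at the cluster virtual IP")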
Example #25
 def setUp(self):
   self.prism = NutanixRestApiClient("host", "user", "pass")
   self.sample_timestamp = time.time()
Example #26
 def _delay(*args, **kwargs):
   time.sleep(1)
   return NutanixRestApiClient("host", "user", "pass")
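A helper like `_delay` is typically handed to a mock as its `side_effect`, letting a test simulate a slow client constructor; an illustrative use (not from the source):

factory = mock.Mock(side_effect=_delay)
client = factory("host", "user", "pass")  # sleeps ~1s, then returns a client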
Example #27
    def prereq_runtime_vm_storage_is_ready(cluster):
        """
        Confirms that curie test VM storage on each node in 'cluster' is
        available.

        Raises:
          CurieTestException if curie test VM storage is unavailable on any
          node.
        """
        metadata = cluster.metadata()
        if metadata.cluster_hypervisor_info.HasField("esx_info"):
            num_nodes = len(metadata.cluster_nodes)
            CHECK(
                metadata.cluster_management_server_info.HasField(
                    "vcenter_info"))
            vcenter_info = metadata.cluster_management_server_info.vcenter_info
            datastore_name = vcenter_info.vcenter_datastore_name
            # Check that the datastore is visible on all nodes in vCenter.
            log.info(
                "Checking that datastore %s is visible on all %s nodes in "
                "vCenter", datastore_name, cluster.name())
            if not cluster.datastore_visible(datastore_name):
                raise CurieTestException(
                    "Datastore %s not visible on all %s nodes "
                    "in vCenter" % (datastore_name, cluster.name()))
            log.info("Datastore %s is visible on all %s nodes in vCenter",
                     datastore_name, cluster.name())
            cluster_software_info = metadata.cluster_software_info
            if cluster_software_info.HasField("nutanix_info"):
                # On a Nutanix cluster, check that the datastore is also visible on all
                # nodes in Prism.
                log.info(
                    "Checking that datastore %s is visible by Prism on all %s "
                    "nodes", datastore_name, cluster.name())
                client = NutanixRestApiClient.from_proto(
                    cluster_software_info.nutanix_info)
                host_id_datastore_map = {}
                for item in client.datastores_get():
                    host_id_datastore_map.setdefault(item["hostId"], set())
                    host_id_datastore_map[item["hostId"]].add(
                        item["datastoreName"])
                CHECK_LE(len(host_id_datastore_map), num_nodes)
                for host_id in host_id_datastore_map:
                    if datastore_name not in host_id_datastore_map[host_id]:
                        raise CurieTestException(
                            "Datastore %s not visible by Prism on %s node %s" %
                            (datastore_name, cluster.name(), host_id))
                log.info("Datastore %s is visible by Prism on all %s nodes",
                         datastore_name, cluster.name())
            elif cluster_software_info.HasField("vsan_info"):
                pass
            elif cluster_software_info.HasField("generic_info"):
                pass
            else:
                raise ValueError("Unknown cluster software info, metadata %s" %
                                 metadata)
        elif metadata.cluster_hypervisor_info.HasField("hyperv_info"):
            # TODO (bferlic): More thorough checking here?
            return True
        elif metadata.cluster_hypervisor_info.HasField("ahv_info"):
            # TODO (jklein): More thorough checking here?
            return True
        else:
            raise ValueError("Unknown hypervisor type, metadata %s" % metadata)
Example #28
    def prereq_runtime_vm_storage_is_ready_fix(cluster):
        """
        Attempt to make curie test VM storage available on all nodes.

        Raises:
          CurieTestException on error or timeout.
        """
        metadata = cluster.metadata()
        if metadata.cluster_hypervisor_info.HasField("esx_info"):
            CHECK(
                metadata.cluster_management_server_info.HasField(
                    "vcenter_info"))
            vcenter_info = metadata.cluster_management_server_info.vcenter_info
            datastore_name = vcenter_info.vcenter_datastore_name

            def datastore_visible():
                try:
                    ScenarioUtil.prereq_runtime_vm_storage_is_ready(cluster)
                    return True
                except CurieTestException:
                    return False
            msg = "datastore %s visible on all %s nodes" % \
              (datastore_name, cluster.name())
            # Refresh datastores state on all nodes to try and make the datastore
            # visible from vCenter's perspective.
            log.info("Refreshing datastores on all %s nodes", cluster.name())
            cluster.refresh_datastores()
            if CurieUtil.wait_for(datastore_visible, msg, 60):
                return
            cluster_software_info = metadata.cluster_software_info
            if cluster_software_info.HasField("nutanix_info"):
                client = NutanixRestApiClient.from_proto(
                    cluster_software_info.nutanix_info)
                container_name = None
                for item in client.datastores_get():
                    if item["datastoreName"] == datastore_name:
                        container_name = item["containerName"]
                        break
                if container_name is None:
                    log.warning(
                        "Datastore %s not mounted on any %s nodes, assuming "
                        "container name is the same as the desired datastore "
                        "name", datastore_name, cluster.name())
                    # Assume that the desired datastore has the same name as an existing
                    # container name.
                    container_name = datastore_name
                # Remount the datastore to try and make the datastore visible.
                log.info(
                    "Unmounting and mounting datastore %s (container %s) on %s",
                    datastore_name, container_name, cluster.name())
                try:
                    client.datastores_delete(datastore_name, verify=True)
                except CurieException as ex:
                    if ex.error_code != CurieError.kInvalidParameter:
                        raise
                    # If Prism views the datastore as unmounted, kInvalidParameter is
                    # returned so continue to try and mount the datastore on all nodes.
                client.datastores_create(container_name,
                                         datastore_name=datastore_name)
                cluster.refresh_datastores()
                if not CurieUtil.wait_for(datastore_visible, msg, 60):
                    raise CurieTestException(
                        "Timeout waiting for datastore %s for "
                        "VM storage to become visible on %s" %
                        (datastore_name, cluster.name()))
            elif cluster_software_info.HasField("vsan_info"):
                raise CurieTestException(
                    "VSAN datastore %s not mounted on all %s "
                    "nodes" % (datastore_name, cluster.name()))
            elif cluster_software_info.HasField("generic_info"):
                raise CurieTestException(
                    "Datastore %s not mounted on all %s nodes" %
                    (datastore_name, cluster.name()))
            else:
                raise ValueError("Unknown cluster software info, metadata %s" %
                                 metadata)
    def test_vms_get_invalid_both_vm_ip_and_vm_name(self, **_):
        cli = NutanixRestApiClient(self.HOSTS[0], "user", "password")
        with self.assertRaises(CurieException):
            cli.vms_get(vm_ip="123.45.67.89", vm_name="fake_vm_name")
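This last test implies `vms_get` treats `vm_ip` and `vm_name` as mutually exclusive filters. A sketch of the guard it exercises (assumed shape, not the client's verbatim code):

def vms_get(self, vm_ip=None, vm_name=None, drop_duplicates=True):
    # Guard sketched from the test above: both filters at once is an error.
    if vm_ip is not None and vm_name is not None:
        raise CurieException(CurieError.kInvalidParameter,
                             "'vm_ip' and 'vm_name' are mutually exclusive")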