Code Example #1
 def test_get_nodes(self):
     vmm_client = VmmClient(address=self.config["scvmm.address"],
                            username=self.config["scvmm.username"],
                            password=self.config["scvmm.password"])
     with vmm_client:
         nodes = vmm_client.get_nodes(self.config["scvmm.cluster"])
     self.assertIsInstance(nodes, list)
     self.assertEqual(len(nodes), len(self.config["nodes"]))
     for node in nodes:
         self.assertEqual(
             set(node.keys()), {
                 "name", "id", "fqdn", "ips", "state", "version",
                 "bmc_address", "bmc_port", "overall_state"
             })
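
The assertions above pin down the exact key set of each node dict returned by VmmClient.get_nodes. A hedged illustration of one element (only the key names are confirmed by the test; every value below is hypothetical):

# Hypothetical element of the list returned by VmmClient.get_nodes();
# the key names match the test's assertions, the values are made up.
example_node = {
    "name": "node-1",
    "id": "node-1-id",  # opaque node identifier; format not shown in the test
    "fqdn": "node-1.example.com",
    "ips": ["10.0.0.11"],
    "state": "Responding",
    "version": "10.0.14393.0",
    "bmc_address": "10.0.1.11",
    "bmc_port": "623",
    "overall_state": "OK",
}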
Code Example #2
  def discover_clusters_vmm(address, username, password, ret):
    """
    Args:
      address (str): Address of the management server.
      username (str): Username for the management server.
      password (str): Password for the management server.
      ret (DiscoverClustersV2Ret): Return proto to be populated.
    """
    vmm_client = VmmClient(address=address, username=username,
                           password=password)
    with vmm_client:
      log.debug("Connected to VMM %s", address)
      clusters = vmm_client.get_clusters()
      library_shares = vmm_client.get_library_shares()
      log.debug(library_shares)  # TODO(ryan.hardin) Remove.

    log.debug(clusters)  # TODO(ryan.hardin) Remove.
    cluster_collection = ret.cluster_inventory.cluster_collection_vec.add()
    cluster_collection.name = "HyperV"
    # TODO(ryan.hardin): Set ClusteringSoftware Type (i.e. kNutanix or kS2D).

    for cluster in clusters:
      cluster_pb = cluster_collection.cluster_vec.add()
      cluster_pb.id = cluster["name"]
      cluster_pb.name = cluster["name"]

      for storage in cluster["storage"]:
        logical_storage_pb = cluster_pb.storage_info_vec.add()
        logical_storage_pb.id = storage["path"]
        logical_storage_pb.name = storage["name"]
        logical_storage_pb.kind = logical_storage_pb.kHypervStorage

      for network in cluster["networks"]:
        logical_network_pb = cluster_pb.network_info_vec.add()
        logical_network_pb.id = network
        logical_network_pb.name = network

      for library_share in library_shares:
        library_share_pb = cluster_pb.library_shares.add()
        library_share_pb.name = library_share.get("name")
        library_share_pb.path = library_share.get("path")
        library_share_pb.server = library_share.get("server")

    # No return value since 'ret' is modified.
    log.debug(ret)  # TODO(ryan.hardin) Remove.
    return None
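
The loop above indexes specific keys on the cluster and library-share dicts. A hedged sketch of inputs that would satisfy those accesses (key sets taken from this function and from the assertions in Code Example #4; all values are hypothetical):

# Hypothetical return values of get_clusters() and get_library_shares().
clusters = [{
    "name": "hyperv-cluster-1",
    "type": "HyperV",
    "networks": ["ExternalSwitch"],
    "storage": [{"name": "Volume1",
                 "path": "C:\\ClusterStorage\\Volume1"}],
}]
library_shares = [{
    "name": "MSSCVMMLibrary",
    "path": "\\\\vmm-server\\MSSCVMMLibrary",
    "server": "vmm-server.example.com",
}]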
Code Example #3
  def discover_nodes_vmm(arg, ret):
    """
    See 'DiscoveryUtil.handle_nodes_discovery_v2' for info.
    """
    conn_params = arg.mgmt_server.conn_params
    vmm_client = VmmClient(address=conn_params.address,
                           username=conn_params.username,
                           password=conn_params.password)
    nodes_data = {}
    with vmm_client:
      log.debug("Connected to VMM %s", conn_params.address)
      for target_cluster in arg.cluster_collection.cluster_vec:
        nodes_data[target_cluster.name] = \
          vmm_client.get_nodes(target_cluster.name)

    log.debug(nodes_data)  # TODO(ryan.hardin) Remove.
    # NB: iteritems() means this module targets Python 2.
    for cluster_name, node_list in nodes_data.iteritems():
      node_collection = ret.node_collection_vec.add()
      node_collection.cluster_id = cluster_name

      for node_data in node_list:
        node = node_collection.node_vec.add()
        node.id = node_data["fqdn"]
        node.name = node_data["fqdn"]
        node.hypervisor.type = node.hypervisor.kHyperv
        node.hypervisor.version = node_data["version"]

        # Earlier validation ensures these two fields are either both set
        # or both unset.
        if (arg.oob_info.conn_params.username and
            arg.oob_info.type != OobInterfaceType.kNone):
          node.oob_info.CopyFrom(arg.oob_info)
          node.oob_info.conn_params.address = node_data.get("bmc_address", "")
          node.oob_info.vendor = OobVendor.kUnknownVendor
          if not node.oob_info.conn_params.address:
            # Name() yields the enum constant's name (e.g. "kNone");
            # the [1:] below strips the conventional "k" prefix.
            oob_type_name = OobInterfaceType.InterfaceType.Name(
              arg.oob_info.type)
            raise RuntimeError(
              "Power management type '%s' selected, but no power management "
              "address configured in VMM for node '%s' in cluster '%s'" %
              (oob_type_name[1:], node.name, cluster_name))

    # No return value since 'ret' is modified.
    log.debug(ret)  # TODO(ryan.hardin) Remove.
    return None
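
The error path above converts a protobuf enum name into a user-facing label by dropping its first character. A minimal illustration of that string handling ("kIpmi" is a hypothetical enum name; only kNone appears in the code above):

# InterfaceType.Name() returns names with a conventional "k" prefix;
# slicing [1:] yields the friendlier label used in the error message.
oob_type_name = "kIpmi"  # hypothetical result of InterfaceType.Name()
assert oob_type_name[1:] == "Ipmi"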
Code Example #4
 def test_get_clusters(self):
     vmm_client = VmmClient(address=self.config["scvmm.address"],
                            username=self.config["scvmm.username"],
                            password=self.config["scvmm.password"])
     with vmm_client:
         clusters = vmm_client.get_clusters(self.config["scvmm.cluster"])
     self.assertIsInstance(clusters, list)
     self.assertEqual(len(clusters), 1)
     cluster = clusters[0]
     self.assertEqual(set(cluster.keys()),
                      {"name", "type", "networks", "storage"})
     self.assertEqual(cluster["type"], "HyperV")
     self.assertIsInstance(cluster["networks"], list)
     self.assertIsInstance(cluster["storage"], list)
     for storage in cluster["storage"]:
         self.assertIsInstance(storage, dict)
         self.assertEqual(set(storage.keys()), {"name", "path"})
         self.assertIsInstance(storage["name"], basestring)
         self.assertIsInstance(storage["path"], basestring)
Code Example #5
 def test_init_library_server(self):
     vmm_client = VmmClient("fake_hostname",
                            "fake_user",
                            "fake_password",
                            library_server_address="fake_library_server",
                            library_server_share_path="fake_library_path")
     self.assertEqual(vmm_client.library_server_username, "fake_user")
     self.assertEqual(vmm_client.library_server_password, "fake_password")
     self.assertEqual(vmm_client.library_server_address,
                      "fake_library_server")
     self.assertEqual(vmm_client.library_server_share_path,
                      "fake_library_path")
Code Example #6
 def test_init_set_host_defaults(self):
     vmm_client = VmmClient("fake_hostname", "fake_username",
                            "fake_password")
     self.assertEqual(vmm_client.address, "fake_hostname")
     self.assertEqual(vmm_client.username, "fake_username")
     self.assertEqual(vmm_client.password, "fake_password")
     self.assertEqual(vmm_client.host_address, "fake_hostname")
     self.assertEqual(vmm_client.host_username, "fake_username")
     self.assertEqual(vmm_client.host_password, "fake_password")
     self.assertEqual(vmm_client.library_server_username, "fake_username")
     self.assertEqual(vmm_client.library_server_password, "fake_password")
     self.assertIsNone(vmm_client.library_server_share_path)
     self.assertIsNone(vmm_client.library_server_address)
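
Taken together, Code Examples #5 and #6 imply the constructor's defaulting rule: host and library-server credentials fall back to the top-level username and password, while the library-server address and share path stay None unless given. A hedged restatement of that rule (illustrative only, not VmmClient's actual constructor):

def resolve_credentials(username, password, host_username=None,
                        host_password=None, library_server_username=None,
                        library_server_password=None):
    # Each specific credential falls back to the top-level one, matching
    # the behavior the two tests above assert.
    return {
        "host_username": host_username or username,
        "host_password": host_password or password,
        "library_server_username": library_server_username or username,
        "library_server_password": library_server_password or password,
    }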
Code Example #7
  def _update_cluster_version_info_vmm(cluster_pb):
    """
    See 'DiscoveryUtil.update_cluster_version_info' for info.
    """
    mgmt_info = cluster_pb.cluster_management_server_info.vmm_info
    hyp_info = cluster_pb.cluster_hypervisor_info.hyperv_info

    vmm_client = VmmClient(address=mgmt_info.vmm_server,
                           username=mgmt_info.vmm_user,
                           password=mgmt_info.vmm_password)
    with vmm_client:
      # 'cluster' is not used below; indexing [0] doubles as validation,
      # raising IndexError if no cluster matches 'vmm_cluster_name'.
      cluster = vmm_client.get_clusters(
        cluster_name=mgmt_info.vmm_cluster_name)[0]
      mgmt_info.vmm_version = vmm_client.get_vmm_version()
      nodes = vmm_client.get_nodes(mgmt_info.vmm_cluster_name)
      hyp_info.version.extend(node["version"] for node in nodes)
    if cluster_pb.cluster_software_info.HasField("nutanix_info"):
      software_info = cluster_pb.cluster_software_info.nutanix_info
      cli = NutanixRestApiClient(
        software_info.prism_host,
        software_info.decrypt_field("prism_user"),
        software_info.decrypt_field("prism_password"))
      DiscoveryUtil._update_cluster_version_info_nos(cli, cluster_pb)
Code Example #8
File: hyperv_vm.py Project: nutanix/curie
 def is_powered_on(self):
     return VmmClient.is_powered_on(self._json_vm)
Code Example #9
 def test_is_nutanix_cvm_true(self):
     vm = {"name": "NTNX-12345678-A-CVM"}
     self.assertTrue(VmmClient.is_nutanix_cvm(vm))
Code Example #10
 def test_is_nutanix_cvm_false(self):
     vm = {"name": "NOT-A-CVM"}
     self.assertFalse(VmmClient.is_nutanix_cvm(vm))
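
The two vectors above suggest that is_nutanix_cvm keys off the Nutanix CVM naming convention (names such as NTNX-<block-serial>-<position>-CVM). A hedged approximation of such a check; VmmClient's actual implementation may differ:

import re

def looks_like_nutanix_cvm(vm):
    # Accepts "NTNX-12345678-A-CVM" and rejects "NOT-A-CVM", mirroring
    # the two test vectors; an illustration, not the real logic.
    return re.match(r"^NTNX-.+-CVM$", vm["name"]) is not None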
Code Example #11
 def setUp(self):
     self.vmm_client = VmmClient("fake_hostname", "fake_username",
                                 "fake_password")
Code Example #12
class TestVmmClient(unittest.TestCase):
    def setUp(self):
        self.vmm_client = VmmClient("fake_hostname", "fake_username",
                                    "fake_password")

    def test_init_set_host_defaults(self):
        vmm_client = VmmClient("fake_hostname", "fake_username",
                               "fake_password")
        self.assertEqual(vmm_client.address, "fake_hostname")
        self.assertEqual(vmm_client.username, "fake_username")
        self.assertEqual(vmm_client.password, "fake_password")
        self.assertEqual(vmm_client.host_address, "fake_hostname")
        self.assertEqual(vmm_client.host_username, "fake_username")
        self.assertEqual(vmm_client.host_password, "fake_password")
        self.assertEqual(vmm_client.library_server_username, "fake_username")
        self.assertEqual(vmm_client.library_server_password, "fake_password")
        self.assertIsNone(vmm_client.library_server_share_path)
        self.assertIsNone(vmm_client.library_server_address)

    def test_init_set_host_override(self):
        vmm_client = VmmClient(
            "fake_hostname",
            "fake_username",
            "fake_password",
            host_address="fake_host_hostname",
            host_username="******",
            host_password="******",
            library_server_address="fake_library_server_hostname",
            library_server_username="******",
            library_server_password="******")
        self.assertEqual(vmm_client.address, "fake_hostname")
        self.assertEqual(vmm_client.username, "fake_username")
        self.assertEqual(vmm_client.password, "fake_password")
        self.assertEqual(vmm_client.host_address, "fake_host_hostname")
        self.assertEqual(vmm_client.host_username, "fake_host_username")
        self.assertEqual(vmm_client.host_password, "fake_host_password")
        self.assertEqual(vmm_client.library_server_address,
                         "fake_library_server_hostname")
        self.assertEqual(vmm_client.library_server_username,
                         "fake_library_server_username")
        self.assertEqual(vmm_client.library_server_password,
                         "fake_library_server_password")
        self.assertIsNone(vmm_client.library_server_share_path)

    def test_init_library_server(self):
        vmm_client = VmmClient("fake_hostname",
                               "fake_user",
                               "fake_password",
                               library_server_address="fake_library_server",
                               library_server_share_path="fake_library_path")
        self.assertEqual(vmm_client.library_server_username, "fake_user")
        self.assertEqual(vmm_client.library_server_password, "fake_password")
        self.assertEqual(vmm_client.library_server_address,
                         "fake_library_server")
        self.assertEqual(vmm_client.library_server_share_path,
                         "fake_library_path")

    def test_is_nutanix_cvm_false(self):
        vm = {"name": "NOT-A-CVM"}
        self.assertFalse(VmmClient.is_nutanix_cvm(vm))

    def test_is_nutanix_cvm_true(self):
        vm = {"name": "NTNX-12345678-A-CVM"}
        self.assertTrue(VmmClient.is_nutanix_cvm(vm))

    def test_get_clusters(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_clusters()

        m_ps_client.execute.assert_called_once_with("Get-VmmHypervCluster",
                                                    json_params="{}")

    def test_get_clusters_cluster_name_cluster_type(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_clusters(cluster_name="Fake Cluster",
                                         cluster_type="HyperV")

        m_ps_client.execute.assert_called_once_with(
            "Get-VmmHypervCluster",
            json_params="{\"name\": \"Fake Cluster\", \"type\": \"HyperV\"}")

    def test_get_library_shares(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_library_shares()

        m_ps_client.execute.assert_called_once_with("Get-VmmLibraryShares")

    def test_get_vms(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_vms(cluster_name="Fake Cluster")

        m_ps_client.execute.assert_called_once_with(
            "Get-VmmVM",
            cluster_name="Fake Cluster",
            json_params="[]",
            num_retries=10)

    def test_get_vms_matching_ids(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_vms(cluster_name="Fake Cluster",
                                    vm_input_list=[{"id": "0"}, {"id": "1"}])

        m_ps_client.execute.assert_called_once_with(
            "Get-VmmVM",
            cluster_name="Fake Cluster",
            json_params="[{\"id\": \"0\"}, {\"id\": \"1\"}]",
            num_retries=10)

    def test_refresh_vms(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.refresh_vms(cluster_name="Fake Cluster")

        m_ps_client.execute.assert_called_once_with(
            "Read-VmmVM", cluster_name="Fake Cluster", json_params="[]")

    def test_refresh_vms_matching_ids(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.refresh_vms(cluster_name="Fake Cluster",
                                        vm_input_list=[{"id": "0"},
                                                       {"id": "1"}])

        m_ps_client.execute.assert_called_once_with(
            "Read-VmmVM",
            cluster_name="Fake Cluster",
            json_params="[{\"id\": \"0\"}, {\"id\": \"1\"}]")

    def test_get_nodes(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_nodes(cluster_name="Fake Cluster")

        m_ps_client.execute.assert_called_once_with(
            "Get-VmmHypervClusterNode",
            cluster_name="Fake Cluster",
            json_params="[]",
            num_retries=10)

    def test_get_nodes_matching_ids(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.get_nodes(cluster_name="Fake Cluster",
                                      nodes=[{"id": "0"}, {"id": "1"}])

        m_ps_client.execute.assert_called_once_with(
            "Get-VmmHypervClusterNode",
            cluster_name="Fake Cluster",
            json_params="[{\"id\": \"0\"}, {\"id\": \"1\"}]",
            num_retries=10)

    def test_nodes_power_state(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.nodes_power_state(cluster_name="Fake Cluster")

        m_ps_client.execute_async.assert_called_once_with(
            "Set-VmmHypervClusterNodeShutdown",
            cluster_name="Fake Cluster",
            json_params="[]")

    def test_nodes_power_state_matching_ids(self):
        self.vmm_client.host_username = "******"
        self.vmm_client.host_password = "******"
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.nodes_power_state(cluster_name="Fake Cluster",
                                              nodes=[{"id": "0"},
                                                     {"id": "1"}])

        m_ps_client.execute_async.assert_called_once_with(
            "Set-VmmHypervClusterNodeShutdown",
            cluster_name="Fake Cluster",
            json_params="[{\"id\": \"0\", "
            "\"password\": \"fake_host_password\", "
            "\"username\": \"fake_host_username\"}, "
            "{\"id\": \"1\", "
            "\"password\": \"fake_host_password\", "
            "\"username\": \"fake_host_username\"}]")

    def test_vms_set_power_state_for_vms(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vms_set_power_state_for_vms(
                cluster_name="Fake Cluster",
                task_req_list=[{"vm_id": "0", "power_state": "off"},
                               {"vm_id": "1", "power_state": "off"}])

        m_ps_client.execute.assert_called_once_with(
            "Set-VmmVMPowerState",
            cluster_name="Fake Cluster",
            json_params="[{\"power_state\": \"off\", \"vm_id\": \"0\"}, "
            "{\"power_state\": \"off\", \"vm_id\": \"1\"}]")

    def test_vms_set_possible_owners_for_vms(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vms_set_possible_owners_for_vms(
                cluster_name="Fake Cluster",
                task_req_list=[{"vm_id": "0", "possible_owners": ["0", "1"]},
                               {"vm_id": "1", "possible_owners": ["0", "1"]}])

        m_ps_client.execute.assert_called_once_with(
            "Set-VmmVMPossibleOwners",
            cluster_name="Fake Cluster",
            json_params=
            "[{\"possible_owners\": [\"0\", \"1\"], \"vm_id\": \"0\"}, "
            "{\"possible_owners\": [\"0\", \"1\"], \"vm_id\": \"1\"}]")

    def test_vms_set_snapshot(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vms_set_snapshot(
                cluster_name="Fake Cluster",
                task_req_list=[{
                    "vm_id": "0",
                    "name": "snapshot_0",
                    "description": "Snapshot 0"
                }, {
                    "vm_id": "1",
                    "name": "snapshot_1",
                    "description": "Snapshot 1"
                }])

        m_ps_client.execute.assert_called_once_with(
            "Set-VmmVMSnapshot",
            cluster_name="Fake Cluster",
            json_params="[{\"description\": \"Snapshot 0\", "
            "\"name\": \"snapshot_0\", \"vm_id\": \"0\"}, "
            "{\"description\": \"Snapshot 1\", "
            "\"name\": \"snapshot_1\", \"vm_id\": \"1\"}]")

    def test_vm_get_job_status_vmm_tasks(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vm_get_job_status(
                task_id_list=[{"task_type": "vmm", "task_id": "0"},
                              {"task_type": "vmm", "task_id": "1"}])

        m_ps_client.execute.assert_called_once_with(
            "Get-Task",
            json_params="[{\"task_id\": \"0\", \"task_type\": \"vmm\"}, "
            "{\"task_id\": \"1\", \"task_type\": \"vmm\"}]",
            num_retries=10)

    def test_vm_get_job_status_ps_tasks(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            mock_ps_cmd_0 = mock.Mock()
            mock_ps_cmd_1 = mock.Mock()
            m_ps_client.poll.side_effect = [mock_ps_cmd_0, mock_ps_cmd_1]
            self.vmm_client.vm_get_job_status(
                task_id_list=[{"task_type": "ps", "task_id": "0"},
                              {"task_type": "ps", "task_id": "1"}])

        m_ps_client.poll.assert_has_calls([mock.call("0"), mock.call("1")])
        mock_ps_cmd_0.as_ps_task.assert_called_once_with()
        mock_ps_cmd_1.as_ps_task.assert_called_once_with()

    def test_vm_get_job_status_unknown_task_type(self):
        with self.assertRaises(ValueError) as ar:
            self.vmm_client.vm_get_job_status(
                task_id_list=[{"task_type": "arduous", "task_id": "0"}])

        self.assertEqual("Unknown task type 'arduous'", str(ar.exception))

    def test_vm_stop_job_vmm_tasks(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vm_stop_job(
                task_id_list=[{"task_type": "vmm", "task_id": "0"},
                              {"task_type": "vmm", "task_id": "1"}])

        m_ps_client.execute.assert_called_once_with(
            "Stop-Task",
            json_params="[{\"task_id\": \"0\", \"task_type\": \"vmm\"}, "
            "{\"task_id\": \"1\", \"task_type\": \"vmm\"}]")

    def test_vm_stop_job_ps_tasks(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            mock_ps_cmd_0 = mock.Mock()
            mock_ps_cmd_1 = mock.Mock()
            m_ps_client.poll.side_effect = [mock_ps_cmd_0, mock_ps_cmd_1]
            self.vmm_client.vm_stop_job(
                task_id_list=[{"task_type": "ps", "task_id": "0"},
                              {"task_type": "ps", "task_id": "1"}])

        m_ps_client.poll.assert_has_calls([mock.call("0"), mock.call("1")])
        mock_ps_cmd_0.terminate.assert_called_once_with()
        mock_ps_cmd_1.terminate.assert_called_once_with()
        mock_ps_cmd_0.as_ps_task.assert_called_once_with()
        mock_ps_cmd_1.as_ps_task.assert_called_once_with()

    def test_vm_stop_job_unknown_task_type(self):
        with self.assertRaises(ValueError) as ar:
            self.vmm_client.vm_stop_job(
                task_id_list=[{"task_type": "arduous", "task_id": "0"}])

        self.assertEqual("Unknown task type 'arduous'", str(ar.exception))

    def test_vms_delete_default(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vms_delete(cluster_name="Fake Cluster",
                                       vm_ids=["0", "1"])

        m_ps_client.execute.assert_called_once_with(
            "Remove-VmmVM",
            cluster_name="Fake Cluster",
            json_params="{\"force_delete\": false, \"vm_ids\": [\"0\", \"1\"]}"
        )

    def test_vms_delete_force_delete_true(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.vms_delete(cluster_name="Fake Cluster",
                                       vm_ids=["0", "1"],
                                       force_delete=True)

        m_ps_client.execute.assert_called_once_with(
            "Remove-VmmVM",
            cluster_name="Fake Cluster",
            json_params="{\"force_delete\": true, \"vm_ids\": [\"0\", \"1\"]}")

    def test_create_vm_template(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.create_vm_template("fake_cluster",
                                               "fake_vm_template", "host_id_0",
                                               "/fake/goldimages/path",
                                               "/fake/datastore/path",
                                               "fake_network")

        m_ps_client.execute.assert_called_once_with(
            "Install-VmmVMTemplate",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "vm_name": "fake_vm_template",
                    "vm_host_id": "host_id_0",
                    "goldimage_disk_path": "/fake/goldimages/path",
                    "vm_datastore_path": "/fake/datastore/path",
                    "vmm_network_name": "fake_network",
                    "vcpus": 1,
                    "ram_mb": 1024
                },
                sort_keys=True))

    def test_create_vm(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.create_vm("fake_cluster", "fake_vm_template",
                                      [{
                                          "vm_name": "fake_vm_0",
                                          "node_id": "0"
                                      }, {
                                          "vm_name": "fake_vm_1",
                                          "node_id": "1"
                                      }], "/fake/datastore/path", None, None)

        m_ps_client.execute.assert_called_once_with(
            "New-VmmVM",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "vm_template_name": "fake_vm_template",
                    "vm_host_map": [{
                        "vm_name": "fake_vm_0",
                        "node_id": "0"
                    }, {
                        "vm_name": "fake_vm_1",
                        "node_id": "1"
                    }],
                    "vm_datastore_path": "/fake/datastore/path",
                    "data_disks": None,
                    "differencing_disks_path": None
                },
                sort_keys=True))

    def test_clone_vm(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.clone_vm("fake_cluster", "fake_vm_id_0",
                                     "fake_new_cloned_vm",
                                     "/fake/datastore/path")

        m_ps_client.execute.assert_called_once_with(
            "New-VmmVMClone",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "base_vm_id": "fake_vm_id_0",
                    "vm_name": "fake_new_cloned_vm",
                    "vm_datastore_path": "/fake/datastore/path"
                },
                sort_keys=True))

    def test_upload_image(self):
        self.vmm_client.library_server_share_path = "/fake/library/share/path"
        self.vmm_client.library_server_address = "fake_library_server"
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.upload_image(
                ["/fake/goldimage/path/0", "/fake/goldimage/path/1"],
                "/fake/goldimage/target/directory", "fake_disk_name")

        m_ps_client.execute_async.assert_called_once_with(
            "Install-VmmDiskImage",
            overwriteFiles=False,
            json_params=json.dumps(
                {
                    "vmm_library_server_share": "/fake/library/share/path",
                    "vmm_library_server_user": "fake_username",
                    "vmm_library_server_password": "fake_password",
                    "vmm_library_server": "fake_library_server",
                    "goldimage_disk_list": ["/fake/goldimage/path/0",
                                            "/fake/goldimage/path/1"],
                    "goldimage_target_dir":
                        "/fake/goldimage/target/directory",
                    "disk_name": "fake_disk_name",
                    "transfer_type": None
                },
                sort_keys=True))

    def test_convert_to_template(self):
        self.vmm_client.library_server_share_path = "/fake/library/share/path"
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.convert_to_template(cluster_name="Fake Cluster",
                                                target_dir="fake_target_dir",
                                                template_name="fake_template")

        m_ps_client.execute.assert_called_once_with(
            "ConvertTo-Template",
            cluster_name="Fake Cluster",
            json_params="{\"target_dir\": \"fake_target_dir\", "
            "\"template_name\": \"fake_template\", "
            "\"vmm_library_server_share\": \"/fake/library/share/path\"}")

    def test_migrate_vm(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.migrate_vm("fake_cluster", [{
                "vm_name": "fake_vm_0",
                "node_id": "0"
            }, {
                "vm_name": "fake_vm_1",
                "node_id": "1"
            }], "/fake/datastore/path")

        m_ps_client.execute.assert_called_once_with(
            "Move-VmmVM",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "vm_host_map": [{
                        "vm_name": "fake_vm_0",
                        "node_id": "0"
                    }, {
                        "vm_name": "fake_vm_1",
                        "node_id": "1"
                    }],
                    "vm_datastore_path":
                    "/fake/datastore/path",
                },
                sort_keys=True))

    def test_migrate_vm_datastore(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.migrate_vm_datastore(
                "fake_cluster", [{
                    "vm_id": "fake_vm_id_0",
                    "datastore_name": "fake_datastore_0"
                }, {
                    "vm_id": "fake_vm_id_1",
                    "datastore_name": "fake_datastore_1"
                }])

        m_ps_client.execute.assert_called_once_with(
            "Move-VmmVMDatastore",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "vm_datastore_map": [{
                        "vm_id": "fake_vm_id_0",
                        "datastore_name": "fake_datastore_0"
                    }, {
                        "vm_id": "fake_vm_id_1",
                        "datastore_name": "fake_datastore_1"
                    }],
                },
                sort_keys=True))

    def test_clean_vmm(self):
        self.vmm_client.library_server_share_path = "/fake/library/share/path"
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.clean_vmm("fake_cluster", "fake_target_dir",
                                      "fake_datastore_path", "fake_vm_")

        m_ps_client.execute_async.assert_called_once_with(
            "Remove-ClusterVmmObjects",
            cluster_name="fake_cluster",
            json_params=json.dumps(
                {
                    "vmm_library_server_share": "/fake/library/share/path",
                    "target_dir": "fake_target_dir",
                    "vm_datastore_path": "fake_datastore_path",
                    "vm_name_prefix": "fake_vm_"
                },
                sort_keys=True))

    def test_clean_library_server(self):
        self.vmm_client.library_server_share_path = "/fake/library/share/path"
        self.vmm_client.library_server_address = "fake_library_server"
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.clean_library_server("/fake/target/directory",
                                                 "fake_vm_")

        m_ps_client.execute_async.assert_called_once_with(
            "Remove-VmmDiskImages",
            json_params=json.dumps(
                {
                    "vmm_library_server_share": "/fake/library/share/path",
                    "vmm_library_server_user": "******",
                    "vmm_library_server_password": "******",
                    "vmm_library_server": "fake_library_server",
                    "target_dir": "/fake/target/directory",
                    "vm_name_prefix": "fake_vm_"
                },
                sort_keys=True))

    def test_update_library(self):
        with mock.patch.object(self.vmm_client, "ps_client") as m_ps_client:
            self.vmm_client.update_library("/fake/goldimages/path")

        m_ps_client.execute_async.assert_called_once_with(
            "Update-Library",
            json_params="{\"goldimage_disk_path\": \"/fake/goldimages/path\"}",
            num_retries=3)
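
Every json_params assertion in the class above compares against a string whose keys appear in alphabetical order, which only works because the client serializes with deterministic key ordering. A small standard-library sketch of why sort_keys=True makes those string-equality assertions stable:

import json

params = {"vm_id": "0", "power_state": "off"}
# With sort_keys=True the serialization is deterministic, so expected
# strings like those asserted above do not depend on dict ordering.
assert json.dumps(params, sort_keys=True) == \
    '{"power_state": "off", "vm_id": "0"}'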
Code Example #13
  def update_cluster_virtual_ip(cluster_pb):
    """
    Updates 'prism_host' to correspond to the cluster virtual IP.

    The 'prism_host' field is set for management and clustering software as
    appropriate for the target hypervisor.

    Returns:
      True if 'prism_host' was updated, else False.

    Raises:
      CurieException<kInvalidParameter>: 'cluster_pb' is a Nutanix cluster but
        does not have a Virtual IP.
    """
    if not cluster_pb.cluster_software_info.HasField("nutanix_info"):
      return False

    prism_proto = None
    ntnx_proto = cluster_pb.cluster_software_info.nutanix_info
    prism_user = ntnx_proto.decrypt_field("prism_user")
    prism_password = ntnx_proto.decrypt_field("prism_password")
    c_uuid = ntnx_proto.cluster_uuid

    if cluster_pb.cluster_management_server_info.HasField("prism_info"):
      prism_proto = cluster_pb.cluster_management_server_info.prism_info
      client = NutanixRestApiClient.from_proto(prism_proto, timeout_secs=10)
      cluster_json = client.clusters_get(cluster_id=c_uuid)
    else:
      cvm_addresses = []
      if cluster_pb.cluster_management_server_info.HasField("vmm_info"):
        vmm_info = cluster_pb.cluster_management_server_info.vmm_info
        vmm_client = VmmClient(address=vmm_info.vmm_server,
                               username=vmm_info.vmm_user,
                               password=vmm_info.vmm_password)
        with vmm_client:
          vms = vmm_client.get_vms(cluster_name=vmm_info.vmm_cluster_name)
          for vm in vms:
            if VmmClient.is_nutanix_cvm(vm):
              if VmmClient.is_powered_on(vm):
                log.debug("Found CVM '%s' with IPs: %s",
                          vm["name"], vm["ips"])
                cvm_addresses.extend(vm["ips"])
              else:
                log.debug("Skipping CVM '%s' because it is not powered on.",
                          vm["name"])
      else:
        node_ids = [node.id for node in cluster_pb.cluster_nodes]
        # NB: We currently have an asymmetrical input for Prism credentials
        # depending on whether they're considered as management software or
        # clustering software. In the latter case, which is when 'nutanix_info'
        # is set but not 'prism_info', the user is not asked for a Prism host.
        # In this case, we discover CVMs via vCenter, attempt to connect to Prism
        # on each in sequence until successful, and then query the virtual IP.
        mgmt_info = cluster_pb.cluster_management_server_info.vcenter_info
        with VsphereVcenter.from_proto(mgmt_info) as vcenter:
          vim_dc = vcenter.lookup_datacenter(mgmt_info.vcenter_datacenter_name)
          vim_cluster = vcenter.lookup_cluster(vim_dc,
                                               mgmt_info.vcenter_cluster_name)
          for vim_cvm in (vm for vm in vcenter.lookup_vms(vim_cluster)
                          if vcenter.vim_vm_is_nutanix_cvm(vm)):
            vim_host = get_optional_vim_attr(vim_cvm.runtime, "host")
            if vim_host:
              if vim_host.name in node_ids:
                cvm_address = vcenter.get_vim_vm_ip_address(vim_cvm)
                if cvm_address:
                  log.debug("Found CVM '%s' with address '%s'" %
                            (vim_cvm.name, cvm_address))
                  cvm_addresses.append(cvm_address)
              else:
                log.debug("Skipping CVM '%s'; Host '%s' is not in the "
                          "metadata" % (vim_cvm.name, vim_host.name))
      # We run the Nutanix API only against powered-on CVMs.
      if not cvm_addresses:
        raise CurieTestException(
          cause="No Nutanix CVMs found.",
          impact="The cluster virtual IP address can not be discovered.",
          corrective_action="Please verify that the cluster contains Nutanix "
                            "CVMs, and that they are powered on.",
        )
      for cvm_address in cvm_addresses:
        client = NutanixRestApiClient(cvm_address, prism_user, prism_password)
        try:
          cluster_json = client.clusters_get(cluster_id=c_uuid, max_retries=3)
        except BaseException:
          log.warning("Unable to query CVM with IP '%s'",
                      cvm_address, exc_info=True)
        else:
          break
      else:
        raise CurieTestException(
          cause="Failed to query Prism on any Nutanix CVM.",
          impact="The cluster virtual IP address can not be discovered.",
          corrective_action="Please verify that the Nutanix CVMs on the "
                            "cluster are powered on, and that the network "
                            "connectivity to the CVMs is correct.",
        )

    if "clusterExternalIPAddress" in cluster_json:
      cluster_name = cluster_json.get("name")
      cluster_vip = cluster_json["clusterExternalIPAddress"]
    elif "entities" in cluster_json:
      cluster_data = cluster_json["entities"][0]
      cluster_name = cluster_data.get("name")
      cluster_vip = cluster_data["clusterExternalIPAddress"]
    else:
      raise CurieException(
        CurieError.kInvalidParameter,
        "Unrecognized response from NutanixRestApiClient.clusters_get")
    if not cluster_vip:
      raise CurieException(
        CurieError.kInvalidParameter,
        "Cluster '%s' does not appear to be configured with a virtual IP "
        "(received '%s')" % (cluster_name, cluster_vip))
    else:
      log.debug("Identified Nutanix cluster virtual IP address: '%s'",
                cluster_vip)
    ntnx_proto.prism_host = cluster_vip
    if prism_proto:
      prism_proto.prism_host = cluster_vip
    return True
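
The branch near the end of update_cluster_virtual_ip handles two response shapes from clusters_get. Hedged examples of both (key names are taken from the code above; the values are hypothetical):

# Shape 1: a single cluster object.
single = {"name": "cluster-1", "clusterExternalIPAddress": "10.0.0.50"}
# Shape 2: a collection wrapper with the cluster under "entities".
wrapped = {"entities": [{"name": "cluster-1",
                         "clusterExternalIPAddress": "10.0.0.50"}]}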