Ejemplo n.º 1
0
def init_datastoreCache(force=False):
    """
    Populate the global datastore cache from the datastores visible on the
    local ESX host. Pass force=True to rebuild the cache even when it is
    already populated.
    """
    with lockManager.get_lock("init_datastoreCache"):
        global datastores
        logging.debug("init_datastoreCache:  %s", datastores)
        if datastores and not force:
            return

        si = vmdk_ops.get_si()

        # Connected directly to ESX, so the first child entity is the
        # current datacenter/host.
        host_entity = si.content.rootFolder.childEntity[0]
        collected = []
        for ds in host_entity.datastoreFolder.childEntity:
            ds_name = ds.info.name
            dockvols_path, err = vmdk_ops.get_vol_path(datastore=ds_name,
                                                       create=False)
            if err:
                # Unusable dockvols location: leave this store out of the cache.
                logging.error(
                    " datastore %s is being ignored as the dockvol path can't be created on it",
                    ds_name)
                continue
            collected.append((ds_name, ds.info.url, dockvols_path))
        datastores = collected
Ejemplo n.º 2
0
    def testAttachDetach(self):
        """Attach max_vol_count disks, verify one more attach fails, then
        detach everything."""
        logging.debug("Start VMDKAttachDetachTest")
        si = vmdk_ops.get_si()
        # Locate the test VM by name among the host's VM folder children.
        vm_list = [entity
                   for entity in si.content.rootFolder.childEntity[0].vmFolder.childEntity
                   if entity.config.name == self.vm_name]
        self.assertNotEqual(None, vm_list)

        def vol_path(idx):
            # Full datastore path of the test volume with the given index.
            return os.path.join(self.datastore_path,
                                'VmdkAttachDetachTestVol' + str(idx) + '.vmdk')

        # attach max_vol_count disks
        for idx in range(1, self.max_vol_count + 1):
            ret = vmdk_ops.disk_attach(vmdk_path=vol_path(idx), vm=vm_list[0])
            self.assertFalse("Error" in ret)

        # one disk beyond the limit must be rejected
        ret = vmdk_ops.disk_attach(vmdk_path=vol_path(self.max_vol_count + 1),
                                   vm=vm_list[0])
        self.assertTrue("Error" in ret)

        # detach all the attached disks
        for idx in range(1, self.max_vol_count + 1):
            ret = vmdk_ops.disk_detach(vmdk_path=vol_path(idx), vm=vm_list[0])
            self.assertTrue(ret is None)
def get_vm_name_by_uuid(vm_uuid):
    """Return the VM name for the given vm_uuid, or None if the lookup fails."""
    # get_si() appears needed only for its side effect of establishing the
    # hostd connection used by vm_uuid2name — presumably required; verify.
    si = vmdk_ops.get_si()
    try:
        return vmdk_ops.vm_uuid2name(vm_uuid)
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
Ejemplo n.º 4
0
    def setUp(self):
        """Per-test fixture: pick a datastore, create the test VM, and
        pre-create max_vol_count+1 VMDK files."""
        logging.debug("VMDKAttachDetachTest setUp path =%s", path)
        self.cleanup()

        if not self.datastore_name:
            all_stores = vmdk_utils.get_datastores()
            first = all_stores[0]
            if not first:
                logging.error("Cannot find a valid datastore")
                self.assertFalse(True)
            # Tuple layout: (name, url, dockvols path)
            self.datastore_name = first[0]
            self.datastore_path = first[2]
            logging.debug("datastore_name=%s datastore_path=%s",
                          self.datastore_name, self.datastore_path)

        # get service_instance, and create a VM
        si = vmdk_ops.get_si()
        self.create_vm(si, self.vm_name, self.datastore_name)

        # create max_vol_count+1 VMDK files
        for idx in range(1, self.max_vol_count + 2):
            name = 'VmdkAttachDetachTestVol' + str(idx)
            vmdk = os.path.join(self.datastore_path, name + '.vmdk')
            self.assertEqual(None,
                             vmdk_ops.createVMDK(vm_name=self.vm_name,
                                                 vmdk_path=vmdk,
                                                 vol_name=name))
Ejemplo n.º 5
0
    def testAttachDetach(self):
        """Exercise attach/detach: fill to max_vol_count, confirm the
        overflow attach fails, then detach all disks."""
        logging.debug("Start VMDKAttachDetachTest")
        si = vmdk_ops.get_si()
        # find the test VM among the host's VM folder children
        candidates = [e for e in
                      si.content.rootFolder.childEntity[0].vmFolder.childEntity
                      if e.config.name == self.vm_name]
        self.assertNotEqual(None, candidates)
        test_vm = candidates[0]

        # attach max_vol_count disks
        for n in range(1, self.max_vol_count + 1):
            vmdk = os.path.join(self.datastore_path,
                                'VmdkAttachDetachTestVol' + str(n) + '.vmdk')
            result = vmdk_ops.disk_attach(vmdk_path=vmdk, vm=test_vm)
            self.assertFalse("Error" in result)

        # attach one more disk, which should fail
        vmdk = os.path.join(
            self.datastore_path,
            'VmdkAttachDetachTestVol' + str(self.max_vol_count + 1) + '.vmdk')
        result = vmdk_ops.disk_attach(vmdk_path=vmdk, vm=test_vm)
        self.assertTrue("Error" in result)

        # detach all the attached disks
        for n in range(1, self.max_vol_count + 1):
            vmdk = os.path.join(self.datastore_path,
                                'VmdkAttachDetachTestVol' + str(n) + '.vmdk')
            result = vmdk_ops.disk_detach(vmdk_path=vmdk, vm=test_vm)
            self.assertTrue(result is None)
Ejemplo n.º 6
0
def connect_to_vcs(host="localhost", port=443):
    """
    Connect to VCS - currently utilizing VSAN mgmt service on ESX (/vsan) - and return SOAP stub
    """
    si = vmdk_ops.get_si()
    # pylint: disable=no-member
    host_system = pyVim.host.GetHostSystem(si)
    shared_secret = host_system.configManager.vsanSystem.FetchVsanSharedSecret()

    stub = pyVmomi.SoapStubAdapter(
        host=host,
        port=port,
        version=pyVmomi.VmomiSupport.newestVersions.Get("vim"),
        path="/vsan",
        poolSize=0)
    perf_mgr = vim.cluster.VsanPerformanceManager("vsan-performance-manager",
                                                  stub)

    # Disable certificate check during SSL communication
    disable_certificate_check()

    # Authenticate against the VSAN mgmt service with the host shared secret.
    if not perf_mgr.Login(shared_secret):
        print("Failed to get sims stub for host %s" % host)
        raise OSError("Failed to login to VSAN mgmt server")

    return stub
Ejemplo n.º 7
0
    def setUp(self):
        """Per-test fixture: choose a datastore, create the VM, and
        pre-create max_vol_count+1 test volumes."""
        logging.debug("VMDKAttachDetachTest setUp path =%s", path)
        self.cleanup()

        if not self.datastore_name:
            stores = vmdk_utils.get_datastores()
            chosen = stores[0]
            if not chosen:
                logging.error("Cannot find a valid datastore")
                self.assertFalse(True)
            self.datastore_name = chosen[0]
            self.datastore_path = chosen[2]
            logging.debug("datastore_name=%s datastore_path=%s",
                          self.datastore_name, self.datastore_path)

        # get service_instance, and create a VM
        si = vmdk_ops.get_si()
        self.create_vm(si, self.vm_name, self.datastore_name)

        # create max_vol_count+1 VMDK files
        for n in range(1, self.max_vol_count + 2):
            vol = 'VmdkAttachDetachTestVol' + str(n)
            vmdk = os.path.join(self.datastore_path, vol + '.vmdk')
            self.assertEqual(None,
                             vmdk_ops.createVMDK(vm_name=self.vm_name,
                                                 vmdk_path=vmdk,
                                                 vol_name=vol))
Ejemplo n.º 8
0
def init_datastoreCache(force=False):
    """
    Build the global datastore cache from the datastores visible on the
    local ESX host; force=True rebuilds it even when already populated.
    """
    with lockManager.get_lock("init_datastoreCache"):
        global datastores
        logging.debug("init_datastoreCache:  %s", datastores)
        if datastores and not force:
            return

        si = vmdk_ops.get_si()

        #  We are connected to ESX so childEntity[0] is current DC/Host
        ds_folder = si.content.rootFolder.childEntity[0].datastoreFolder
        fresh_cache = []
        for ds in ds_folder.childEntity:
            path, err = vmdk_ops.get_vol_path(datastore=ds.info.name,
                                              create=False)
            if err:
                logging.error(" datastore %s is being ignored as the dockvol path can't be created on it", ds.info.name)
                continue
            fresh_cache.append((ds.info.name, ds.info.url, path))
        datastores = fresh_cache
Ejemplo n.º 9
0
def connect_to_vcs(host="localhost", port=443):
    """
    Connect to VCS - currently utilizing VSAN mgmt service on ESX (/vsan) - and return SOAP stub
    """
    service_instance = vmdk_ops.get_si()
    # pylint: disable=no-member
    vsan_system = pyVim.host.GetHostSystem(
        service_instance).configManager.vsanSystem
    secret = vsan_system.FetchVsanSharedSecret()

    vim_version = pyVmomi.VmomiSupport.newestVersions.Get("vim")
    stub = pyVmomi.SoapStubAdapter(host=host, port=port, version=vim_version,
                                   path="/vsan", poolSize=0)
    vpm = vim.cluster.VsanPerformanceManager("vsan-performance-manager", stub)

    # Disable certificate check during SSL communication
    disable_certificate_check()

    # Log in with the host shared secret before handing the stub back.
    if not vpm.Login(secret):
        print("Failed to get sims stub for host %s" % host)
        raise OSError("Failed to login to VSAN mgmt server")

    return stub
Ejemplo n.º 10
0
def get_vm_name_by_uuid(vm_uuid):
    """ Returns vm_name for given vm_uuid, or None """
    si = vmdk_ops.get_si()
    try:
        # Scan the host's VM folder for a config-UUID match; IndexError from
        # vm[0] (no match) is handled below and yields None.
        vm = [d for d in si.content.rootFolder.childEntity[0].vmFolder.childEntity
              if d.config.uuid == vm_uuid]
        return vm[0].config.name
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
Ejemplo n.º 11
0
def get_vsan_datastore():
    """Returns Datastore management object for vsanDatastore, or None"""
    si = vmdk_ops.get_si()
    stores = si.content.rootFolder.childEntity[0].datastore
    try:
        return [d for d in stores if d.summary.type == "vsan"][0]
    except Exception:
        # Fix: narrowed from a bare "except:" (which also swallowed
        # KeyboardInterrupt/SystemExit). IndexError here means no vsan store.
        return None
Ejemplo n.º 12
0
def get_vm_uuid_by_name(vm_name):
    """ Returns vm_uuid for given vm_name, or None """
    # get_si() is presumably needed to establish the hostd connection used by
    # FindChild/GetVmFolder — verify before removing.
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        return vm.config.uuid
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit. Any lookup failure yields None.
        return None
def get_vm_uuid_by_name(vm_name):
    """ Returns vm_uuid for given vm_name, or None """
    # get_si() is presumably needed to establish the hostd connection used by
    # FindChild/GetVmFolder — verify before removing.
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        return vm.config.uuid
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
Ejemplo n.º 14
0
def get_vsan_datastore():
    """Returns Datastore management object for vsanDatastore, or None"""
    si = vmdk_ops.get_si()
    stores = si.content.rootFolder.childEntity[0].datastore
    try:
        return [d for d in stores if d.summary.type == "vsan"][0]
    except Exception:
        # Fix: narrowed from a bare "except:" — IndexError (no vsan store)
        # still yields None, but SystemExit/KeyboardInterrupt now propagate.
        return None
 def setUp(self):
     """Create the test policy VMDK before each test in this class."""
     # get_si() is kept for its connection side effect — presumably required
     # by createVMDK below; verify.
     si = vmdk_ops.get_si()
     # create VMDK
     create_err = vmdk_ops.createVMDK(vmdk_path=self.VMDK_PATH,
                                      vm_name=self.VM_NAME,
                                      vol_name="test_policy_vol")
     self.assertEqual(create_err, None, create_err)
Ejemplo n.º 16
0
def get_vm_uuid_by_name(vm_name):
    """Returns vm_uuid for given vm_name, or None"""
    si = vmdk_ops.get_si()
    try:
        # IndexError from vm[0] (no VM with this name) is handled below.
        vm = [d for d in si.content.rootFolder.childEntity[0].vmFolder.childEntity
              if d.config.name == vm_name]
        return vm[0].config.uuid
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
    def cleanup(self):
        """Drop both tenants (original and renamed) and remove both test VMs."""
        for tenant in (self.tenant1_name, self.tenant1_new_name):
            test_utils.cleanup_tenant(tenant)

        # remove VM
        si = vmdk_ops.get_si()
        for vm in (self.vm1, self.vm2):
            test_utils.remove_vm(si, vm)
Ejemplo n.º 18
0
    def cleanup(self):
        """Remove the test VM and delete every leftover test volume."""
        si = vmdk_ops.get_si()
        self.remove_vm(si, self.vm_name)

        for vol in self.get_testvols():
            vmdk = os.path.join(vol['path'], vol['filename'])
            self.assertEqual(None, vmdk_ops.removeVMDK(vmdk))
Ejemplo n.º 19
0
    def cleanup(self):
        """Tear down the test VM, then remove all leftover test volumes."""
        service_instance = vmdk_ops.get_si()
        self.remove_vm(service_instance, self.vm_name)

        for entry in self.get_testvols():
            full_path = os.path.join(entry['path'], entry['filename'])
            self.assertEqual(None, vmdk_ops.removeVMDK(full_path))
Ejemplo n.º 20
0
    def cleanup(self):
        """Remove both test tenants and both test VMs."""
        # cleanup existing tenants
        test_utils.cleanup_tenant(self.tenant1_name)
        test_utils.cleanup_tenant(self.tenant1_new_name)

        # remove VM
        host_si = vmdk_ops.get_si()
        test_utils.remove_vm(host_si, self.vm1)
        test_utils.remove_vm(host_si, self.vm2)
Ejemplo n.º 21
0
def get_vm_name_by_uuid(vm_uuid):
    """
    Returns vm_name for given vm_uuid, or None
    TODO: Need to refactor further (can be a redundant method)
    """
    # get_si() appears needed only for its hostd-connection side effect —
    # presumably required by vm_uuid2name; verify.
    si = vmdk_ops.get_si()
    try:
        return vmdk_ops.vm_uuid2name(vm_uuid)
    except Exception:
        # Fix: narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate.
        return None
Ejemplo n.º 22
0
def get_vm_name_by_uuid(vm_uuid):
    """
    Returns vm_name for given vm_uuid, or None
    TODO: Need to refactor further (can be a redundant method)
    """
    # get_si() appears needed only for its hostd-connection side effect —
    # presumably required by vm_uuid2name; verify.
    si = vmdk_ops.get_si()
    try:
        return vmdk_ops.vm_uuid2name(vm_uuid)
    except Exception:
        # Fix: narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate.
        return None
    def cleanup(self):
        """Best-effort teardown: remove both tenants (volumes included) and
        both test VMs."""
        # Return values are intentionally kept but unchecked — the tenants
        # may not exist when cleanup runs.
        for tenant in (self.tenant1_name, self.tenant1_new_name):
            error_info = auth_api._tenant_rm(name=tenant, remove_volumes=True)

        # remove VM
        si = vmdk_ops.get_si()
        for vm in (self.vm1, self.vm2):
            vmdk_ops_test.remove_vm(si, vm)
Ejemplo n.º 24
0
    def create_vms(cls):
        """Create the two test VMs on the class datastore; abort on failure."""
        si = vmdk_ops.get_si()

        error, cls.vm1 = vmdk_ops_test.create_vm(
            si=si, vm_name=cls.vm1_name, datastore_name=cls.datastore)
        if error:
            cls.fail("Failed to create VM1!")

        error, cls.vm2 = vmdk_ops_test.create_vm(
            si=si, vm_name=cls.vm2_name, datastore_name=cls.datastore)
        if error:
            cls.fail("Failed to create VM2!")
Ejemplo n.º 25
0
    def create_vms(cls):
        """Provision VM1 and VM2 on cls.datastore for the test class."""
        service_instance = vmdk_ops.get_si()

        err, cls.vm1 = vmdk_ops_test.create_vm(si=service_instance,
                                               vm_name=cls.vm1_name,
                                               datastore_name=cls.datastore)
        if err:
            cls.fail("Failed to create VM1!")

        err, cls.vm2 = vmdk_ops_test.create_vm(si=service_instance,
                                               vm_name=cls.vm2_name,
                                               datastore_name=cls.datastore)
        if err:
            cls.fail("Failed to create VM2!")
Ejemplo n.º 26
0
def init_datastoreCache():
    """
    Initializes the datastore cache with the list of datastores accessible from local ESX host.
    """
    global datastores
    logging.debug("init_datastoreCache: %s", datastores)

    si = vmdk_ops.get_si()

    #  We are connected to ESX so childEntity[0] is current DC/Host
    ds_objects = si.content.rootFolder.childEntity[0].datastoreFolder.childEntity
    cache = []
    for ds in ds_objects:
        url = ds.info.url
        # (name, last URL component, dockvols path under the datastore URL)
        cache.append((ds.info.name,
                      os.path.split(url)[1],
                      os.path.join(url, 'dockvols')))
    datastores = cache
Ejemplo n.º 27
0
def get_propertycollector():
    """
    Connect to hostd. If failed, retry.
    Create the property collector with filter to monitor VM power state changes
    Return the property collecter and error (if any)
    """
    si = vmdk_ops.get_si()

    # Retry with exponential backoff while hostd comes up.
    reconnect_interval = HOSTD_RECONNECT_INTERVAL
    for _ in range(HOSTD_RECONNECT_ATTEMPT):
        if si:
            break

        # If hostd is not up yet, sleep for a while and try again.
        # Fix: logging.warn() is a deprecated alias — use logging.warning().
        logging.warning("VMChangeListener couldn't connect to hostd.")
        logging.warning("Retrying after %s seconds", reconnect_interval)
        time.sleep(reconnect_interval)
        si = vmdk_ops.get_si()

        # exponential backoff for next retry
        reconnect_interval += reconnect_interval

    # Proceed further only after you get si instance
    if not si:
        # could not connect to hostd even after retries
        # Something is seriously wrong
        return None, "Unable to connect to hostd. Verify that vmware-hostd is running."

    pc = si.content.propertyCollector
    err_msg = create_vm_powerstate_filter(pc, si.content.rootFolder)
    if err_msg:
        # Retrying connection to hostd won't make this error go away. Returning.
        return None, err_msg

    return pc, None
Ejemplo n.º 28
0
def get_propertycollector():
    """
    Connect to hostd. If failed, retry.
    Create the property collector with filter to monitor VM power state changes
    Return the property collecter and error (if any)
    """
    si = vmdk_ops.get_si()

    # Retry with exponential backoff while hostd comes up.
    reconnect_interval = HOSTD_RECONNECT_INTERVAL
    for _ in range(HOSTD_RECONNECT_ATTEMPT):
        if si:
            break

        # If hostd is not up yet, sleep for a while and try again.
        # Fix: logging.warn() is deprecated — use logging.warning().
        logging.warning("VMChangeListener couldn't connect to hostd.")
        logging.warning("Retrying after %s seconds", reconnect_interval)
        time.sleep(reconnect_interval)
        si = vmdk_ops.get_si()

        # exponential backoff for next retry
        reconnect_interval += reconnect_interval

    # Proceed further only after you get si instance
    if not si:
        # could not connect to hostd even after retries
        # Something is seriously wrong
        return None, "Unable to connect to hostd. Verify that vmware-hostd is running."

    pc = si.content.propertyCollector
    err_msg = create_vm_powerstate_filter(pc, si.content.rootFolder)
    if err_msg:
        # Retrying connection to hostd won't make this error go away. Returning.
        return None, err_msg

    return pc, None
Ejemplo n.º 29
0
def get_vm_config_path(vm_name):
    """Return the absolute .vmx config path for vm_name, or None on failure.

    (Docstring corrected: this returns the config path, not a vm_uuid.)
    """
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        config_path = vm.summary.config.vmPathName
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None

    # config path has the format like this "[datastore1] test_vm1/test_vm1/test_vm1.vmx"
    # NOTE(review): split() breaks for datastore or VM names containing
    # spaces — confirm whether such names can occur here.
    datastore, path = config_path.split()
    datastore = datastore[1:-1]
    datastore_path = os.path.join("/vmfs/volumes/", datastore)
    # datastore_path has the format like this /vmfs/volumes/datastore_name
    vm_config_path = os.path.join(datastore_path, path)
    return vm_config_path
Ejemplo n.º 30
0
def init_datastoreCache():
    """
    Initializes the datastore cache with the list of datastores accessible from local ESX host.
    """
    global datastores
    logging.debug("init_datastoreCache: %s", datastores)

    si = vmdk_ops.get_si()

    #  We are connected to ESX so childEntity[0] is current DC/Host
    ds_objects = (si.content.rootFolder
                  .childEntity[0].datastoreFolder.childEntity)
    # Each entry: (name, last URL component, dockvols path under the URL).
    datastores = [(ds.info.name,
                   os.path.split(ds.info.url)[1],
                   os.path.join(ds.info.url, 'dockvols'))
                  for ds in ds_objects]
Ejemplo n.º 31
0
def get_vm_config_path(vm_name):
    """Return the absolute .vmx config path for vm_name, or None on failure.

    (Docstring corrected: this returns the config path, not a vm_uuid.)
    """
    si = vmdk_ops.get_si()
    try:
        # IndexError from vm[0] (no VM with this name) is handled below.
        vm = [d for d in si.content.rootFolder.childEntity[0].vmFolder.childEntity
              if d.config.name == vm_name]
        config_path = vm[0].summary.config.vmPathName
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None

    # config path has the format like this "[datastore1] test_vm1/test_vm1/test_vm1.vmx"
    # NOTE(review): split() breaks for datastore or VM names containing
    # spaces — confirm whether such names can occur here.
    datastore, path = config_path.split()
    datastore = datastore[1:-1]
    datastore_path = os.path.join("/vmfs/volumes/", datastore)
    # datastore_path has the format like this /vmfs/volumes/datastore_name
    vm_config_path = os.path.join(datastore_path, path)
    return vm_config_path
def get_vm_config_path(vm_name):
    """Return the absolute .vmx config path for vm_name, or None on failure.

    (Docstring corrected: this returns the config path, not a vm_uuid.)
    """
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        config_path = vm.summary.config.vmPathName
    except Exception:
        # Fix: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None

    # config path has the format like this "[datastore1] test_vm1/test_vm1/test_vm1.vmx"
    # NOTE(review): split() breaks for datastore or VM names containing
    # spaces — confirm whether such names can occur here.
    datastore, path = config_path.split()
    datastore = datastore[1:-1]
    datastore_path = os.path.join("/vmfs/volumes/", datastore)
    # datastore_path has the format like this /vmfs/volumes/datastore_name
    vm_config_path = os.path.join(datastore_path, path)
    return vm_config_path
def init_datastoreCache():
    """
    Initializes the datastore cache with the list of datastores accessible from local ESX host.
    """
    global datastores
    logging.debug("init_datastoreCache: %s", datastores)

    si = vmdk_ops.get_si()

    #  We are connected to ESX so childEntity[0] is current DC/Host
    host_entity = si.content.rootFolder.childEntity[0]
    datastores = []
    for ds in host_entity.datastoreFolder.childEntity:
        dockvols_path, err = vmdk_ops.get_vol_path(ds.info.name)
        if err:
            # Skip datastores whose dockvols path is unavailable.
            continue
        datastores.append((ds.info.name, ds.info.url, dockvols_path))
    def setUp(self):
        """ Setup run before each test """

        if not self.datastore_name:
            datastores = vmdk_utils.get_datastores()
            if datastores:
                datastore = datastores[0]
                self.datastore_name = datastore[0]
                self.datastore_path = datastore[2]

                if len(datastores) > 1:
                    datastore1 = datastores[1]
                    self.datastore1_name = datastore1[0]
                    # Bug fix: was "self.datastoer1_path = datastore[2]" — a
                    # misspelled attribute that also stored the FIRST
                    # datastore's path. The typo'd name is kept as an alias
                    # in case existing tests still read it.
                    self.datastore1_path = self.datastoer1_path = datastore1[2]
            else:
                self.assertFalse(True)

        self.cleanup()
        # get service_instance, and create VMs
        si = vmdk_ops.get_si()
        error, self.vm1 = vmdk_ops_test.create_vm(si=si,
                                    vm_name=self.vm1_name,
                                    datastore_name=self.datastore_name)
        if error:
            self.assertFalse(True)

        self.vm1_config_path = vmdk_utils.get_vm_config_path(self.vm1_name)

        logging.info("TestTenant: create vm1 name=%s Done", self.vm1_name)

        error, self.vm2 = vmdk_ops_test.create_vm(si=si,
                                    vm_name=self.vm2_name,
                                    datastore_name=self.datastore_name)
        if error:
            self.assertFalse(True)
        self.vm2_config_path = vmdk_utils.get_vm_config_path(self.vm2_name)

        logging.info("TestTenant: create vm2 name=%s Done", self.vm2_name)
    def setUp(self):
        """ Setup run before each test """

        if not self.datastore_name:
            datastores = vmdk_utils.get_datastores()
            if datastores:
                datastore = datastores[0]
                self.datastore_name = datastore[0]
                self.datastore_path = datastore[2]

                if len(datastores) > 1:
                    datastore1 = datastores[1]
                    self.datastore1_name = datastore1[0]
                    # Bug fix: was "self.datastoer1_path = datastore[2]" — a
                    # misspelled attribute that also stored the FIRST
                    # datastore's path. The typo'd name is kept as an alias
                    # in case existing tests still read it.
                    self.datastore1_path = self.datastoer1_path = datastore1[2]
            else:
                self.assertFalse(True)

        self.cleanup()
        # get service_instance, and create VMs
        si = vmdk_ops.get_si()
        error, self.vm1 = test_utils.create_vm(si=si,
                                    vm_name=self.vm1_name,
                                    datastore_name=self.datastore_name)
        if error:
            self.assertFalse(True)

        self.vm1_config_path = vmdk_utils.get_vm_config_path(self.vm1_name)

        logging.info("TestTenant: create vm1 name=%s Done", self.vm1_name)

        error, self.vm2 = test_utils.create_vm(si=si,
                                    vm_name=self.vm2_name,
                                    datastore_name=self.datastore_name)
        if error:
            self.assertFalse(True)
        self.vm2_config_path = vmdk_utils.get_vm_config_path(self.vm2_name)

        logging.info("TestTenant: create vm2 name=%s Done", self.vm2_name)
def get_datastore_objects():
    """ return all datastore objects """
    # childEntity[0] is the current host/DC when connected directly to ESX.
    return vmdk_ops.get_si().content.rootFolder.childEntity[0].datastore
Ejemplo n.º 37
0
 def cleanup_vms(cls):
     """Tear down the two VMs created for this test class."""
     si = vmdk_ops.get_si()
     for vm in (cls.vm1, cls.vm2):
         vmdk_ops_test.remove_vm(si, vm)
Ejemplo n.º 38
0
def get_datastore_objects():
    """ return all datastore objects """
    service_instance = vmdk_ops.get_si()
    # First child entity is the current host/DC on a direct ESX connection.
    host = service_instance.content.rootFolder.childEntity[0]
    return host.datastore
Ejemplo n.º 39
0
 def cleanup_vms(cls):
     """Remove both per-class test VMs."""
     service_instance = vmdk_ops.get_si()
     vmdk_ops_test.remove_vm(service_instance, cls.vm1)
     vmdk_ops_test.remove_vm(service_instance, cls.vm2)