def sync_with_hypervisor(vmachineguid, storagedriver_id=None):
    """
    Updates a given vmachine with data retrieved from a given pmachine.

    Three sources are tried in order until one yields a vm object:
    1. The management center (when the pmachine has one and a storagedriver_id was given)
    2. The hypervisor, by hypervisor_id (when no storagedriver_id was given)
    3. The hypervisor, by devicename (when a storagedriver_id was given)
    :param vmachineguid: Guid of the virtual machine
    :param storagedriver_id: Storage Driver hosting the vmachine
    :raises RuntimeError: when none of the strategies produced a vm object
    """
    try:
        vmachine = VMachine(vmachineguid)
    except Exception as ex:
        VMachineController._logger.info('Cannot get VMachine object: {0}'.format(str(ex)))
        raise
    vm_object = None
    # Strategy 1: ask the management center, if one is configured for the pmachine
    if vmachine.pmachine.mgmtcenter and storagedriver_id is not None and vmachine.devicename is not None:
        try:
            mgmt_center = Factory.get_mgmtcenter(vmachine.pmachine)
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            VMachineController._logger.info('Syncing vMachine (name {0}) with Management center {1}'.format(vmachine.name, vmachine.pmachine.mgmtcenter.name))
            vm_object = mgmt_center.get_vm_agnostic_object(devicename=vmachine.devicename,
                                                           ip=storagedriver.storage_ip,
                                                           mountpoint=storagedriver.mountpoint)
        except Exception as ex:
            # Failure here is non-fatal: the hypervisor-based strategies below may still succeed
            VMachineController._logger.info('Error while fetching vMachine info from management center: {0}'.format(str(ex)))
    # Strategy 2: no storagedriver_id given - sync purely on hypervisor id and pmachine
    if vm_object is None and storagedriver_id is None and vmachine.hypervisor_id is not None and vmachine.pmachine is not None:
        try:
            # Only the vmachine was received, so base the sync on hypervisor id and pmachine
            hypervisor = Factory.get(vmachine.pmachine)
            VMachineController._logger.info('Syncing vMachine (name {0})'.format(vmachine.name))
            vm_object = hypervisor.get_vm_agnostic_object(vmid=vmachine.hypervisor_id)
        except Exception as ex:
            VMachineController._logger.info('Error while fetching vMachine info from hypervisor: {0}'.format(str(ex)))
    # Strategy 3: storagedriver_id given - resolve through the devicename
    if vm_object is None and storagedriver_id is not None and vmachine.devicename is not None:
        try:
            # Storage Driver id was given, using the devicename instead (to allow hypervisor id updates
            # which can be caused by re-adding a vm to the inventory)
            pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            hypervisor = Factory.get(pmachine)
            if not hypervisor.file_exists(storagedriver, hypervisor.clean_vmachine_filename(vmachine.devicename)):
                # Backing file is gone - silently abort the sync
                return
            # The vmachine may have moved; re-point it to the pmachine owning this storagedriver
            vmachine.pmachine = pmachine
            vmachine.save()
            VMachineController._logger.info('Syncing vMachine (device {0}, ip {1}, mountpoint {2})'.format(vmachine.devicename, storagedriver.storage_ip, storagedriver.mountpoint))
            vm_object = hypervisor.get_vm_object_by_devicename(devicename=vmachine.devicename,
                                                               ip=storagedriver.storage_ip,
                                                               mountpoint=storagedriver.mountpoint)
        except Exception as ex:
            VMachineController._logger.info('Error while fetching vMachine info from hypervisor using devicename: {0}'.format(str(ex)))
    if vm_object is None:
        message = 'Not enough information to sync vmachine'
        VMachineController._logger.info('Error: {0}'.format(message))
        raise RuntimeError(message)
    VMachineController.update_vmachine_config(vmachine, vm_object)
def update_status(storagedriver_id):
    """
    Sets Storage Driver offline in case hypervisor management Center reports the hypervisor pmachine
    related to this Storage Driver as unavailable.
    :param storagedriver_id: ID of the storagedriver to update its status
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    storagerouter = storagedriver.storagerouter
    if pmachine.mgmtcenter:
        # Update status
        pmachine.invalidate_dynamics(['host_status'])
    else:
        # No management Center, cannot update status via api
        logger.info('Updating status of pmachine {0} using SSHClient'.format(pmachine.name))
        host_status = 'RUNNING'
        try:
            client = SSHClient(storagerouter, username='******')
            configuration_dir = EtcdConfiguration.get('/ovs/framework/paths|cfgdir')
            logger.info('SSHClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
            # Probe the volumedriver itself through a remote LocalStorageRouterClient;
            # server_revision() acts as a liveness check
            with Remote(client.ip, [LocalStorageRouterClient]) as remote:
                lsrc = remote.LocalStorageRouterClient('{0}/storagedriver/storagedriver/{1}.json'.format(configuration_dir, storagedriver.vpool.name))
                lsrc.server_revision()
            logger.info('LocalStorageRouterClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
        except Exception as ex:
            # Any failure (SSH, etcd, volumedriver) is treated as a halted host
            logger.error('Connectivity check failed, assuming host {0} is halted. {1}'.format(pmachine.name, ex))
            host_status = 'HALTED'
        if host_status != 'RUNNING':
            # Host is stopped
            storagedriver_client = StorageDriverClient.load(storagedriver.vpool)
            storagedriver_client.mark_node_offline(str(storagedriver.storagedriver_id))
def resize_from_voldrv(volumename, volumesize, volumepath, storagedriver_id):
    """
    Handle a volumedriver resize event by (re)modeling the disk at its new size.
    Triggered by volumedriver messages on the queue.

    @param volumepath: path on hypervisor to the volume
    @param volumename: volume id of the disk
    @param volumesize: size of the volume
    """
    host_machine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    driver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    device_path = Factory.get(host_machine).clean_backing_disk_filename(volumepath)
    lock = VolatileMutex('{}_{}'.format(volumename, device_path))
    try:
        # Serialize lookups so two concurrent events can't both create a new VDisk
        lock.acquire(wait=30)
        disk = VDiskList.get_vdisk_by_volume_id(volumename)
        if disk is None:
            disk = VDiskList.get_by_devicename_and_vpool(device_path, driver.vpool)
        if disk is None:
            disk = VDisk()
    finally:
        lock.release()
    disk.devicename = device_path
    disk.volume_id = volumename
    disk.size = volumesize
    disk.vpool = driver.vpool
    disk.save()
def get_pmachine_by_ip(ip):
    """
    Look up the Physical Machine modeled for a given IP address.
    :param ip: IP of the Physical Machine
    :return: Physical Machine DAL object
    """
    pmachine = PMachineList.get_by_ip(ip=ip)
    return pmachine
def delete_from_voldrv(name, storagedriver_id):
    """
    Remove the vMachine modeled for the given vmx devicename, if one exists.
    Only VMWARE and KVM pmachines are handled; a deletion event is fired before
    the model object is removed (its vdisks are abandoned, not deleted).
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        return
    hypervisor = Factory.get(pmachine)
    devicename = hypervisor.clean_vmachine_filename(name)
    vpool = None
    if pmachine.hvtype == 'VMWARE':
        # On VMWARE the devicename is only unique per vPool
        vpool = StorageDriverList.get_by_storagedriver_id(storagedriver_id).vpool
    machine = VMachineList.get_by_devicename_and_vpool(devicename, vpool)
    if machine is None:
        return
    MessageController.fire(MessageController.Type.EVENT,
                           {'type': 'vmachine_deleted',
                            'metadata': {'name': machine.name}})
    machine.delete(abandon=['vdisks'])
def delete_from_voldrv(volumename, storagedriver_id):
    """
    Delete a disk
    Triggered by volumedriver messages on the queue
    @param volumename: volume id of the disk
    """
    _ = storagedriver_id  # For logging purposes
    disk = VDiskList.get_vdisk_by_volume_id(volumename)
    if disk is not None:
        mutex = VolatileMutex('{}_{}'.format(volumename, disk.devicename))
        try:
            mutex.acquire(wait=20)
            pmachine = None
            try:
                pmachine = PMachineList.get_by_storagedriver_id(disk.storagedriver_id)
            except RuntimeError as ex:
                if 'could not be found' not in str(ex):
                    raise
                # else: pmachine can't be loaded, because the volumedriver doesn't know about it anymore
            if pmachine is not None:
                # Poll up to ~5 seconds: if the backing file is still present on the
                # hypervisor, this delete event is considered spurious and ignored
                limit = 5
                hypervisor = Factory.get(pmachine)
                exists = hypervisor.file_exists(disk.vpool, disk.devicename)
                while limit > 0 and exists is True:
                    time.sleep(1)
                    exists = hypervisor.file_exists(disk.vpool, disk.devicename)
                    limit -= 1
                if exists is True:
                    logger.info('Disk {0} still exists, ignoring delete'.format(disk.devicename))
                    return
            logger.info('Delete disk {}'.format(disk.name))
            disk.delete()
        finally:
            mutex.release()
def resize_from_voldrv(volumename, volumesize, volumepath, storagedriver_id):
    """
    Resize a disk
    Triggered by volumedriver messages on the queue
    @param volumepath: path on hypervisor to the volume
    @param volumename: volume id of the disk
    @param volumesize: size of the volume
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    hypervisor = Factory.get(pmachine)
    volumepath = hypervisor.clean_backing_disk_filename(volumepath)
    mutex = VolatileMutex('{}_{}'.format(volumename, volumepath))
    try:
        # Serialize the lookup/create so concurrent events don't model the same disk twice
        mutex.acquire(wait=30)
        disk = VDiskList.get_vdisk_by_volume_id(volumename)
        if disk is None:
            # Fall back to devicename lookup; create a fresh model object if still unknown
            disk = VDiskList.get_by_devicename_and_vpool(volumepath, storagedriver.vpool)
            if disk is None:
                disk = VDisk()
    finally:
        mutex.release()
    disk.devicename = volumepath
    disk.volume_id = volumename
    disk.size = volumesize
    disk.vpool = storagedriver.vpool
    disk.save()
    # Keep the management center in sync and re-validate MDS safety after the resize
    VDiskController.sync_with_mgmtcenter(disk, pmachine, storagedriver)
    MDSServiceController.ensure_safety(disk)
def _find_ovs_model_pmachine_guid_by_hostname(self, hostname):
    """Find OVS pmachine guid based on storagerouter name

    :param hostname: hostname to resolve (normalized via _get_real_hostname)
    :return guid: GUID
    :raises RuntimeError: when no pmachine has a storagerouter with that name
    """
    hostname = self._get_real_hostname(hostname)
    LOG.debug("[_FIND OVS PMACHINE] Hostname %s" % (hostname))
    # Improvement: iterate lazily and return on the first match instead of
    # eagerly materialising the full (guid, storagerouter-name) mapping and
    # then scanning it linearly.
    wanted = str(hostname)
    for pmachine in PMachineList.get_pmachines():
        for storagerouter in pmachine.storagerouters:
            if str(storagerouter.name) == wanted:
                msg = "Found pmachineguid %s for Hostname %s"
                LOG.info(msg % (pmachine.guid, hostname))
                return pmachine.guid
    raise RuntimeError("No PMachine guid found for Hostname %s" % hostname)
def sync_with_hypervisor(vmachineguid, storagedriver_id=None):
    """
    Updates a given vmachine with data retrieved from a given pmachine
    :param vmachineguid: Guid of the virtual machine
    :param storagedriver_id: Storage Driver hosting the vmachine (optional)
    :raises RuntimeError: when not enough information is available to sync,
                          or when no hypervisor vm object could be retrieved
    """
    try:
        vmachine = VMachine(vmachineguid)
        if storagedriver_id is None and vmachine.hypervisor_id is not None and vmachine.pmachine is not None:
            # Only the vmachine was received, so base the sync on hypervisorid and pmachine
            hypervisor = Factory.get(vmachine.pmachine)
            logger.info('Syncing vMachine (name {})'.format(vmachine.name))
            vm_object = hypervisor.get_vm_agnostic_object(vmid=vmachine.hypervisor_id)
        elif storagedriver_id is not None and vmachine.devicename is not None:
            # Storage Driver id was given, using the devicename instead (to allow hypervisorid updates
            # which can be caused by re-adding a vm to the inventory)
            pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            hypervisor = Factory.get(pmachine)
            if not hypervisor.file_exists(vmachine.vpool, hypervisor.clean_vmachine_filename(vmachine.devicename)):
                # Backing file is gone - silently abort the sync
                return
            # The vmachine may have moved; re-point it to the pmachine owning this storagedriver
            vmachine.pmachine = pmachine
            vmachine.save()
            logger.info('Syncing vMachine (device {}, ip {}, mtpt {})'.format(vmachine.devicename,
                                                                              storagedriver.storage_ip,
                                                                              storagedriver.mountpoint))
            vm_object = hypervisor.get_vm_object_by_devicename(devicename=vmachine.devicename,
                                                               ip=storagedriver.storage_ip,
                                                               mountpoint=storagedriver.mountpoint)
        else:
            message = 'Not enough information to sync vmachine'
            logger.info('Error: {0}'.format(message))
            raise RuntimeError(message)
    except Exception as ex:
        logger.info('Error while fetching vMachine info: {0}'.format(str(ex)))
        raise
    if vm_object is None:
        message = 'Could not retreive hypervisor vmachine object'
        logger.info('Error: {0}'.format(message))
        raise RuntimeError(message)
    else:
        VMachineController.update_vmachine_config(vmachine, vm_object)
def _find_ovs_model_pmachine_guid_by_hostname(self, hostname):
    """Find OVS pmachine guid based on storagerouter name
    :return guid: GUID
    """
    hostname = self._get_real_hostname(hostname)
    LOG.debug('[_FIND OVS PMACHINE] Hostname %s' % (hostname))
    # Collect every (pmachine guid, storagerouter name) pair, then search it
    pairs = [(pmachine.guid, str(storagerouter.name))
             for pmachine in PMachineList.get_pmachines()
             for storagerouter in pmachine.storagerouters]
    wanted = str(hostname)
    for guid, router_name in pairs:
        if router_name == wanted:
            msg = 'Found pmachineguid %s for Hostname %s'
            LOG.info(msg % (guid, hostname))
            return guid
    raise RuntimeError('No PMachine guid found for Hostname %s' % hostname)
def rename_from_voldrv(old_name, new_name, storagedriver_id):
    """
    This machine will handle the rename of a vmx file
    :param old_name: Old name of vmx
    :param new_name: New name for the vmx
    :param storagedriver_id: Storage Driver hosting the vmachine
    :raises RuntimeError: when an UPDATE scenario cannot create the vMachine
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        return
    hypervisor = Factory.get(pmachine)
    if pmachine.hvtype == 'VMWARE':
        # On VMWARE the devicename is only unique per vPool
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        vpool = storagedriver.vpool
    else:
        vpool = None
    old_name = hypervisor.clean_vmachine_filename(old_name)
    new_name = hypervisor.clean_vmachine_filename(new_name)
    scenario = hypervisor.get_rename_scenario(old_name, new_name)
    if scenario == 'RENAME':
        # Most likely a change from path. Updating path
        vm = VMachineList.get_by_devicename_and_vpool(old_name, vpool)
        if vm is not None:
            vm.devicename = new_name
            vm.save()
    elif scenario == 'UPDATE':
        vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
        if vm is None:
            # The vMachine doesn't seem to exist, so it's likely the create didn't came trough
            # Let's create it anyway
            VMachineController.update_from_voldrv(new_name, storagedriver_id=storagedriver_id)
            vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
            if vm is None:
                raise RuntimeError('Could not create vMachine on rename. Aborting.')
        try:
            VMachineController.sync_with_hypervisor(vm.guid, storagedriver_id=storagedriver_id)
            vm.status = 'SYNC'
        except Exception:
            # Fix: was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt. A failed sync only marks the machine out-of-sync.
            vm.status = 'SYNC_NOK'
        vm.save()
def update_status(storagedriver_id):
    """
    Sets Storage Driver offline in case hypervisor management Center reports the hypervisor pmachine
    related to this Storage Driver as unavailable.
    :param storagedriver_id: ID of the storagedriver to update its status
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    storagerouter = storagedriver.storagerouter
    if pmachine.mgmtcenter:
        # Update status
        pmachine.invalidate_dynamics(['host_status'])
    else:
        # No management Center, cannot update status via api
        logger.info('Updating status of pmachine {0} using SSHClient'.format(pmachine.name))
        host_status = 'RUNNING'
        try:
            client = SSHClient(storagerouter, username='******')
            configuration_dir = EtcdConfiguration.get('/ovs/framework/paths|cfgdir')
            logger.info('SSHClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
            # server_revision() doubles as a liveness probe of the volumedriver
            with Remote(client.ip, [LocalStorageRouterClient]) as remote:
                lsrc = remote.LocalStorageRouterClient('{0}/storagedriver/storagedriver/{1}.json'.format(configuration_dir, storagedriver.vpool.name))
                lsrc.server_revision()
            logger.info('LocalStorageRouterClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
        except Exception as ex:
            # Any failure (SSH, etcd, volumedriver) is treated as a halted host
            logger.error('Connectivity check failed, assuming host {0} is halted. {1}'.format(pmachine.name, ex))
            host_status = 'HALTED'
        if host_status != 'RUNNING':
            # Host is stopped
            storagedriver_client = StorageDriverClient.load(storagedriver.vpool)
            storagedriver_client.mark_node_offline(str(storagedriver.storagedriver_id))
def rename_from_voldrv(volumename, volume_old_path, volume_new_path, storagedriver_id):
    """
    Handle a volumedriver rename event by pointing the modeled disk at its new path.

    @param volumename: volume id of the disk
    @param volume_old_path: old path on hypervisor to the volume
    @param volume_new_path: new path on hypervisor to the volume
    """
    hypervisor = Factory.get(PMachineList.get_by_storagedriver_id(storagedriver_id))
    old_path = hypervisor.clean_backing_disk_filename(volume_old_path)
    new_path = hypervisor.clean_backing_disk_filename(volume_new_path)
    disk = VDiskList.get_vdisk_by_volume_id(volumename)
    if not disk:
        # Unknown volume: nothing modeled to move
        return
    logger.info("Move disk {} from {} to {}".format(disk.name, old_path, new_path))
    disk.devicename = new_path
    disk.save()
def delete_from_voldrv(volumename, storagedriver_id):
    """
    Delete a disk
    Triggered by volumedriver messages on the queue
    @param volumename: volume id of the disk
    """
    # NOTE(review): the original '# For logging purposes' note was stale -
    # storagedriver_id IS used below for the StorageDriverList lookup,
    # so this aliasing looks vestigial.
    _ = storagedriver_id  # For logging purposes
    disk = VDiskList.get_vdisk_by_volume_id(volumename)
    if disk is not None:
        mutex = VolatileMutex('{}_{}'.format(volumename, disk.devicename))
        try:
            mutex.acquire(wait=20)
            pmachine = None
            try:
                pmachine = PMachineList.get_by_storagedriver_id(disk.storagedriver_id)
            except RuntimeError as ex:
                if 'could not be found' not in str(ex):
                    raise
                # else: pmachine can't be loaded, because the volumedriver doesn't know about it anymore
            if pmachine is not None:
                # Poll up to ~5 seconds: if the backing file is still present,
                # treat the delete event as spurious and ignore it
                limit = 5
                # NOTE(review): pmachine is resolved from disk.storagedriver_id but the
                # storagedriver here from the event's storagedriver_id - confirm both
                # are intended to refer to the same driver
                storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
                hypervisor = Factory.get(pmachine)
                exists = hypervisor.file_exists(storagedriver, disk.devicename)
                while limit > 0 and exists is True:
                    time.sleep(1)
                    exists = hypervisor.file_exists(storagedriver, disk.devicename)
                    limit -= 1
                if exists is True:
                    logger.info('Disk {0} still exists, ignoring delete'.format(disk.devicename))
                    return
            logger.info('Delete disk {}'.format(disk.name))
            # Remove dependent MDS services before removing the disk itself
            for mds_service in disk.mds_services:
                mds_service.delete()
            disk.delete()
        finally:
            mutex.release()
def rename_from_voldrv(old_name, new_name, storagedriver_id):
    """
    This machine will handle the rename of a vmx file
    :param old_name: Old name of vmx
    :param new_name: New name for the vmx
    :param storagedriver_id: Storage Driver hosting the vmachine
    :raises RuntimeError: when an UPDATE scenario cannot create the vMachine
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        return
    hypervisor = Factory.get(pmachine)
    if pmachine.hvtype == 'VMWARE':
        # On VMWARE the devicename is only unique per vPool
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        vpool = storagedriver.vpool
    else:
        vpool = None
    old_name = hypervisor.clean_vmachine_filename(old_name)
    new_name = hypervisor.clean_vmachine_filename(new_name)
    scenario = hypervisor.get_rename_scenario(old_name, new_name)
    if scenario == 'RENAME':
        # Most likely a change from path. Updating path
        vm = VMachineList.get_by_devicename_and_vpool(old_name, vpool)
        if vm is not None:
            vm.devicename = new_name
            vm.save()
    elif scenario == 'UPDATE':
        vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
        if vm is None:
            # The vMachine doesn't seem to exist, so it's likely the create didn't came trough
            # Let's create it anyway
            VMachineController.update_from_voldrv(new_name, storagedriver_id=storagedriver_id)
            vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
            if vm is None:
                raise RuntimeError('Could not create vMachine on rename. Aborting.')
        try:
            VMachineController.sync_with_hypervisor(vm.guid, storagedriver_id=storagedriver_id)
            vm.status = 'SYNC'
        except Exception:
            # Fix: was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt. A failed sync only marks the machine out-of-sync.
            vm.status = 'SYNC_NOK'
        vm.save()
def sync_with_hypervisor(vmachineguid, storagedriver_id=None):
    """
    Updates a given vmachine with data retrieved from a given pmachine
    :param vmachineguid: Guid of the virtual machine
    :param storagedriver_id: Storage Driver hosting the vmachine (optional)
    :raises RuntimeError: when not enough information is available to sync,
                          or when no hypervisor vm object could be retrieved
    """
    try:
        vmachine = VMachine(vmachineguid)
        if storagedriver_id is None and vmachine.hypervisor_id is not None and vmachine.pmachine is not None:
            # Only the vmachine was received, so base the sync on hypervisorid and pmachine
            hypervisor = Factory.get(vmachine.pmachine)
            logger.info('Syncing vMachine (name {})'.format(vmachine.name))
            vm_object = hypervisor.get_vm_agnostic_object(vmid=vmachine.hypervisor_id)
        elif storagedriver_id is not None and vmachine.devicename is not None:
            # Storage Driver id was given, using the devicename instead (to allow hypervisorid updates
            # which can be caused by re-adding a vm to the inventory)
            pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            hypervisor = Factory.get(pmachine)
            if not hypervisor.file_exists(vmachine.vpool, hypervisor.clean_vmachine_filename(vmachine.devicename)):
                # Backing file is gone - silently abort the sync
                return
            # The vmachine may have moved; re-point it to the pmachine owning this storagedriver
            vmachine.pmachine = pmachine
            vmachine.save()
            logger.info('Syncing vMachine (device {}, ip {}, mtpt {})'.format(vmachine.devicename, storagedriver.storage_ip, storagedriver.mountpoint))
            vm_object = hypervisor.get_vm_object_by_devicename(devicename=vmachine.devicename, ip=storagedriver.storage_ip, mountpoint=storagedriver.mountpoint)
        else:
            message = 'Not enough information to sync vmachine'
            logger.info('Error: {0}'.format(message))
            raise RuntimeError(message)
    except Exception as ex:
        logger.info('Error while fetching vMachine info: {0}'.format(str(ex)))
        raise
    if vm_object is None:
        message = 'Could not retreive hypervisor vmachine object'
        logger.info('Error: {0}'.format(message))
        raise RuntimeError(message)
    else:
        VMachineController.update_vmachine_config(vmachine, vm_object)
def update_status(storagedriver_id):
    """
    Sets Storage Driver offline in case hypervisor management Center reports the
    hypervisor pmachine related to this Storage Driver as unavailable.
    :param storagedriver_id: ID of the storagedriver to update its status
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.mgmtcenter:
        # Update status
        pmachine.invalidate_dynamics(['host_status'])
        host_status = pmachine.host_status
        if host_status != 'RUNNING':
            # Host is stopped
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            # Consistency fix: load() is invoked on the class elsewhere in this
            # codebase (StorageDriverClient.load(...)); the needless
            # StorageDriverClient() instantiation was removed.
            storagedriver_client = StorageDriverClient.load(storagedriver.vpool)
            storagedriver_client.mark_node_offline(str(storagedriver.storagedriver_id))
    else:
        # No management Center, cannot update status via api
        # TODO: should we try manually (ping, ssh)?
        pass
def delete_from_voldrv(name, storagedriver_id):
    """
    This method will delete a vmachine based on the name of the vmx given
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        return
    hypervisor = Factory.get(pmachine)
    devicename = hypervisor.clean_vmachine_filename(name)
    if pmachine.hvtype == 'VMWARE':
        # Devicenames are only unique per vPool on VMWARE
        vpool = StorageDriverList.get_by_storagedriver_id(storagedriver_id).vpool
    else:
        vpool = None
    machine = VMachineList.get_by_devicename_and_vpool(devicename, vpool)
    if machine is None:
        return
    payload = {'type': 'vmachine_deleted',
               'metadata': {'name': machine.name}}
    MessageController.fire(MessageController.Type.EVENT, payload)
    machine.delete(abandon=['vdisks'])
def rename_from_voldrv(volumename, volume_old_path, volume_new_path, storagedriver_id):
    """
    Rename a disk
    Triggered by volumedriver messages
    @param volumename: volume id of the disk
    @param volume_old_path: old path on hypervisor to the volume
    @param volume_new_path: new path on hypervisor to the volume
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    hypervisor = Factory.get(pmachine)
    volume_old_path = hypervisor.clean_backing_disk_filename(volume_old_path)
    volume_new_path = hypervisor.clean_backing_disk_filename(volume_new_path)
    disk = VDiskList.get_vdisk_by_volume_id(volumename)
    # Only update the model when the volume is known; unknown volumes are ignored
    if disk:
        logger.info('Move disk {} from {} to {}'.format(disk.name, volume_old_path, volume_new_path))
        disk.devicename = volume_new_path
        disk.save()
def update_status(storagedriver_id):
    """
    Sets Storage Driver offline in case hypervisor management Center reports the
    hypervisor pmachine related to this Storage Driver as unavailable.
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if not pmachine.mgmtcenter:
        # No management Center, cannot update status via api
        # @TODO: should we try manually (ping, ssh)?
        return
    # Refresh the cached status before reading it
    pmachine.invalidate_dynamics(['host_status'])
    if pmachine.host_status != 'RUNNING':
        # Host is stopped
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        client = StorageDriverClient.load(storagedriver.vpool)
        client.mark_node_offline(str(storagedriver.storagedriver_id))
def update_status(storagedriver_id):
    """
    Sets Storage Driver offline in case hypervisor management Center reports the hypervisor pmachine
    related to this Storage Driver as unavailable.
    :param storagedriver_id: ID of the storagedriver to update its status
    :type storagedriver_id: str
    :return: None
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    storagerouter = storagedriver.storagerouter
    if pmachine.mgmtcenter:
        # Update status
        pmachine.invalidate_dynamics(['host_status'])
        host_status = pmachine.host_status
    else:
        # No management Center, cannot update status via api
        StorageDriverController._logger.info('Updating status of pmachine {0} using SSHClient'.format(pmachine.name))
        path = StorageDriverConfiguration('storagedriver', storagedriver.vpool.guid, storagedriver.storagedriver_id).remote_path
        host_status = 'RUNNING'
        try:
            client = SSHClient(storagerouter, username='******')
            StorageDriverController._logger.info('SSHClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
        except UnableToConnectException as ex:
            StorageDriverController._logger.error('SSHClient connectivity check failed, assuming host {0} is halted. {1}'.format(pmachine.name, ex))
            host_status = 'HALTED'
        else:
            # SSH is up; now probe the volumedriver itself, server_revision() acts as liveness check
            try:
                with remote(client.ip, [LocalStorageRouterClient]) as rem:
                    lsrc = rem.LocalStorageRouterClient(path)
                    lsrc.server_revision()
                    StorageDriverController._logger.info('LocalStorageRouterClient connected successfully to {0} at {1}'.format(pmachine.name, client.ip))
            except (EOFError, RuntimeError, ClusterNotReachableException) as ex:
                StorageDriverController._logger.error('LocalStorageRouterClient check failed, assuming volumedriver on host {0} {1} is halted. {2}'.format(pmachine.name, client.ip, ex))
                host_status = 'HALTED'
    if host_status != 'RUNNING':
        # Host is stopped
        storagedriver_client = StorageDriverClient.load(storagedriver.vpool)
        storagedriver_client.mark_node_offline(str(storagedriver.storagedriver_id))
        StorageDriverController._logger.warning('Storagedriver {0} marked offline'.format(storagedriver.storagedriver_id))
def list(self):
    """
    List every modeled pMachine.
    """
    pmachines = PMachineList.get_pmachines()
    return pmachines
def update_from_voldrv(name, storagedriver_id):
    """
    This method will update/create a vmachine based on a given vmx/xml file
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        return
    hypervisor = Factory.get(pmachine)
    name = hypervisor.clean_vmachine_filename(name)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    vpool = storagedriver.vpool
    # NOTE(review): under Python 2 the comprehension variable leaks - after this
    # line `storagedriver` is rebound to the last storagedriver of the vpool.
    # Confirm this is intended before the file_exists() calls below.
    machine_ids = [storagedriver.storagerouter.machine_id for storagedriver in vpool.storagedrivers]
    if hypervisor.should_process(name, machine_ids=machine_ids):
        if pmachine.hvtype == 'VMWARE':
            # On VMWARE the devicename is only unique per vPool
            storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
            vpool = storagedriver.vpool
        else:
            vpool = None
        pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
        mutex = VolatileMutex('{}_{}'.format(name, vpool.guid if vpool is not None else 'none'))
        try:
            mutex.acquire(wait=120)
            # Poll up to ~5 seconds for the file to appear; if it never does,
            # treat this as a deletion that raced the create event
            limit = 5
            exists = hypervisor.file_exists(storagedriver, name)
            while limit > 0 and exists is False:
                time.sleep(1)
                exists = hypervisor.file_exists(storagedriver, name)
                limit -= 1
            if exists is False:
                logger.info('Could not locate vmachine with name {0} on vpool {1}'.format(name, vpool.name))
                vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
                if vmachine is not None:
                    VMachineController.delete_from_voldrv(name, storagedriver_id=storagedriver_id)
                return
        finally:
            mutex.release()
        try:
            # Re-acquire briefly to create the model object if it's still missing
            mutex.acquire(wait=5)
            vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
            if not vmachine:
                vmachine = VMachine()
                vmachine.vpool = vpool
                vmachine.pmachine = pmachine
                vmachine.status = 'CREATED'
                vmachine.devicename = name
                vmachine.save()
        finally:
            mutex.release()
        if pmachine.hvtype == 'KVM':
            try:
                VMachineController.sync_with_hypervisor(vmachine.guid, storagedriver_id=storagedriver_id)
                vmachine.status = 'SYNC'
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
            # consider narrowing to `except Exception`
            except:
                vmachine.status = 'SYNC_NOK'
            vmachine.save()
    else:
        logger.info('Ignored invalid file {0}'.format(name))
def get_all_ips():
    """
    Collect the IP address of every modeled Physical Machine.
    :return: List of IPs
    """
    ips = []
    for machine in PMachineList.get_pmachines():
        ips.append(str(machine.ip))
    return ips
def get_pmachines():
    """
    Fetch every modeled Physical Machine.
    :return: Data-object list of Physical Machines
    """
    all_pmachines = PMachineList.get_pmachines()
    return all_pmachines
def _bootstrap_dal_models(self):
    """
    Load/hook dal models as snmp oids.

    Registers every DAL object (storagerouters, vmachines, vdisks, pmachines,
    vpools, storagedrivers and - when importable - ALBA backends) under its
    SNMP OID branch, then unregisters OIDs whose backing object disappeared
    and triggers an SNMP reload when that happened.
    Can be disabled via the '<STORAGE_PREFIX>_config_dal_enabled' persistent key.
    """
    _guids = set()
    enabled_key = "{0}_config_dal_enabled".format(STORAGE_PREFIX)
    self.instance_oid = 0
    try:
        enabled = self.persistent.get(enabled_key)
    except KeyNotFoundException:
        enabled = True  # Enabled by default, can be disabled by setting the key
    if enabled:
        from ovs.dal.lists.vdisklist import VDiskList
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        from ovs.dal.lists.pmachinelist import PMachineList
        from ovs.dal.lists.vmachinelist import VMachineList
        from ovs.dal.lists.vpoollist import VPoolList
        from ovs.dal.lists.storagedriverlist import StorageDriverList

        # Statistics sub-keys registered under OID branch "2.<index>".
        # Order is significant: the list position defines the OID, so it
        # must stay stable across releases.
        stats_keys = ["operations", "cluster_cache_misses_ps", "data_read",
                      "sco_cache_misses", "sco_cache_hits_ps", "sco_cache_hits",
                      "write_operations", "cluster_cache_misses",
                      "read_operations_ps", "sco_cache_misses_ps",
                      "backend_write_operations", "backend_data_read",
                      "cache_hits", "backend_write_operations_ps",
                      "metadata_store_hits_ps", "metadata_store_misses",
                      "backend_data_written", "data_read_ps", "read_operations",
                      "cluster_cache_hits", "data_written_ps",
                      "cluster_cache_hits_ps", "cache_hits_ps", "timestamp",
                      "metadata_store_misses_ps", "backend_data_written_ps",
                      "backend_read_operations", "data_written",
                      "metadata_store_hits", "backend_data_read_ps",
                      "operations_ps", "backend_read_operations_ps",
                      "data_transferred_ps", "write_operations_ps",
                      "data_transferred"]
        # The vdisk branch historically swaps positions 1 and 20
        # ("data_written_ps" <-> "cluster_cache_misses_ps"); preserved on
        # purpose to keep already-published OIDs stable.
        vdisk_stats_keys = list(stats_keys)
        vdisk_stats_keys[1], vdisk_stats_keys[20] = vdisk_stats_keys[20], vdisk_stats_keys[1]

        def _register_statistics(class_id, obj, keys):
            # One registration per statistics sub-key under OID "2.<index>"
            for index, stat_key in enumerate(keys):
                self._register_dal_model(class_id, obj, 'statistics',
                                         "2.{0}".format(index), key=stat_key, atype=int)

        for storagerouter in StorageRouterList.get_storagerouters():
            _guids.add(storagerouter.guid)
            if not self._check_added(storagerouter):
                self._register_dal_model(10, storagerouter, 'guid', "0")
                self._register_dal_model(10, storagerouter, 'name', "1")
                self._register_dal_model(10, storagerouter, 'pmachine', "3", key='host_status')
                self._register_dal_model(10, storagerouter, 'description', "4")
                self._register_dal_model(10, storagerouter, 'devicename', "5")
                self._register_dal_model(10, storagerouter, 'dtl_mode', "6")
                self._register_dal_model(10, storagerouter, 'ip', "8")
                self._register_dal_model(10, storagerouter, 'machineid', "9")
                self._register_dal_model(10, storagerouter, 'status', "10")
                # NOTE(review): these lambdas compare against
                # 'storagedriver.storagedriver_id', where 'storagedriver' is the
                # (Python 2) leaked comprehension variable, i.e. the *last*
                # storagedriver of the router - confirm this is intended before
                # changing; preserved as-is here.
                self._register_dal_model(10, storagerouter, '#vdisks', "11",
                                         func=lambda storagerouter: len([vdisk for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                         atype=int)
                self._register_dal_model(10, storagerouter, '#vmachines', "12",
                                         func=lambda storagerouter: len(set([vdisk.vmachine.guid for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id])),
                                         atype=int)
                self._register_dal_model(10, storagerouter, '#stored_data', "13",
                                         func=lambda storagerouter: sum([vdisk.vmachine.stored_data for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                         atype=int)
                self.instance_oid += 1

        # vMachine templates live under branch 11
        for vm in VMachineList.get_vmachines():
            _guids.add(vm.guid)
            if not self._check_added(vm):
                if vm.is_vtemplate:
                    self._register_dal_model(11, vm, 'guid', "0")
                    self._register_dal_model(11, vm, 'name', "1")

                    def _children(vmt):
                        # Count vdisks of non-template vmachines whose parent
                        # vdisk belongs to this template
                        children = 0
                        disks = [vd.guid for vd in vmt.vdisks]
                        for vdisk in [vdisk.parent_vdisk_guid for item in [vm.vdisks for vm in VMachineList.get_vmachines() if not vm.is_vtemplate] for vdisk in item]:
                            for disk in disks:
                                if vdisk == disk:
                                    children += 1
                        return children
                    self._register_dal_model(11, vm, '#children', 2, func=_children, atype=int)
                    self.instance_oid += 1

        # Regular (non-template) vMachines live under branch 0
        for vm in VMachineList.get_vmachines():
            _guids.add(vm.guid)
            if not self._check_added(vm):
                if not vm.is_vtemplate:
                    self._register_dal_model(0, vm, 'guid', "0")
                    self._register_dal_model(0, vm, 'name', "1")
                    _register_statistics(0, vm, stats_keys)
                    self._register_dal_model(0, vm, 'stored_data', "3", atype=int)
                    self._register_dal_model(0, vm, 'description', "4")
                    self._register_dal_model(0, vm, 'devicename', "5")
                    self._register_dal_model(0, vm, 'dtl_mode', "6")
                    self._register_dal_model(0, vm, 'hypervisorid', "7")
                    self._register_dal_model(0, vm, 'ip', "8")
                    # NOTE(review): 'status' and 'stored_data' are both
                    # registered at OID "10" (and 'stored_data' also at "3");
                    # kept as-is to avoid shifting published OIDs - confirm
                    # which registration is actually intended.
                    self._register_dal_model(0, vm, 'status', "10")
                    self._register_dal_model(0, vm, 'stored_data', "10", atype=int)
                    self._register_dal_model(0, vm, 'snapshots', "11", atype=int)
                    self._register_dal_model(0, vm, 'vdisks', "12", atype=int)
                    self._register_dal_model(0, vm, 'DTL', '13',
                                             func=lambda vm: 'DEGRADED' if all(item == 'DEGRADED' for item in [vd.info['failover_mode'] for vd in vm.vdisks]) else 'OK')
                    self.instance_oid += 1

        for vd in VDiskList.get_vdisks():
            _guids.add(vd.guid)
            if not self._check_added(vd):
                self._register_dal_model(1, vd, 'guid', "0")
                self._register_dal_model(1, vd, 'name', "1")
                _register_statistics(1, vd, vdisk_stats_keys)
                self._register_dal_model(1, vd, 'info', "3", key='stored', atype=int)
                self._register_dal_model(1, vd, 'info', "4", key='failover_mode', atype=int)
                self._register_dal_model(1, vd, 'snapshots', "5", atype=int)
                self.instance_oid += 1

        for pm in PMachineList.get_pmachines():
            _guids.add(pm.guid)
            if not self._check_added(pm):
                self._register_dal_model(2, pm, 'guid', "0")
                self._register_dal_model(2, pm, 'name', "1")
                self._register_dal_model(2, pm, 'host_status', "2")
                self.instance_oid += 1

        for vp in VPoolList.get_vpools():
            _guids.add(vp.guid)
            if not self._check_added(vp):
                self._register_dal_model(3, vp, 'guid', "0")
                self._register_dal_model(3, vp, 'name', "1")
                _register_statistics(3, vp, stats_keys)
                self._register_dal_model(3, vp, 'status', "3")
                self._register_dal_model(3, vp, 'description', "4")
                self._register_dal_model(3, vp, 'vdisks', "5", atype=int)
                self._register_dal_model(3, vp, '#vmachines', "6",
                                         func=lambda vp: len(set([vd.vmachine.guid for vd in vp.vdisks])),
                                         atype=int)
                self.instance_oid += 1

        for storagedriver in StorageDriverList.get_storagedrivers():
            _guids.add(storagedriver.guid)
            if not self._check_added(storagedriver):
                self._register_dal_model(4, storagedriver, 'guid', "0")
                self._register_dal_model(4, storagedriver, 'name', "1")
                self._register_dal_model(4, storagedriver, 'stored_data', "2", atype=int)
                self.instance_oid += 1

        try:
            # Try to load OVS (ALBA) Backends - an optional package
            from ovs.dal.lists.albabackendlist import AlbaBackendList
            for backend in AlbaBackendList.get_albabackends():
                _guids.add(backend.guid)
                if not self._check_added(backend):
                    self._register_dal_model(5, backend, 'guid', 0)
                    self._register_dal_model(5, backend, 'name', 1)
                    # Per-disk sub-tree: 2.<disk>.<attribute>
                    for disk_id in range(len(backend.all_disks)):
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.0'.format(disk_id), key="name", index=disk_id)
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.1'.format(disk_id), key="usage.size", atype=long, index=disk_id)
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.2'.format(disk_id), key="usage.used", atype=long, index=disk_id)
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.3'.format(disk_id), key="usage.available", atype=long, index=disk_id)
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.4'.format(disk_id), key="state.state", index=disk_id)
                        self._register_dal_model(5, backend, 'all_disks', '2.{0}.5'.format(disk_id), key="node_id", index=disk_id)
                    self.instance_oid += 1
        except ImportError:
            print('OVS Backend not present')

    # Drop OIDs whose backing DAL object no longer exists; reload SNMP if any
    # were removed. ('reload' renamed: it shadowed the builtin.)
    requires_reload = False
    for object_guid in list(self.model_oids):
        if object_guid not in _guids:
            self.model_oids.remove(object_guid)
            requires_reload = True
    if requires_reload:
        self._reload_snmp()
def update_from_voldrv(name, storagedriver_id):
    """
    This method will update/create a vmachine based on a given vmx/xml file
    :param name: Name of the vmx
    :param storagedriver_id: Storage Driver hosting the vmachine
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    if pmachine.hvtype not in ['VMWARE', 'KVM']:
        # Only VMware and KVM config files are understood here
        return
    hypervisor = Factory.get(pmachine)
    name = hypervisor.clean_vmachine_filename(name)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    vpool = storagedriver.vpool
    machine_ids = [storagedriver.storagerouter.machine_id
                   for storagedriver in vpool.storagedrivers]
    if not hypervisor.should_process(name, machine_ids=machine_ids):
        VMachineController._logger.info('Ignored invalid file {0}'.format(name))
        return
    if pmachine.hvtype == 'VMWARE':
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        vpool = storagedriver.vpool
    else:
        vpool = None
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    mutex = volatile_mutex('{0}_{1}'.format(name, vpool.guid if vpool is not None else 'none'))
    try:
        mutex.acquire(wait=120)
        # The event may arrive before the file is visible: poll up to 5 times,
        # one second apart, before declaring the machine gone.
        limit = 5
        exists = hypervisor.file_exists(storagedriver, name)
        while limit > 0 and exists is False:
            time.sleep(1)
            exists = hypervisor.file_exists(storagedriver, name)
            limit -= 1
        if exists is False:
            # NOTE(review): vpool is None on the KVM path, so vpool.name would
            # raise here - confirm this branch is only reachable for VMware.
            VMachineController._logger.info('Could not locate vmachine with name {0} on vpool {1}'.format(name, vpool.name))
            vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
            if vmachine is not None:
                VMachineController.delete_from_voldrv(name, storagedriver_id=storagedriver_id)
            return
    finally:
        mutex.release()
    try:
        mutex.acquire(wait=5)
        vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
        if not vmachine:
            # Fall back to a lookup by name before creating a new object
            vmachines = VMachineList.get_vmachine_by_name(name)
            if vmachines is not None:
                vmachine = vmachines[0]
        if not vmachine:
            vmachine = VMachine()
            vmachine.vpool = vpool
            vmachine.pmachine = pmachine
            vmachine.status = 'CREATED'
            vmachine.devicename = name
            vmachine.save()
    finally:
        mutex.release()
    if pmachine.hvtype == 'KVM':
        try:
            mutex.acquire(wait=120)
            VMachineController.sync_with_hypervisor(vmachine.guid, storagedriver_id=storagedriver_id)
            vmachine.status = 'SYNC'
        except Exception:
            # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. Best-effort sync: failure is recorded
            # in the status rather than propagated.
            vmachine.status = 'SYNC_NOK'
        finally:
            mutex.release()
        vmachine.save()