def _activateLogicalVolume(self, name, perform_on_nodes=False):
    """Activates a logical volume on the node/cluster"""
    # Determine the full path of the volume to activate
    volume_path = self._getLogicalVolumePath(name)

    activation_command = ['lvchange', '-a', 'y', '--yes', volume_path]
    try:
        # Activate on the local node first
        System.runCommand(activation_command)

        # Propagate the activation to the rest of the cluster when running
        # as the cluster master
        if perform_on_nodes and self._is_cluster_master:
            def remoteCommand(node):
                remote_disk = self.get_remote_object(remote_node=node,
                                                     registered=False)
                remote_disk.activateLogicalVolume(name=name)

            cluster = self._get_registered_object('cluster')
            cluster.run_remote_command(
                callback_method=remoteCommand,
                nodes=self.vm_object._get_remote_nodes())
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst activating logical volume:\n" + str(e))
def increaseSize(self, increase_size):
    """Increases the size of a VM hard drive, given the size to increase the drive by"""
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.MODIFY_VM, self.vm_object)

    # The disk must exist before it can be resized
    self._ensure_exists()

    # The VM must be powered off whilst the disk is grown
    from mcvirt.virtual_machine.virtual_machine import PowerStates
    if self.vm_object._getPowerState() is not PowerStates.STOPPED:
        raise VmAlreadyStartedException('VM must be stopped before increasing disk size')

    # Clones and clone parents share storage, so cannot be resized
    if self.vm_object.getCloneParent() or self.vm_object.getCloneChildren():
        raise VmIsCloneException('Cannot increase the disk of a cloned VM or a clone.')

    extend_command = ('lvextend', '-L', '+%sM' % increase_size,
                      self._getDiskPath())
    try:
        System.runCommand(extend_command)
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst extending logical volume:\n" + str(e))
def increaseSize(self, increase_size):
    """Increases the size of a VM hard drive, given the size to increase the drive by

    :param increase_size: Amount (in MiB) to grow the logical volume by
    :raises VmAlreadyStartedException: if the VM is not stopped
    :raises VmIsCloneException: if the VM is a clone or has clones
    """
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.MODIFY_VM, self.vm_object)

    # Ensure disk exists
    self._ensure_exists()

    # Ensure VM is stopped (resizing a live disk is not supported)
    from mcvirt.virtual_machine.virtual_machine import PowerStates
    if (self.vm_object._getPowerState() is not PowerStates.STOPPED):
        raise VmAlreadyStartedException(
            'VM must be stopped before increasing disk size')

    # Ensure that VM has not been cloned and is not a clone
    if (self.vm_object.getCloneParent() or
            self.vm_object.getCloneChildren()):
        raise VmIsCloneException(
            'Cannot increase the disk of a cloned VM or a clone.')

    command_args = ('lvextend', '-L', '+%sM' % increase_size,
                    self._getDiskPath())
    try:
        System.runCommand(command_args)
    except MCVirtCommandException, e:
        raise ExternalStorageCommandErrorException(
            "Error whilst extending logical volume:\n" + str(e))
def _drbdSetPrimary(self, allow_two_primaries=False):
    """Performs a Drbd 'primary' on the hard drive Drbd resource

    :param allow_two_primaries: Skip the remote-secondary check (used when
                                dual-primary mode has been enabled)
    :raises DrbdStateException: if the roles do not permit promotion
    """
    local_role_state, remote_role_state = self._drbdGetRole()

    # Check Drbd status
    self._checkDrbdStatus()

    # Ensure that role states are not unknown
    if (local_role_state is DrbdRoleState.UNKNOWN or
            (remote_role_state is DrbdRoleState.UNKNOWN and
             not self._ignore_drbd)):
        raise DrbdStateException('Drbd role is unknown for resource %s' %
                                 self.resource_name)

    # Ensure remote role is secondary.
    # BUG FIX: the original tested 'DrbdRoleState.UNKNOWN and
    # self._ignore_drbd' — the bare enum member is always truthy, so the
    # expression ignored the actual remote role. Compare it explicitly.
    if (not allow_two_primaries and
            remote_role_state is not DrbdRoleState.SECONDARY and
            not (remote_role_state is DrbdRoleState.UNKNOWN and
                 self._ignore_drbd)):
        raise DrbdStateException(
            'Cannot make local Drbd primary if remote Drbd is not secondary: %s' %
            self.resource_name)

    # Set Drbd resource to primary
    System.runCommand([NodeDrbd.DrbdADM, 'primary', self.resource_name])
def createBackupSnapshot(self):
    """Creates a snapshot of the logical volume for backing up and locks the VM

    :returns: Path of the newly-created snapshot logical volume
    :raises BackupSnapshotAlreadyExistsException: if a snapshot already exists
    """
    self._ensure_exists()
    # Ensure the user has permission to delete snapshot backups
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.BACKUP_VM, self.vm_object)

    # Ensure VM is registered locally
    self.vm_object.ensureRegisteredLocally()

    # Obtain logical volume names/paths
    backup_volume_path = self._getLogicalVolumePath(
        self._getBackupLogicalVolume())
    snapshot_logical_volume = self._getBackupSnapshotLogicalVolume()

    # Determine if logical volume already exists
    if self._checkLogicalVolumeActive(snapshot_logical_volume):
        raise BackupSnapshotAlreadyExistsException(
            'The backup snapshot for \'%s\' already exists: %s' %
            (backup_volume_path, snapshot_logical_volume))

    # Lock the VM to prevent changes whilst the backup snapshot exists
    self.vm_object._setLockState(LockStates.LOCKED)

    try:
        System.runCommand([
            'lvcreate', '--snapshot', backup_volume_path,
            '--name', self._getBackupSnapshotLogicalVolume(),
            '--size', self.SNAPSHOT_SIZE
        ])
        return self._getLogicalVolumePath(snapshot_logical_volume)
    except:
        # Unlock the VM before re-raising the failure to the caller
        self.vm_object._setLockState(LockStates.UNLOCKED)
        raise
def _resize_logical_volume(self, name, size, perform_on_nodes=False): """Creates a logical volume on the node/cluster""" # Create command list command_args = [ '/sbin/lvresize', '--size', '%sM' % size, self._getLogicalVolumePath(name) ] try: # Create on local node System.runCommand(command_args) if perform_on_nodes and self._is_cluster_master: def remoteCommand(node): remote_disk = self.get_remote_object(remote_node=node, registered=False) remote_disk.resize_logical_volume(name=name, size=size) cluster = self._get_registered_object('cluster') cluster.run_remote_command( callback_method=remoteCommand, nodes=self.vm_object._get_remote_nodes()) except MCVirtCommandException, e: raise ExternalStorageCommandErrorException( "Error whilst resizing disk logical volume:\n" + str(e))
def _removeLogicalVolume(self, name, ignore_non_existent=False,
                         perform_on_nodes=False):
    """Removes a logical volume from the node/cluster"""
    removal_command = ['lvremove', '-f', self._getLogicalVolumePath(name)]
    try:
        # Skip the local removal only when the caller tolerates missing
        # volumes and the volume is indeed absent
        skip_local = (ignore_non_existent and
                      not self._checkLogicalVolumeExists(name))
        if not skip_local:
            System.runCommand(removal_command)

        if perform_on_nodes and self._is_cluster_master:
            def remoteCommand(node):
                remote_disk = self.get_remote_object(remote_node=node,
                                                     registered=False)
                remote_disk.removeLogicalVolume(
                    name=name,
                    ignore_non_existent=ignore_non_existent)

            cluster = self._get_registered_object('cluster')
            cluster.run_remote_command(
                callback_method=remoteCommand,
                nodes=self.vm_object._get_remote_nodes())
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst removing disk logical volume:\n" + str(e)
        )
def _zeroLogicalVolume(self, name, size, perform_on_nodes=False): """Blanks a logical volume by filling it with null data""" # Obtain the path of the logical volume lv_path = self._getLogicalVolumePath(name) # Create command arguments command_args = ['dd', 'if=/dev/zero', 'of=%s' % lv_path, 'bs=1M', 'count=%s' % size, 'conv=fsync', 'oflag=direct'] try: # Create logical volume on local node System.runCommand(command_args) if perform_on_nodes and self._is_cluster_master: def remoteCommand(node): remote_disk = self.get_remote_object(remote_node=node, registered=False) remote_disk.zeroLogicalVolume(name=name, size=size) cluster = self._get_registered_object('cluster') cluster.run_remote_command(callback_method=remoteCommand, nodes=self.vm_object._get_remote_nodes()) except MCVirtCommandException, e: raise ExternalStorageCommandErrorException( "Error whilst zeroing logical volume:\n" + str(e) )
def _drbdOverwritePeer(self):
    """Force Drbd to overwrite the data on the peer"""
    # '--' separates drbdadm options from the sub-command options
    overwrite_command = [
        NodeDrbd.DrbdADM, '--', '--overwrite-data-of-peer', 'primary',
        self.resource_name
    ]
    System.runCommand(overwrite_command)
def createBackupSnapshot(self):
    """Creates a snapshot of the logical volume for backing up and locks the VM"""
    self._ensure_exists()

    # Only users with backup permissions may create backup snapshots
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.BACKUP_VM, self.vm_object)

    # Snapshots can only be taken on the node the VM is registered on
    self.vm_object.ensureRegisteredLocally()

    # Resolve the source volume path and the snapshot volume name
    source_volume_path = self._getLogicalVolumePath(
        self._getBackupLogicalVolume())
    snapshot_volume = self._getBackupSnapshotLogicalVolume()

    # Refuse to create a second snapshot for the same volume
    if self._checkLogicalVolumeActive(snapshot_volume):
        raise BackupSnapshotAlreadyExistsException(
            'The backup snapshot for \'%s\' already exists: %s' %
            (source_volume_path, snapshot_volume))

    # Lock the VM whilst the snapshot exists
    self.vm_object._setLockState(LockStates.LOCKED)
    try:
        snapshot_command = ['lvcreate', '--snapshot', source_volume_path,
                            '--name',
                            self._getBackupSnapshotLogicalVolume(),
                            '--size', self.SNAPSHOT_SIZE]
        System.runCommand(snapshot_command)
        return self._getLogicalVolumePath(snapshot_volume)
    except:
        # Release the lock before propagating the failure
        self.vm_object._setLockState(LockStates.UNLOCKED)
        raise
def _zeroLogicalVolume(self, name, size, perform_on_nodes=False):
    """Blanks a logical volume by filling it with null data"""
    # Resolve the device path for the volume
    device_path = self._getLogicalVolumePath(name)

    # dd writes `size` 1MiB blocks of zeros, syncing to disk on completion
    zero_command = [
        'dd', 'if=/dev/zero', 'of=%s' % device_path, 'bs=1M',
        'count=%s' % size, 'conv=fsync', 'oflag=direct'
    ]
    try:
        # Blank the volume on the local node
        System.runCommand(zero_command)

        if perform_on_nodes and self._is_cluster_master:
            def remoteCommand(node):
                remote_disk = self.get_remote_object(remote_node=node,
                                                     registered=False)
                remote_disk.zeroLogicalVolume(name=name, size=size)

            cluster = self._get_registered_object('cluster')
            cluster.run_remote_command(
                callback_method=remoteCommand,
                nodes=self.vm_object._get_remote_nodes())
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst zeroing logical volume:\n" + str(e))
def resize(self, size, increase=True, _f=None):
    """Resize volume

    :param size: Size delta (or absolute size) in bytes
    :param increase: When True, grow the volume by `size`; otherwise
                     resize it to exactly `size`
    :param _f: Optional function-tracking object used to record undo data
    """
    self._get_registered_object('auth').assert_user_type(
        'ClusterUser', allow_indirect=True)
    # Ensure volume exists
    self.ensure_exists()

    # Record the current size so a failure can be rolled back.
    # BUG FIX: _f defaults to None, so guard before dereferencing it.
    if _f is not None:
        _f.add_undo_argument(original_size=self.get_size())

    # If increasing disk size, prepend with plus (+)
    if increase:
        size = '+%s' % size

    # Compile arguments for resize
    command_args = ['/sbin/lvresize', '--size', '%sB' % size,
                    self.get_path()]
    try:
        # Perform the resize on the local node
        System.runCommand(command_args)
    except MCVirtCommandException as exc:
        raise ExternalStorageCommandErrorException(
            "Error whilst resizing disk:\n" + str(exc)
        )
def _createLogicalVolume(self, name, size, perform_on_nodes=False):
    """Creates a logical volume on the node/cluster

    :param name: Name of the logical volume to create
    :param size: Size of the volume, in MiB
    :param perform_on_nodes: Whether to also create the volume on remote
                             cluster nodes (cluster master only)
    :raises ExternalStorageCommandErrorException: if lvcreate fails
    """
    volume_group = self._getVolumeGroup()

    # Create command list
    command_args = ['/sbin/lvcreate', volume_group, '--name', name,
                    '--size', '%sM' % size]
    try:
        # Create on local node
        System.runCommand(command_args)

        if perform_on_nodes and self._is_cluster_master:
            def remoteCommand(node):
                remote_disk = self.get_remote_object(remote_node=node,
                                                     registered=False)
                remote_disk.createLogicalVolume(name=name, size=size)

            cluster = self._get_registered_object('cluster')
            cluster.run_remote_command(
                callback_method=remoteCommand,
                nodes=self.vm_object._get_remote_nodes())
    except MCVirtCommandException, e:
        # Remove any logical volumes that had been created if one of them fails
        self._removeLogicalVolume(
            name, ignore_non_existent=True,
            perform_on_nodes=perform_on_nodes)
        raise ExternalStorageCommandErrorException(
            "Error whilst creating disk logical volume:\n" + str(e)
        )
def _removeLogicalVolume(self, name, ignore_non_existent=False, perform_on_nodes=False): """Removes a logical volume from the node/cluster""" # Create command arguments command_args = ['lvremove', '-f', self._getLogicalVolumePath(name)] try: # Determine if logical volume exists before attempting to remove it if (not (ignore_non_existent and not self._checkLogicalVolumeExists(name))): System.runCommand(command_args) if perform_on_nodes and self._is_cluster_master: def remoteCommand(node): remote_disk = self.get_remote_object(remote_node=node, registered=False) remote_disk.removeLogicalVolume( name=name, ignore_non_existent=ignore_non_existent) cluster = self._get_registered_object('cluster') cluster.run_remote_command( callback_method=remoteCommand, nodes=self.vm_object._get_remote_nodes()) except MCVirtCommandException, e: raise ExternalStorageCommandErrorException( "Error whilst removing disk logical volume:\n" + str(e))
def _createLogicalVolume(self, name, size, perform_on_nodes=False):
    """Creates a logical volume on the node/cluster"""
    volume_group = self._getVolumeGroup()

    # lvcreate arguments for a fixed-size volume in the VM volume group
    create_command = [
        '/sbin/lvcreate', volume_group, '--name', name,
        '--size', '%sM' % size
    ]
    try:
        # Create the volume on the local node first
        System.runCommand(create_command)

        if perform_on_nodes and self._is_cluster_master:
            def remoteCommand(node):
                remote_disk = self.get_remote_object(remote_node=node,
                                                     registered=False)
                remote_disk.createLogicalVolume(name=name, size=size)

            cluster = self._get_registered_object('cluster')
            cluster.run_remote_command(
                callback_method=remoteCommand,
                nodes=self.vm_object._get_remote_nodes())
    except MCVirtCommandException as e:
        # Roll back any volumes that were created before the failure
        self._removeLogicalVolume(name, ignore_non_existent=True,
                                  perform_on_nodes=perform_on_nodes)
        raise ExternalStorageCommandErrorException(
            "Error whilst creating disk logical volume:\n" + str(e))
def wipe(self, _f=None):
    """Wipe the volume"""
    # Wiping is restricted to cluster users (directly or indirectly)
    self._get_registered_object('auth').assert_user_type(
        'ClusterUser', allow_indirect=True)
    # Overwrite the entire device with the wipe source
    System.perform_dd(source=System.WIPE,
                      destination=self.get_path(),
                      size=self.get_size())
def _drbdDown(self):
    """Performs a Drbd 'down' on the hard drive Drbd resource"""
    down_command = [NodeDrbd.DrbdADM, 'down', self.resource_name]
    try:
        System.runCommand(down_command)
    except MCVirtCommandException:
        # A busy resource can cause a transient failure; wait briefly and
        # retry once before giving up
        time.sleep(5)
        System.runCommand(down_command)
def client_key_file(self):
    """Obtain the private key for the client key"""
    key_path = self._get_certificate_path('clientkey.pem')
    if not self._ensure_exists(key_path, assert_raise=False):
        # Create a 2048-bit RSA key on first access
        genrsa_command = [self.OPENSSL, 'genrsa', '-out', key_path, '2048']
        System.runCommand(genrsa_command)
    return key_path
def ca_key_file(self):
    """Return/generate the CA private key."""
    # The CA private key never leaves the local machine
    if not self.is_local:
        raise CACertificateNotFoundException('CA key file not available for remote node')
    key_path = self._get_certificate_path('capriv.pem')
    if not self._ensure_exists(key_path, assert_raise=False):
        # Create a fresh 4096-bit RSA key for the CA
        System.runCommand([self.OPENSSL, 'genrsa', '-out', key_path, '4096'])
    return key_path
def server_key_file(self):
    """Obtain the server private key file

    :returns: Path to the server private key, generating it if absent
    :raises CACertificateNotFoundException: when called for a remote node
    """
    # Server keys are only held on the local node
    if not self.is_local:
        raise CACertificateNotFoundException('Server key file not available for remote node')
    path = self._get_certificate_path('serverkey.pem')
    if not self._ensure_exists(path, assert_raise=False):
        # Generate new SSL private key
        System.runCommand([self.OPENSSL, 'genrsa', '-out', path, '2048'])
    return path
def _drbdSetSecondary(self):
    """Performs a Drbd 'secondary' on the hard drive Drbd resource"""
    demote_command = [NodeDrbd.DrbdADM, 'secondary', self.resource_name]
    try:
        System.runCommand(demote_command)
    except MCVirtCommandException:
        # The resource may be momentarily busy; retry once after a delay
        time.sleep(5)
        System.runCommand(demote_command)
def ca_key_file(self):
    """Return/generate the CA private key.

    :returns: Path to the CA private key, generating it if absent
    :raises CACertificateNotFoundException: when called for a remote node
    """
    # The CA private key is only available on the local node
    if not self.is_local:
        raise CACertificateNotFoundException(
            'CA key file not available for remote node')
    path = self._get_certificate_path('capriv.pem')
    if not self._ensure_exists(path, assert_raise=False):
        # Generate a new 4096-bit RSA key for the CA
        System.runCommand([self.OPENSSL, 'genrsa', '-out', path, '4096'])
    return path
def server_key_file(self):
    """Obtain the server private key file"""
    # Server keys are only managed on the local node
    if not self.is_local:
        raise CACertificateNotFoundException(
            'Server key file not available for remote node')
    key_path = self._get_certificate_path('serverkey.pem')
    if not self._ensure_exists(key_path, assert_raise=False):
        # Create a fresh 2048-bit RSA private key
        System.runCommand([self.OPENSSL, 'genrsa', '-out', key_path, '2048'])
    return key_path
def snapshot(self, destination_volume, size):
    """Snapshot volume"""
    # The source volume must exist before it can be snapshotted
    self.ensure_exists()

    snapshot_command = ['lvcreate', '--snapshot', self.get_path(),
                        '--name', destination_volume.name,
                        '--size', str(size)]
    try:
        System.runCommand(snapshot_command)
    except MCVirtCommandException as exc:
        raise ExternalStorageCommandErrorException(
            "Error whilst snapshotting disk:\n" + str(exc)
        )
def dh_params_file(self):
    """Return the path to the DH parameters file, and create it if it does not exist"""
    # DH parameters are only generated/stored on the local node
    if not self.is_local:
        raise CACertificateNotFoundException('DH params file not available for remote node')
    dh_path = self._get_certificate_path('dh_params')
    if not self._ensure_exists(dh_path, assert_raise=False):
        # Generating 2048-bit DH parameters is slow, so log start/finish
        Syslogger.logger().info('Generating DH parameters file')
        System.runCommand([self.OPENSSL, 'dhparam', '-out', dh_path, '2048'])
        Syslogger.logger().info('DH parameters file generated')
    return dh_path
def dh_params_file(self):
    """Return the path to the DH parameters file, and create it if it does not exist

    :returns: Path to the DH parameters file
    :raises CACertificateNotFoundException: when called for a remote node
    """
    if not self.is_local:
        raise CACertificateNotFoundException(
            'DH params file not available for remote node')
    path = self._get_certificate_path('dh_params')
    if not self._ensure_exists(path, assert_raise=False):
        # Generate new DH parameters (slow operation, hence the logging)
        Syslogger.logger().info('Generating DH parameters file')
        System.runCommand([self.OPENSSL, 'dhparam', '-out', path,
                           '2048'])
        Syslogger.logger().info('DH parameters file generated')
    return path
def _sign_csr(self, csr):
    """Sign the given client CSR with the local CA and return the certificate.

    :param csr: Path to the client certificate signing request
    :returns: Contents of the signed client certificate
    """
    self.client_csr = csr
    cert_gen_factory = self._get_registered_object(
        'certificate_generator_factory')
    local_server = cert_gen_factory.get_cert_generator('localhost')
    System.runCommand(['openssl', 'x509', '-req', '-extensions', 'usr_cert',
                       '-in', self.client_csr,
                       '-CA', local_server.ca_pub_file,
                       '-CAkey', local_server.ca_key_file,
                       '-CAcreateserial', '-out', self.client_pub_file,
                       '-outform', 'PEM', '-days', '10240', '-sha256'])

    # Regenerate libvirtd configuration, allowing access to this certificate
    if self.is_local:
        self._get_registered_object('libvirt_config').hard_restart = True
    # NOTE(review): generate_config() is assumed to run unconditionally —
    # the single-line source is ambiguous about its indentation; confirm
    self._get_registered_object('libvirt_config').generate_config()
    return self._read_file(self.client_pub_file)
def test_verify(self):
    """Test the Drbd verification for both in-sync and out-of-sync Drbd volumes"""
    # Create Virtual machine
    test_vm_object = self.create_vm('TEST_VM_1', 'Drbd')
    self.assertTrue(
        self.vm_factory.check_exists(self.test_vms['TEST_VM_1']['name']))

    # Wait for 10 seconds after creation to ensure that Drbd
    # goes into connection -> Resyncing state
    time.sleep(10)

    # Wait until the Drbd resource is synced
    for disk_object in test_vm_object.getHardDriveObjects():
        self.rpc.annotate_object(disk_object)
        wait_timeout = 6
        while (disk_object.drbdGetConnectionState() !=
                DrbdConnectionState.CONNECTED):
            # If the Drbd volume has not connected within 1 minute, throw
            # an exception
            if not wait_timeout:
                raise DrbdVolumeNotInSyncException(
                    'Wait for Drbd connection timed out')
            time.sleep(10)
            wait_timeout -= 1

    # Perform verification on VM, using the argument parser
    self.parser.parse_arguments(
        'verify %s' % self.test_vms['TEST_VM_1']['name'])

    # Ensure the disks are in-sync.
    # BUG FIX: 'self.rpc_annotate_object' does not exist and would raise
    # AttributeError; the correct call, used elsewhere in this test, is
    # self.rpc.annotate_object
    for disk_object in test_vm_object.getHardDriveObjects():
        self.rpc.annotate_object(disk_object)
        self.assertTrue(disk_object._isInSync())

    # Obtain the Drbd raw volume for the VM and write random data to it
    for disk_object in test_vm_object.getHardDriveObjects():
        self.rpc.annotate_object(disk_object)
        drbd_raw_suffix = disk_object.Drbd_RAW_SUFFIX
        raw_logical_volume_name = disk_object._getLogicalVolumeName(
            drbd_raw_suffix)
        raw_logical_volume_path = disk_object._getLogicalVolumePath(
            raw_logical_volume_name)
        System.runCommand(['dd', 'if=/dev/urandom',
                           'of=%s' % raw_logical_volume_path,
                           'bs=1M', 'count=8'])
        System.runCommand(['sync'])

    # Perform another verification and ensure that an exception is raised
    with self.assertRaises(DrbdVolumeNotInSyncException):
        self.parser.parse_arguments(
            'verify %s' % self.test_vms['TEST_VM_1']['name'])

    # Attempt to start the VM, ensuring an exception is raised
    with self.assertRaises(DrbdVolumeNotInSyncException):
        test_vm_object.start()
def _drbdGetRole(self):
    """Returns the role of the Drbd resource"""
    # 'drbdadm role' prints the roles as '<local>/<remote>'
    _, stdout, _ = System.runCommand([NodeDrbd.DrbdADM, 'role',
                                      self.resource_name])
    local_state, remote_state = stdout.strip().split('/')
    return (DrbdRoleState(local_state), DrbdRoleState(remote_state))
def authenticate_username_password(self, args, ignore_cluster):
    """Authenticate using username and password

    :param args: Parsed argument namespace (may carry username/password)
    :param ignore_cluster: Passed through to the RPC Connection

    NOTE(review): the original source was garbled by credential masking
    ('******'); the password prompt has been reconstructed to mirror the
    username prompt — confirm against upstream history.
    """
    # Check if user/password have been passed. Else, ask for them.
    username = args.username if args.username else System.getUserInput(
        'Username: '
    ).rstrip()
    password = args.password if args.password else System.getUserInput(
        'Password: ', password=True
    ).rstrip()

    # Establish the RPC connection and cache the session details
    self.rpc = Connection(username=username, password=password,
                          ignore_cluster=ignore_cluster)
    self.session_id = self.rpc.session_id
    self.username = self.rpc.username
def activate(self, _f=None):
    """Activate volume"""
    # Activation is restricted to cluster users (directly or indirectly)
    self._get_registered_object('auth').assert_user_type(
        'ClusterUser', allow_indirect=True)
    # The volume must exist before it can be activated
    self.ensure_exists()

    activate_command = ['lvchange', '-a', 'y', '--yes', self.get_path()]
    try:
        # Activate on the local node
        System.runCommand(activate_command)
    except MCVirtCommandException as exc:
        raise ExternalStorageCommandErrorException(
            "Error whilst activating logical volume:\n" + str(exc)
        )
def create(self, size, _f=None):
    """Create volume in storage backend"""
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.MANAGE_STORAGE_VOLUME)

    # Refuse to clobber an existing volume
    if self.check_exists():
        raise VolumeAlreadyExistsError('Volume (%s) already exists' % self.name)

    try:
        # Create the volume locally by writing zeros over its full extent
        System.perform_dd(source=System.WIPE, destination=self.get_path(),
                          size=size)
    except DDCommandError as exc:
        raise ExternalStorageCommandErrorException(
            "Error whilst creating disk logical volume:\n" + str(exc)
        )
def get_free_vg_space(self):
    """Returns the free space in megabytes."""
    # Query the VM volume group for its free space in megabytes, without
    # headings or a unit suffix, so the output parses directly as a float
    vgs_command = ['vgs', MCVirtConfig().get_config()['vm_storage_vg'],
                   '-o', 'free', '--noheadings', '--nosuffix',
                   '--units', 'm']
    _, out, err = System.runCommand(vgs_command, False,
                                    DirectoryLocation.BASE_STORAGE_DIR)
    return float(out)
def resync(self, source_node=None, auto_determine=False): """Perform a resync of a Drbd hard drive""" # Ensure user has privileges to create a Drbd volume self._get_registered_object('auth').assert_permission( PERMISSIONS.MANAGE_DRBD, self.vm_object) if source_node: if source_node not in self.vm_object.getAvailableNodes(): raise InvalidNodesException('Invalid node name') if auto_determine: raise TooManyParametersException( 'Only one of source_node an auto_determine should be specified' ) else: if not auto_determine: raise ArgumentParserException( 'Either source_node or auto_determine must be specified' ) elif self.vm_object.getNode(): source_node = self.vm_object.getNode() else: raise VmNotRegistered('Cannot auto-determine node - VM is not registered') # Check Drbd state of disk if self._drbdGetConnectionState() != DrbdConnectionState.CONNECTED: raise DrbdStateException( 'Drbd resource must be connected before performing a resync: %s' % self.resource_name) if source_node == get_hostname(): System.runCommand([NodeDrbd.DrbdADM, 'invalidate-remote', self.resource_name]) # Monitor the Drbd status, until the VM has started syncing while True: if self._drbdGetConnectionState() == DrbdConnectionState.SYNC_SOURCE: break time.sleep(5) # Monitor the Drbd status, until the VM has finished syncing while True: if self._drbdGetConnectionState() != DrbdConnectionState.SYNC_SOURCE: break time.sleep(5) elif not self._cluster_disable: remote_object = self.get_remote_object(remote_node=source_node) remote_object.resync(source_node=source_node)
def _sign_csr(self, csr):
    """Sign a client CSR with the local CA and return the certificate."""
    self.client_csr = csr

    # The 'localhost' certificate generator holds the CA keypair
    local_server = CertificateGenerator('localhost')
    sign_command = [
        'openssl', 'x509', '-req', '-extensions', 'usr_cert',
        '-in', self.client_csr,
        '-CA', local_server.ca_pub_file,
        '-CAkey', local_server.ca_key_file,
        '-CAcreateserial', '-out', self.client_pub_file,
        '-outform', 'PEM', '-days', '10240', '-sha256'
    ]
    System.runCommand(sign_command)

    # Regenerate libvirtd configuration, allowing access to this certificate
    libvirt_config = self._get_registered_object('libvirt_config')
    if self.is_local:
        libvirt_config.hard_restart = True
    libvirt_config.generate_config()
    return self._read_file(self.client_pub_file)
def clone(self, destination_vm_object):
    """Clone a VM, using snapshotting, attaching it to the new VM object"""
    self._ensure_exists()

    # Register a new disk of the same driver/ID against the destination VM
    cloned_disk = Local(vm_object=destination_vm_object,
                        driver=self.driver, disk_id=self.disk_id)
    self._register_object(cloned_disk)

    clone_volume_name = cloned_disk._getDiskName()
    source_size = self.getSize()

    # Snapshot the source volume under the new disk's name
    snapshot_command = ('lvcreate', '-L', '%sM' % source_size, '-s',
                        '-n', clone_volume_name, self._getDiskPath())
    try:
        System.runCommand(snapshot_command)
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst cloning disk logical volume:\n" + str(e)
        )
def handle_add(self, p_, args):
    """Handle add of node"""
    cluster_object = p_.rpc.get_connection('cluster')
    # Use the connect string from the arguments, prompting when absent
    connect_string = (args.connect_string if args.connect_string
                      else System.getUserInput('Enter Connect String: '))
    cluster_object.add_node(connect_string)
    p_.print_status('Successfully added node')
def clone(self, destination_vm_object):
    """Clone a VM, using snapshotting, attaching it to the new VM object

    :param destination_vm_object: VM object to attach the cloned disk to
    """
    self._ensure_exists()
    new_disk = Local(vm_object=destination_vm_object, driver=self.driver,
                     disk_id=self.disk_id)
    self._register_object(new_disk)
    new_logical_volume_name = new_disk._getDiskName()
    disk_size = self.getSize()

    # Perform a logical volume snapshot
    command_args = ('lvcreate', '-L', '%sM' % disk_size, '-s',
                    '-n', new_logical_volume_name, self._getDiskPath())
    try:
        System.runCommand(command_args)
    except MCVirtCommandException, e:
        raise ExternalStorageCommandErrorException(
            "Error whilst cloning disk logical volume:\n" + str(e))
def handle_change_password(self, p_, args):
    """Handle change password"""
    user_factory = p_.rpc.get_connection('user_factory')

    # Fall back to the authenticated user when no target is given
    if args.target_user:
        target_user = args.target_user
    else:
        target_user = p_.username

    user = user_factory.get_user_by_username(target_user)
    p_.rpc.annotate_object(user)

    # Prompt for a password unless one was supplied on the command line
    if args.new_password:
        new_password = args.new_password
    else:
        new_password = System.getNewPassword()

    user.set_password(new_password)
    p_.print_status('Updated password')
def ca_pub_file(self):
    """Return/generate the CA pub file

    :returns: Path to the CA public certificate, generating it (local
              node only) if it does not yet exist
    """
    # The local node anchors the CA certificate under /etc/mcvirt
    base_dir = '/etc/mcvirt' if self.is_local else None
    path = self._get_certificate_path('cacert.pem', base_dir=base_dir)
    if not self._ensure_exists(path, assert_raise=False) and self.is_local:
        # Generate public key for CA
        System.runCommand([self.OPENSSL, 'req', '-x509', '-new', '-nodes',
                           '-key', self.ca_key_file, '-sha256', '-days',
                           '10240', '-out', path, '-subj',
                           '%s_ca' % self.ssl_dn])
        # NOTE(review): the symlink creation is placed inside the
        # generation branch — the single-line source is ambiguous; confirm
        if self.is_local:
            symlink_path = self._get_certificate_path('cacert.pem')
            os.symlink(path, symlink_path)
    return path
def server_pub_file(self):
    """Obtain the server public key file

    :returns: Path to the server certificate, generating and signing it
              with the local CA if it does not yet exist
    :raises CACertificateNotFoundException: when called for a remote node
    """
    if not self.is_local:
        raise CACertificateNotFoundException('Server public key not available for remote node')
    path = self._get_certificate_path('servercert.pem')
    if not self._ensure_exists(path, assert_raise=False):
        # Generate certificate request
        ssl_csr = os.path.join(self.ssl_directory, '%s.csr' % self.server)
        System.runCommand([self.OPENSSL, 'req', '-new', '-key',
                           self.server_key_file, '-out', ssl_csr,
                           '-subj', self.ssl_dn])
        # Generate public key
        System.runCommand([self.OPENSSL, 'x509', '-req', '-in', ssl_csr,
                           '-CA', self.ca_pub_file,
                           '-CAkey', self.ca_key_file,
                           '-CAcreateserial', '-out', path,
                           '-outform', 'PEM', '-days', '10240',
                           '-sha256'])
    return path
def verify(self):
    """Performs a verification of a Drbd hard drive

    :returns: True when the resource is in sync after verification
    :raises DrbdVolumeNotInSyncException: if the verification fails
    :raises DrbdStateException: if the resource is not connected
    """
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.MANAGE_DRBD, self.vm_object
    )

    # Check Drbd state of disk
    if self._drbdGetConnectionState() != DrbdConnectionState.CONNECTED:
        raise DrbdStateException(
            'Drbd resource must be connected before performing a verification: %s' %
            self.resource_name)

    # Reset the disk to be marked in a consistent state
    self.setSyncState(True)

    try:
        # Perform a drbdadm verification
        System.runCommand([NodeDrbd.DrbdADM, 'verify', self.resource_name])

        # Monitor the Drbd status, until the VM has started syncing
        while True:
            if self._drbdGetConnectionState() == DrbdConnectionState.VERIFY_S:
                break
            time.sleep(5)

        # Monitor the Drbd status, until the VM has finished syncing
        while True:
            if self._drbdGetConnectionState() != DrbdConnectionState.VERIFY_S:
                break
            time.sleep(5)
    except Exception:
        # If an exception is thrown during the verify, mark the VM as
        # not in-sync
        self.setSyncState(False)

    # Report the final sync state to the caller
    if self._isInSync():
        return True
    else:
        raise DrbdVolumeNotInSyncException('The Drbd verification for \'%s\' failed' %
                                           self.resource_name)
def delete(self, ignore_non_existent=False, _f=None):
    """Delete volume"""
    self._get_registered_object('auth').assert_user_type(
        'ClusterUser', allow_indirect=True)

    remove_command = ['lvremove', '-f', self.get_path()]

    # A missing volume is an error unless the caller opted to ignore it
    if not self.check_exists() and not ignore_non_existent:
        raise VolumeDoesNotExistError(
            'Volume (%s) does not exist' % self.name
        )

    try:
        System.runCommand(remove_command)
    except MCVirtCommandException as exc:
        raise ExternalStorageCommandErrorException(
            "Error whilst removing logical volume:\n" + str(exc)
        )
def ca_pub_file(self):
    """Return/generate the CA pub file"""
    # The local node anchors the CA certificate under /etc/mcvirt
    base_dir = '/etc/mcvirt' if self.is_local else None
    cert_path = self._get_certificate_path('cacert.pem', base_dir=base_dir)
    if not self._ensure_exists(cert_path, assert_raise=False) and self.is_local:
        # Create a self-signed CA certificate from the CA private key
        ca_command = [
            self.OPENSSL, 'req', '-x509', '-new', '-nodes',
            '-key', self.ca_key_file, '-sha256', '-days', '10240',
            '-out', cert_path, '-subj', '%s_ca' % self.ssl_dn
        ]
        System.runCommand(ca_command)
        if self.is_local:
            symlink_path = self._get_certificate_path('cacert.pem')
            os.symlink(cert_path, symlink_path)
    return cert_path
def _get_logical_volume_size(self, name):
    """Obtains the size of a logical volume

    :param name: Name of the logical volume
    :returns: Size of the volume in megabytes, as a float
    :raises ExternalStorageCommandErrorException: if the lvs command fails
    """
    # Use 'lvs' to obtain the size of the disk
    command_args = ('lvs', '--nosuffix', '--noheadings', '--units', 'm',
                    '--options', 'lv_size',
                    self._getLogicalVolumePath(name))
    try:
        _, command_output, _ = System.runCommand(command_args)
    except MCVirtCommandException as e:
        raise ExternalStorageCommandErrorException(
            "Error whilst obtaining the size of the logical volume:\n" +
            str(e))

    # BUG FIX: the original obtained the output but never returned the
    # size; parse the lvs output (megabytes, no suffix) and return it
    return float(command_output.strip())
def deleteBackupSnapshot(self):
    """Deletes the backup snapshot for the disk and unlocks the VM

    :raises BackupSnapshotDoesNotExistException: if no snapshot exists
    """
    self._ensure_exists()
    # Ensure the user has permission to delete snapshot backups
    self._get_registered_object('auth').assert_permission(
        PERMISSIONS.BACKUP_VM, self.vm_object)

    # Ensure the snapshot logical volume exists.
    # BUG FIX: the error path previously referenced an undefined 'config'
    # object (NameError); the method is defined on self.
    if not self._checkLogicalVolumeActive(
            self._getBackupSnapshotLogicalVolume()):
        raise BackupSnapshotDoesNotExistException(
            'The backup snapshot for \'%s\' does not exist' %
            self._getLogicalVolumePath(self._getBackupLogicalVolume()))

    System.runCommand([
        'lvremove', '-f',
        self._getLogicalVolumePath(
            self._getBackupSnapshotLogicalVolume())
    ])

    # Unlock the VM
    self.vm_object._setLockState(LockStates.UNLOCKED)
def gitRemove(self, message=''):
    """Remove and commits a configuration file

    :param message: Base commit message; user/node details are appended
    """
    # Nothing to do when version control is not configured
    if not self._checkGitRepo():
        return

    # Determine the user performing the removal, if a session is available.
    # BUG FIX: the session object was fetched twice on consecutive,
    # identical lines; the duplicate has been removed.
    session_obj = self._get_registered_object('mcvirt_session')
    username = ''
    user = None
    if session_obj:
        try:
            user = session_obj.get_proxy_user_object()
        except UserDoesNotExistException:
            pass
    if user:
        username = session_obj.get_proxy_user_object().get_username()

    # Record who performed the change and on which node
    message += "\nUser: %s\nNode: %s" % (username, get_hostname())

    try:
        System.runCommand([self.GIT, 'rm', '--cached', self.config_file],
                          cwd=DirectoryLocation.BASE_STORAGE_DIR)
        System.runCommand([self.GIT, 'commit', '-m', message],
                          cwd=DirectoryLocation.BASE_STORAGE_DIR)
        System.runCommand([self.GIT, 'push'],
                          raise_exception_on_failure=False,
                          cwd=DirectoryLocation.BASE_STORAGE_DIR)
    except:
        # Git failures are deliberately non-fatal: configuration changes
        # must proceed even when version control is unavailable
        pass
def _setTwoPrimariesConfig(self, allow=False):
    """Configures Drbd to temporarily allow or re-disable whether two
    allow two primaries

    :param allow: True to enable dual-primary mode, False to disable it
    :raises DrbdStateException: if the role states forbid the change
    """
    if allow:
        # Configure Drbd on both nodes to allow Drbd volume to be set to primary
        self._checkDrbdStatus()
        System.runCommand([NodeDrbd.DrbdADM, 'net-options',
                           self.resource_name,
                           '--allow-two-primaries'])
    else:
        # Get disk role state
        local_role, remote_role = self._drbdGetRole()

        # Ensure neither states are unknown
        if (local_role is DrbdRoleState.UNKNOWN or
                remote_role is DrbdRoleState.UNKNOWN):
            raise DrbdStateException(
                'Cannot disable two-primaries configuration as'
                ' local or remote role is currently unknown')

        # Ensure that only one node has been set to primary
        if (local_role is DrbdRoleState.PRIMARY and
                remote_role is DrbdRoleState.PRIMARY):
            raise DrbdStateException(
                'Both nodes are set to primary whilst attempting'
                ' to disable dual-primary mode')

        System.runCommand([NodeDrbd.DrbdADM, 'net-options',
                           self.resource_name,
                           '--allow-two-primaries=no'])

    # Configure remote node(s)
    if self._is_cluster_master:
        cluster_instance = self._get_registered_object('cluster')

        def remoteCommand(node):
            remote_disk = self.get_remote_object(remote_node=node)
            remote_disk.setTwoPrimariesConfig(allow=allow)

        cluster_instance.run_remote_command(
            callback_method=remoteCommand,
            nodes=self.vm_object._get_remote_nodes())
def verify(self):
    """Perform a verification of a Drbd hard drive.

    Returns True on success; raises DrbdVolumeNotInSyncException if the
    verification fails.
    """
    # Verification may only run against a connected resource
    if self._drbdGetConnectionState() != DrbdConnectionState.CONNECTED:
        raise DrbdStateException(
            'Drbd resource must be connected before performing a verification: %s' %
            self.resource_name)

    # Mark the disk consistent before the check starts
    self.setSyncState(True)
    try:
        # Kick off the drbdadm verification
        System.runCommand([NodeDrbd.DrbdADM, 'verify', self.resource_name])

        # Poll until the resource reports that verification has started...
        while self._drbdGetConnectionState() != DrbdConnectionState.VERIFY_S:
            time.sleep(5)

        # ...and then until it has finished
        while self._drbdGetConnectionState() == DrbdConnectionState.VERIFY_S:
            time.sleep(5)
    except Exception:
        # Any failure during the verification marks the volume as
        # out-of-sync; the in-sync check below will then raise
        self.setSyncState(False)

    if not self._isInSync():
        raise DrbdVolumeNotInSyncException('The Drbd verification for \'%s\' failed'
                                           % self.resource_name)
    return True
def duplicate(self, destination_vm_object):
    """Clone the hard drive and attach it to the new VM object.

    Creates a new disk of the same type, size, ID and driver on the
    destination VM and copies the data across with dd. On copy failure the
    partially-created destination disk is deleted before re-raising.
    """
    self._ensure_exists()

    # Create new disk object, using the same type, size and disk_id
    new_disk_object = self.__class__(vm_object=destination_vm_object,
                                     disk_id=self.disk_id,
                                     driver=self.driver)
    self._register_object(new_disk_object)
    new_disk_object.create(self.getSize())

    source_drbd_block_device = self._getDiskPath()
    destination_drbd_block_device = new_disk_object._getDiskPath()

    # Use dd to duplicate the old disk to the new disk
    command_args = ('dd', 'if=%s' % source_drbd_block_device,
                    'of=%s' % destination_drbd_block_device, 'bs=1M')
    try:
        System.runCommand(command_args)
    # 'as e' replaces the deprecated ', e' form (valid on Python 2.6+
    # and required on Python 3); behavior is unchanged
    except MCVirtCommandException as e:
        # Clean up the partially-created destination disk before failing
        new_disk_object.delete()
        raise ExternalStorageCommandErrorException(
            "Error whilst duplicating disk logical volume:\n" + str(e))
def server_pub_file(self):
    """Obtain the server public key file, generating it if absent.

    Only available on the local node; raises CACertificateNotFoundException
    for remote nodes.
    """
    if not self.is_local:
        raise CACertificateNotFoundException(
            'Server public key not available for remote node')

    path = self._get_certificate_path('servercert.pem')
    if self._ensure_exists(path, assert_raise=False):
        # Certificate already present - nothing to generate
        return path

    # Build a certificate signing request for this node...
    ssl_csr = os.path.join(self.ssl_directory, '%s.csr' % self.server)
    csr_command = [self.OPENSSL, 'req', '-new',
                   '-key', self.server_key_file,
                   '-out', ssl_csr,
                   '-subj', self.ssl_dn]
    System.runCommand(csr_command)

    # ...then sign it with the CA to produce the public certificate
    sign_command = [self.OPENSSL, 'x509', '-req',
                    '-in', ssl_csr,
                    '-CA', self.ca_pub_file,
                    '-CAkey', self.ca_key_file,
                    '-CAcreateserial',
                    '-out', path,
                    '-outform', 'PEM',
                    '-days', '10240',
                    '-sha256']
    System.runCommand(sign_command)

    return path
def _calculateMetaDataSize(self):
    """Determine the size, in mebibytes, of the Drbd meta volume.

    Follows the Drbd external meta data calculation formula:
    https://drbd.linbit.com/users-guide/ch-internals.html#s-external-meta-data
    """
    raw_logical_volume_name = self._getLogicalVolumeName(self.Drbd_RAW_SUFFIX)
    logical_volume_path = self._getLogicalVolumePath(raw_logical_volume_name)

    # Obtain size of the raw volume, in 512-byte sectors, from blockdev
    _, raw_size_sectors, _ = System.runCommand(['blockdev', '--getsz',
                                                logical_volume_path])
    raw_size_sectors = int(raw_size_sectors.strip())

    # Obtain the device's sector size, in bytes
    _, sector_size, _ = System.runCommand(['blockdev', '--getss',
                                           logical_volume_path])
    sector_size = int(sector_size.strip())

    # ceil(raw_size_sectors / 2**18) * 8 + 72, per the Drbd formula.
    # -(-a // b) is exact integer ceiling division; the original applied
    # math.ceil AFTER truncating integer division, so the ceil was a no-op.
    meta_size_sectors = -(-raw_size_sectors // 262144) * 8 + 72

    # Convert the sector count to mebibytes, rounding up.
    # BUG FIX: the original divided by (1024 ^ 2), which is bitwise XOR
    # (= 1026), not 1024**2 (= 1048576), oversizing the result ~1000x.
    return -(-(meta_size_sectors * sector_size) // (1024 ** 2))
def gitRemove(self, message=''):
    """Remove and commits a configuration file.

    Best-effort: git failures are suppressed so they cannot break the
    configuration change itself.
    """
    from auth.session import Session
    if self._checkGitRepo():
        # Record who made the change and on which node
        message += "\nUser: %s\nNode: %s" % (
            Session.get_current_user_object().get_username(),
            get_hostname())
        try:
            System.runCommand(
                [self.GIT, 'rm', '--cached', self.config_file],
                cwd=DirectoryLocation.BASE_STORAGE_DIR)
            System.runCommand([self.GIT, 'commit', '-m', message],
                              cwd=DirectoryLocation.BASE_STORAGE_DIR)
            System.runCommand([self.GIT, 'push'],
                              raise_exception_on_failure=False,
                              cwd=DirectoryLocation.BASE_STORAGE_DIR)
        except Exception:
            # Git tracking is best-effort. (Was a bare 'except', which
            # also swallowed SystemExit/KeyboardInterrupt.)
            pass