def set_kdump_service(config, status):
    if config and config.get('activate') is False:
        # activation of kernel crash dump setup is unwanted
        return
    calibrated = _kdump_calibrate(
        config.get('crash_kernel_high') if config else None,
        config.get('crash_kernel_low') if config else None
    )
    grub_defaults_file = '/etc/default/grub'
    with open(grub_defaults_file, 'r') as grub_defaults_handle:
        grub_defaults_data = grub_defaults_handle.read()
    grub_defaults_shasum_orig = hashlib.sha256(
        grub_defaults_data.encode()
    ).hexdigest()
    grub_defaults_data = re.sub(
        r'crashkernel=[0-9]+M,low',
        'crashkernel={0}M,low'.format(calibrated['Low']),
        grub_defaults_data
    )
    grub_defaults_data = re.sub(
        r'crashkernel=[0-9]+M,high',
        'crashkernel={0}M,high'.format(calibrated['High']),
        grub_defaults_data
    )
    grub_defaults_shasum_new = hashlib.sha256(
        grub_defaults_data.encode()
    ).hexdigest()
    if grub_defaults_shasum_orig != grub_defaults_shasum_new:
        # crashkernel values changed: update grub and request a reboot
        with open(grub_defaults_file, 'w') as grub_defaults_handle:
            grub_defaults_handle.write(grub_defaults_data)
        status.set_reboot_required()
        Command.run(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
    Command.run(['systemctl', 'restart', 'kdump'])
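
# Illustrative sketch (not part of the service code): shows how the re.sub
# calls in set_kdump_service rewrite an existing crashkernel entry in
# /etc/default/grub. The GRUB line and the 96M/512M values are made-up
# example data, not real calibration results.
import re

_example_grub_line = (
    'GRUB_CMDLINE_LINUX_DEFAULT='
    '"splash crashkernel=72M,low crashkernel=256M,high"'
)
_example_rewritten = re.sub(
    r'crashkernel=[0-9]+M,low', 'crashkernel=96M,low', _example_grub_line
)
_example_rewritten = re.sub(
    r'crashkernel=[0-9]+M,high', 'crashkernel=512M,high', _example_rewritten
)
# _example_rewritten is now:
# 'GRUB_CMDLINE_LINUX_DEFAULT="splash crashkernel=96M,low crashkernel=512M,high"'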
def _search_for(self, name, in_file):
    search = '^' + name + ':'
    try:
        Command.run(['grep', '-q', search, in_file])
    except Exception:
        return False
    return True
def import_repository_sources(packages_config, source_provider):
    repo_sources = packages_config.get('repository') or []
    import_data = {}
    for repository in repo_sources:
        repository_name = repository['name']
        repository_location = '/var/lib/localrepos/{0}'.format(
            repository_name
        )
        Path.create(repository_location)
        sync_source = repository['source']
        if os.path.isdir(sync_source):
            # normalize the source path for rsync and make sure a '/'
            # is appended at the end of the directory specification.
            # The trailing slash causes rsync to sync the contents of
            # the directory into the new location, but not the
            # directory itself.
            sync_source = os.path.normpath(sync_source)
            sync_source += os.sep
        Command.run(['rsync', '-zav', sync_source, repository_location])
        if repository.get('source_prefix'):
            import_data[repository_name] = [
                ''.join([repository['source_prefix'], repository_location])
            ]
        else:
            import_data[repository_name] = [repository_location]
        install_items = repository.get('install') or []
        import_data[repository_name].append(list(install_items))
    return import_data
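
# Illustrative sketch (not part of the service code): the shape of the
# mapping returned by import_repository_sources, assuming a hypothetical
# config with one repository named 'local-sap' that lists two packages
# to install. Names and paths are example data only.
_example_import_data = {
    'local-sap': [
        '/var/lib/localrepos/local-sap',   # repository location (index 0)
        ['saptune', 'supportutils']        # install items (index 1)
    ]
}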
def write_service_log(install_source):
    log_file = os.sep.join([install_source.location, 'workload.log'])
    bash_command = ' '.join(
        ['systemctl', 'status', '-l', '--all', '&>', log_file]
    )
    Command.run(['bash', '-c', bash_command], raise_on_error=False)
    Command.run(
        ['cp', Defaults.get_log_file(), install_source.location],
        raise_on_error=False
    )
def mount_config_source():
    config_type = namedtuple('config_type', ['name', 'location', 'label'])
    azure_config = config_type(
        name=os.path.basename(Defaults.get_config_file_name()),
        location='/mnt',
        label='azconfig'
    )
    mountpoint_result = Command.run(
        ['mountpoint', azure_config.location], raise_on_error=False
    )
    if mountpoint_result.returncode == 0:
        # The azure_config location is already mounted
        return azure_config
    lun_result = Command.run(
        [
            'mount', '-o', 'sync',
            '--label', azure_config.label, azure_config.location
        ],
        raise_on_error=False
    )
    if lun_result.returncode != 0:
        iso_result = Command.run(
            ['mount', '/dev/dvd', azure_config.location],
            raise_on_error=False
        )
        if iso_result.returncode != 0:
            raise AzureHostedConfigFileSourceMountException(
                'Source mount failed with: primary: {0}, fallback: {1}'.format(
                    lun_result.error, iso_result.error
                )
            )
    return azure_config
def main():
    """
    Azure Li/Vli config file lookup

    Lookup the config file as provided by the Azure Li/Vli storage
    backend and make it locally available at the location described
    by Defaults.get_config_file_name()
    """
    Logger.setup()
    status = StatusReport('config_lookup')
    azure_config = Defaults.mount_config_source()
    try:
        azure_config_lookup_paths = [azure_config.location]
        azure_config_file = Path.which(
            azure_config.name, azure_config_lookup_paths
        )
        if not azure_config_file:
            raise AzureHostedConfigFileNotFoundException(
                'Config file not found at: {0}/{1}'.format(
                    azure_config.location, azure_config.name
                )
            )
        Path.create(
            os.path.dirname(Defaults.get_config_file_name())
        )
        Command.run(
            ['cp', azure_config_file, Defaults.get_config_file_name()]
        )
        os.chmod(Defaults.get_config_file_name(), 0o600)
        status.set_success()
    finally:
        Defaults.umount_config_source(azure_config)
def user_add(self, user_name, options):
    """
    Add user with options

    :param str user_name: user name
    :param list options: useradd options
    """
    Command.run(['useradd'] + options + [user_name])
def user_modify(self, user_name, options):
    """
    Modify user with options

    :param str user_name: user name
    :param list options: usermod options
    """
    Command.run(['usermod'] + options + [user_name])
def group_add(self, group_name, options):
    """
    Add group with options

    :param str group_name: group name
    :param list options: groupadd options
    """
    Command.run(['groupadd'] + options + [group_name])
def enable_extra_kernel_modules():
    modules = Defaults.get_stonith_needed_modules()
    file_content = ''
    for module in modules:
        file_content = ''.join([file_content, os.linesep, module])
        Command.run(['modprobe', module])
    load_module_path = Defaults.get_extra_kernel_modules_file_name()
    _write_file(load_module_path, file_content)
def test_run_raises_error(self, mock_popen, mock_which):
    mock_which.return_value = 'command'
    mock_process = Mock()
    mock_process.communicate = Mock(
        return_value=[str.encode('stdout'), str.encode('stderr')]
    )
    mock_process.returncode = 1
    mock_popen.return_value = mock_process
    with raises(AzureHostedCommandException):
        Command.run(['command', 'args'])
def setup_home_for_user(self, user_name, group_name, home_path):
    """
    Setup user home directory

    :param str user_name: user name
    :param str group_name: group name
    :param str home_path: path name
    """
    user_and_group = user_name + ':' + group_name
    Command.run(['chown', '-R', user_and_group, home_path])
def set_energy_performance_settings():
    cpupower_calls = [
        # set CPU Frequency/Voltage scaling
        ['cpupower', 'frequency-set', '-g', 'performance'],
        # set low latency and maximum performance
        ['cpupower', 'set', '-b', '0']
    ]
    for cpupower_call in cpupower_calls:
        Command.run(cpupower_call)
    _write_boot_local(cpupower_calls)
def main():
    """
    Azure Li/Vli storage mount setup

    Updates fstab with new storage mount entries and activates
    them in the scope of an Azure Li/Vli instance
    """
    Logger.setup()
    status = StatusReport('storage')
    config = RuntimeConfig(Defaults.get_config_file())
    storage_config = config.get_storage_config()
    storage_errors = []
    if storage_config:
        fstab_entries = []
        for storage in storage_config:
            try:
                if 'device' not in storage or 'mount' not in storage:
                    raise AzureHostedStorageMountException(
                        'At least one of {0} missing in {1}'.format(
                            ('device', 'mount'), storage
                        )
                    )
                Path.create(storage['mount'])
                fstab_entries.append(
                    '{device} {mount} {fstype} {options} 0 0'.format(
                        device=storage['device'],
                        mount=storage['mount'],
                        fstype=storage.get('file_system') or 'auto',
                        options=','.join(
                            storage.get('mount_options', ['defaults'])
                        )
                    )
                )
            except Exception as issue:
                storage_errors.append(issue)
        if fstab_entries:
            with open('/etc/fstab', 'a') as fstab:
                fstab.write(os.linesep)
                for entry in fstab_entries:
                    fstab.write(entry)
                    fstab.write(os.linesep)
            Command.run(['mount', '-a'])
        for storage in storage_config:
            min_size = storage.get('min_size')
            if min_size:
                try:
                    check_storage_size_validates_constraint(
                        min_size, storage['mount']
                    )
                except Exception as issue:
                    storage_errors.append(issue)
    if storage_errors:
        raise AzureHostedException(storage_errors)
    status.set_success()
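
# Illustrative sketch (not part of the service code): one fstab line as
# produced by the format string in the storage main() above for a
# hypothetical storage entry. Device path, mount point and options are
# example data only.
_example_storage = {
    'device': '/dev/disk/azure/scsi1/lun1',
    'mount': '/hana/data',
    'file_system': 'xfs',
    'mount_options': ['noatime', 'nobarrier']
}
_example_fstab_entry = '{device} {mount} {fstype} {options} 0 0'.format(
    device=_example_storage['device'],
    mount=_example_storage['mount'],
    fstype=_example_storage.get('file_system') or 'auto',
    options=','.join(_example_storage.get('mount_options', ['defaults']))
)
# _example_fstab_entry:
# '/dev/disk/azure/scsi1/lun1 /hana/data xfs noatime,nobarrier 0 0'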
def setup_change_password_on_logon(self, user_name):
    """
    Setup a mandatory password change at the next login.
    The method sets the date of the last password change to
    day zero, which forces a password change request on the
    user's next login

    :param str user_name: user name
    """
    Command.run(['chage', '-d', '0', user_name])
def test_run_does_not_raise_error(self, mock_popen, mock_which):
    mock_which.return_value = 'command'
    mock_process = Mock()
    mock_process.communicate = Mock(
        return_value=[str.encode('stdout'), str.encode('')]
    )
    mock_process.returncode = 1
    mock_popen.return_value = mock_process
    result = Command.run(['command', 'args'], os.environ, False)
    assert result.error == '(no output on stderr)'
    assert result.output == 'stdout'
    mock_process.communicate = Mock(
        return_value=[str.encode(''), str.encode('stderr')]
    )
    result = Command.run(['command', 'args'], os.environ, False)
    assert result.error == 'stderr'
    assert result.output == '(no output on stdout)'
def main():
    """
    Azure Li/Vli script call

    Calls a custom script in the scope of an Azure Li/Vli instance
    """
    Logger.setup()
    status = StatusReport('call')
    config = RuntimeConfig(Defaults.get_config_file())
    call_script = config.get_call_script()
    if call_script:
        call_source = Defaults.mount_config_source()
        Command.run([
            'bash', '-c', '{0}/{1}'.format(
                call_source.location, call_script
            )
        ])
    status.set_success()
def _kdump_calibrate(high, low):
    calibration_values = {'Low': low, 'High': high}
    if not high and not low:
        kdumptool_call = Command.run(['kdumptool', 'calibrate'])
        for setting in kdumptool_call.output.split(os.linesep):
            try:
                (key, value) = setting.split(':')
            except Exception:
                # ignore setting not in key:value format
                continue
            calibration_values[key] = int(value)
    bash_command = ' '.join(
        ['lsblk', '|', 'grep', 'disk', '|', 'wc', '-l']
    )
    storage_luns = int(Command.run(['bash', '-c', bash_command]).output)
    machine_memory = virtual_memory()
    memory_TB = max(1, machine_memory.total / 1024**4)
    calibration_values['High'] = int(
        calibration_values['High'] * memory_TB + (storage_luns / 2)
    )
    return calibration_values
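
# Illustrative sketch (not part of the service code): how the key:value
# parsing in _kdump_calibrate consumes 'kdumptool calibrate' style output.
# The keys and numbers below are made-up example data, not real calibration
# results, and '\n' stands in for os.linesep.
_example_kdumptool_output = 'Low: 72\nHigh: 1024\nMinLow: 32'
_example_values = {}
for _setting in _example_kdumptool_output.split('\n'):
    try:
        (_key, _value) = _setting.split(':')
    except Exception:
        continue
    _example_values[_key] = int(_value)
# _example_values: {'Low': 72, 'High': 1024, 'MinLow': 32}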
def import_raw_sources(packages_config, source_provider):
    raw_sources = packages_config.get('raw')
    import_data = {}
    if raw_sources:
        repository_name = raw_sources['name']
        repository_location = '/var/lib/localrepos/{0}'.format(
            repository_name
        )
        Path.create(repository_location)
        bash_command = ' '.join(
            ['rsync', '-zav'] + list(
                map(
                    lambda dir_name: '{0}/{1}/*'.format(
                        source_provider.location, dir_name
                    ),
                    raw_sources['directory']
                )
            ) + [repository_location]
        )
        Command.run(['bash', '-c', bash_command])
        Command.run(['createrepo', repository_location])
        import_data[repository_name] = [repository_location]
        install_items = []
        for package in glob.iglob('{0}/*.rpm'.format(repository_location)):
            install_items.append(
                Command.run(
                    ['rpm', '-qp', '--qf', '%{NAME}', package]
                ).output
            )
        import_data[repository_name].append(list(install_items))
    return import_data
def main():
    """
    Azure Li/Vli cleanup

    Uninstall the azure-li-services package and its dependencies
    and check for a potential reboot request
    """
    Logger.setup()
    service_reports = Defaults.get_service_reports()
    reboot_system = False
    all_services_successful = True
    for report in service_reports:
        if not report.get_state():
            # in case a service has an unknown or failed state
            # we do not reboot the machine
            all_services_successful = False
            reboot_system = False
            break
        if report.get_reboot():
            reboot_system = True
    install_source = Defaults.mount_config_source()
    try:
        state_file = os.sep.join([
            install_source.location,
            'workload_success_is_{}'.format(all_services_successful).lower()
        ])
        with open(state_file, 'w'):
            pass
        if not all_services_successful:
            write_service_log(install_source)
    finally:
        Defaults.umount_config_source(install_source)
    Command.run([
        'zypper', '--non-interactive', 'remove',
        '--clean-deps', '--force-resolution', 'azure-li-services'
    ])
    Command.run(['systemctl', 'reset-failed'])
    if reboot_system:
        Command.run([
            'kexec',
            '--load', '/boot/vmlinuz',
            '--initrd', '/boot/initrd',
            '--command-line', get_boot_cmdline()
        ])
        Command.run(['kexec', '--exec'])
def setup_ssh_authorization(user):
    if 'ssh-key' in user or 'ssh-private-key' in user:
        if user['username'] == 'root':
            ssh_auth_dir = '/root/.ssh/'
            default_group = 'root'
        else:
            ssh_auth_dir = '/home/{0}/.ssh/'.format(user['username'])
            default_group = 'users'
        Path.create(ssh_auth_dir)
        group_setup = user['group'] if 'group' in user else {}
        uid = pwd.getpwnam(user['username']).pw_uid
        gid = grp.getgrnam(group_setup.get('name') or default_group).gr_gid
        os.chmod(ssh_auth_dir, 0o700)
        if user['username'] != 'root':
            os.chown(ssh_auth_dir, uid, gid)
        if 'ssh-key' in user:
            ssh_auth_file = ssh_auth_dir + 'authorized_keys'
            with open(ssh_auth_file, 'a') as ssh:
                ssh.write(os.linesep)
                ssh.write(user['ssh-key'])
            os.chmod(ssh_auth_file, 0o600)
            if user['username'] != 'root':
                os.chown(ssh_auth_file, uid, gid)
        if 'ssh-private-key' in user:
            ssh_key_source = Defaults.mount_config_source()
            private_key_file = user['ssh-private-key']
            Command.run([
                'cp',
                os.sep.join([ssh_key_source.location, private_key_file]),
                ssh_auth_dir
            ])
            ssh_key_file = os.path.normpath(
                os.sep.join(
                    [ssh_auth_dir, os.path.basename(private_key_file)]
                )
            )
            os.chmod(ssh_key_file, 0o600)
            if user['username'] != 'root':
                os.chown(ssh_key_file, uid, gid)
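
# Illustrative sketch (not part of the service code): the user dictionary
# shape that setup_ssh_authorization expects. Username, group, key material
# and the private key path are example data only.
_example_user = {
    'username': 'sapadm',
    'group': {'name': 'sapsys'},
    'ssh-key': 'ssh-rsa AAAA... user@example',
    'ssh-private-key': 'files/id_rsa_sapadm'
}
# setup_ssh_authorization(_example_user) would append the public key to
# /home/sapadm/.ssh/authorized_keys and copy files/id_rsa_sapadm from the
# mounted config source into /home/sapadm/.ssh/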
def test_run(self, mock_popen, mock_exists, mock_access):
    mock_exists.return_value = True
    command_run = namedtuple(
        'command', ['output', 'error', 'returncode']
    )
    run_result = command_run(
        output='stdout',
        error='stderr',
        returncode=0
    )
    mock_process = Mock()
    mock_process.communicate = Mock(
        return_value=[str.encode('stdout'), str.encode('stderr')]
    )
    mock_process.returncode = 0
    mock_popen.return_value = mock_process
    mock_access.return_value = True
    assert Command.run(['command', 'args']) == run_result
def main():
    """
    Azure Li/Vli package installation

    Creates a local rpm-md repository and registers it with zypper.
    Installs all packages configured in the scope of an
    Azure Li/Vli instance
    """
    Logger.setup()
    status = StatusReport('install')
    config = RuntimeConfig(Defaults.get_config_file())
    packages_config = config.get_packages_config()
    if packages_config:
        install_source = Defaults.mount_config_source()
        local_repos = {}
        local_repos.update(
            import_raw_sources(packages_config, install_source)
        )
        local_repos.update(
            import_repository_sources(packages_config, install_source)
        )
        for repository_name, repository_metadata in local_repos.items():
            repository_location = repository_metadata[0]
            Command.run(
                ['zypper', 'removerepo', repository_name],
                raise_on_error=False
            )
            Command.run([
                'zypper', 'addrepo', '--no-gpgcheck',
                repository_location, repository_name
            ])
        packages_to_install = []
        for repository_metadata in local_repos.values():
            packages_to_install += repository_metadata[1]
        if packages_to_install:
            Command.run([
                'zypper', '--non-interactive',
                'install', '--auto-agree-with-licenses'
            ] + list(filter(None, packages_to_install)))
    status.set_success()
def test_run_failure(self, mock_popen, mock_which):
    mock_which.return_value = 'command'
    mock_popen.side_effect = AzureHostedCommandException('Run failure')
    with raises(AzureHostedCommandException):
        Command.run(['command', 'args'])
def test_run_invalid_environment(self):
    with raises(AzureHostedCommandNotFoundException):
        Command.run(['command', 'args'], {'HOME': '/root'})
def set_hostname(hostname):
    Command.run(['hostnamectl', 'set-hostname', hostname])
def test_run_command_does_not_exist(self):
    with raises(AzureHostedCommandNotFoundException):
        Command.run(['does-not-exist'])
def set_stonith_service(config):
    logger = logging.getLogger('Azure_LI_Services')
    # 1. setup InitiatorName
    initiator_name = 'iqn.1996-04.de.suse:01:{0}'.format(
        config['initiatorname']
    )
    initiator_file = '/etc/iscsi/initiatorname.iscsi'
    initiator_data = re.sub(
        r'InitiatorName=.*',
        'InitiatorName={0}'.format(initiator_name),
        _read_file(initiator_file)
    )
    _write_file(initiator_file, initiator_data)

    # 2. setup iscsi options
    iscsi_conf = '/etc/iscsi/iscsid.conf'
    iscsi_conf_data = _read_file(iscsi_conf)
    iscsi_conf_data = re.sub(
        r'node.session.timeo.replacement_timeout = .*',
        'node.session.timeo.replacement_timeout = 5',
        iscsi_conf_data
    )
    iscsi_conf_data = re.sub(
        r'node.startup = .*',
        'node.startup = automatic',
        iscsi_conf_data
    )
    _write_file(iscsi_conf, iscsi_conf_data)

    # 3. initialize iSCSI devices
    discovery_output = Command.run([
        'iscsiadm', '-m', 'discovery', '-t', 'st',
        '-p', '{0}:3260'.format(config['ip'])
    ]).output.splitlines()

    # 4. create sbd device
    # The first line of the iscsiadm discovery output is taken into
    # account as we expect any group tag to respond with the same
    # target name. We also expect the discovery output to be in the
    # format: target_IP:port,target_portal_group_tag proper_target_name
    discovery_format = '.*:.*,.* .*'
    udev_settle_timeout = '30'
    sbd_max_attempts = 3
    sbd_nop_time_sec = 2
    sbd_setup_successful = False
    if discovery_output and re.match(discovery_format, discovery_output[0]):
        target_name = discovery_output[0].split(' ')[1]
        target_device = \
            '/dev/disk/by-path/ip-{0}:3260-iscsi-{1}-lun-0'.format(
                config['ip'], target_name
            )
        Command.run(['iscsiadm', '-m', 'node', '-l'])
        Command.run(['rescan-scsi-bus.sh'])
        Command.run(['systemctl', 'restart', 'iscsi'])
        Command.run(['systemctl', 'restart', 'iscsid'])
        Command.run(
            ['udevadm', 'settle', '--timeout={0}'.format(udev_settle_timeout)]
        )
        for try_count in range(sbd_max_attempts):
            logger.info('SBD setup try[{0}]'.format(try_count))
            try:
                Command.run(['sbd', '-d', target_device, 'create'])
                Command.run(['sbd', '-d', target_device, 'dump'])
                sbd_setup_successful = True
            except Exception as issue:
                logger.error(
                    'SBD setup failed with: {0}: Retry in {1}sec'.format(
                        issue, sbd_nop_time_sec
                    )
                )
                time.sleep(sbd_nop_time_sec)
            else:
                break
        if not sbd_setup_successful:
            raise AzureHostedCommandOutputException(
                'Stonith: SBD setup failed'
            )
        _write_file(
            '/etc/sysconfig/sbd',
            'SBD_DEVICE="{0}"'.format(target_device)
        )
    else:
        raise AzureHostedCommandOutputException(
            'Stonith: Unexpected iSCSI discovery information: {0}'.format(
                discovery_output
            )
        )
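
# Illustrative sketch (not part of the service code): the discovery line
# format that set_stonith_service expects and how the target name is
# extracted from it. IP, portal group tag and IQN are example data only.
import re

_example_discovery_line = \
    '10.250.1.10:3260,1 iqn.2006-04.sbd.example:sbd-target'
assert re.match('.*:.*,.* .*', _example_discovery_line)
_example_target_name = _example_discovery_line.split(' ')[1]
# _example_target_name: 'iqn.2006-04.sbd.example:sbd-target'
# which would yield the sbd device path:
# /dev/disk/by-path/ip-10.250.1.10:3260-iscsi-iqn.2006-04.sbd.example:sbd-target-lun-0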
def test_run_does_not_raise_error_if_command_not_found(self, mock_which):
    mock_which.return_value = None
    result = Command.run(['command', 'args'], os.environ, False)
    assert result.error is None
    assert result.output is None
    assert result.returncode == -1
def set_saptune_service():
    Command.run(['saptune', 'daemon', 'start'])
    Command.run(['saptune', 'solution', 'apply', 'HANA'])