def list_ovs_services(host=IP):
    """
    List the OVS service entries known to the node's init system.

    :param host: IP of the node to query (defaults to the module-level IP)
    :return: list of service status lines; None for an unrecognized init system
    """
    init_system = GeneralSystem.INIT_SYSTEM
    if init_system == "init":
        output = General.execute_command_on_node(host, "initctl list | grep ovs-*", allow_insecure=True)
        return output.splitlines()
    if init_system == "systemd":
        command = ["systemctl", "-l", "--no-legend", "--no-pager", "list-units", "ovs-*"]
        return General.execute_command_on_node(host, command).splitlines()
def ovs_2493_detect_could_not_acquire_lock_events_test():
    """
    Verify lock errors

    Greps every node's lib log for 'Could not acquire lock' events and
    fails with the collected context lines if any node reports hits.
    """
    grep_cmd = "grep -C 1 'Could not acquire lock' /var/log/ovs/lib.log"
    report = ""
    for node_ip in GeneralPMachine.get_all_ips():
        hit_count = General.execute_command_on_node(node_ip, grep_cmd + " | wc -l")
        if hit_count == '0':
            continue
        matches = General.execute_command_on_node(node_ip, grep_cmd).splitlines()
        report += "node %s \n:{0}\n\n".format(matches) % node_ip
    assert len(report) == 0, "Lock errors detected in lib logs on \n" + report
def post_reboot_checks_test():
    """
    Perform service checks after reboot

    Reads the host to check from the POST_REBOOT_HOST environment variable
    (skips silently when unset) and polls it for up to 5 minutes, every 5
    seconds, until all ovs-* upstart services report 'start/running'.
    Fails listing any services still not running when the timeout expires.
    """
    rebooted_host = os.environ.get('POST_REBOOT_HOST')
    if not rebooted_host:
        logger.info('Test not setup to run')
        return

    logger.info('Post reboot check node {0}\n'.format(rebooted_host))

    wait_time = 5 * 60
    sleep_time = 5

    # Fix: initialize as a list (was ''), matching the list comprehension
    # assigned inside the loop and the list formatting in the assert below.
    non_running_services = []
    while wait_time > 0:
        out = General.execute_command_on_node(rebooted_host, "initctl list | grep ovs-*")
        statuses = out.splitlines()

        non_running_services = [s for s in statuses if 'start/running' not in s]
        if len(non_running_services) == 0:
            break

        wait_time -= sleep_time
        time.sleep(sleep_time)

    assert len(non_running_services) == 0,\
        "Found non running services after reboot on node {0}\n{1}".format(rebooted_host, non_running_services)
def ovs_2053_check_for_alba_warnings_test():
    """
    Check ALBA warning presence

    Counts 'warning: syncfs' occurrences across the local asd logs and
    fails when any are present.
    """
    warning_count_cmd = 'grep "warning: syncfs" /var/log/upstart/*-asd-*.log | wc -l'
    out = General.execute_command_on_node('127.0.0.1', warning_count_cmd)
    assert out == '0', \
        "syncfs warnings detected in asd logs\n:{0}".format(out.splitlines())
def ssh_check_test():
    """
    Verify SSH keys

    Checks that every node's ~/.ssh/known_hosts mentions every other
    node's IP, and reports each missing pairing.
    """
    issues_found = []
    env_ips = GeneralPMachine.get_all_ips()
    for source_ip in env_ips:
        known_hosts = General.execute_command_on_node(source_ip, "cat ~/.ssh/known_hosts")
        for target_ip in env_ips:
            if target_ip == source_ip:
                continue
            if target_ip not in known_hosts:
                issues_found.append('Host key verification not found between {0} and {1}'.format(source_ip, target_ip))
    assert len(issues_found) == 0, 'Following issues were found:\n - {0}'.format('\n - '.join(issues_found))
def services_check_test():
    """
    Verify some services

    Collects, per node, every ovs-* upstart service that is not in the
    'start/running' state and fails if any node has such services.
    """
    non_running_services = []
    for node_ip in GeneralPMachine.get_all_ips():
        statuses = General.execute_command_on_node(node_ip, "initctl list | grep ovs-*").splitlines()
        stopped = [line for line in statuses if 'start/running' not in line]
        if stopped:
            non_running_services.append([node_ip, stopped])
    assert len(non_running_services) == 0, "Found non running services on {0}".format(non_running_services)
def _get_agent_distribution(agent_name):
    """
    Count maintenance agents per ALBA node and assert they are spread evenly.

    :param agent_name: name of the maintenance agent whose init scripts are counted
    :return: dict mapping each node ip to its agent count, plus a 'total' key
    :raises AssertionError: when any node deviates by more than one agent
        from the even share
    """
    result = {}
    total = 0
    for ip in alba_node_ips:
        count = General.execute_command_on_node(ip, 'ls /etc/init/alba-maintenance_{0}-* | wc -l'.format(agent_name))
        # Command output is a string; empty output means zero agents.
        if count:
            count = int(count)
        else:
            count = 0
        total += count
        result[ip] = count
    result['total'] = total

    # Fix: print() function form (single argument) behaves identically on
    # Python 2 and makes the module importable on Python 3, where the
    # 'print x' statement is a syntax error.
    print('Maintenance agent distribution: {0}'.format(result))

    # Fix: '//' keeps the Python 2 integer-division semantics on Python 3
    # too (plain '/' would yield a float there and break the equality).
    fair_share = total // len(alba_node_ips)
    for ip in alba_node_ips:
        # Each node must hold the fair share or at most one extra agent.
        assert (result[ip] == fair_share or result[ip] == fair_share + 1),\
            "Agents not equally distributed!"
    return result
def fdl_0002_add_remove_partition_with_role_and_crosscheck_model_test():
    """
    FDL-0002 - create/remove disk partition using full disk and verify ovs model
    - look for an unused disk
    - add a partition using full disk and assign a DB role to the partition
    - validate ovs model is correctly updated with DB role
    - cleanup that partition
    - verify ovs model is correctly updated
    """
    if TestFlexibleDiskLayout.continue_testing.state is False:
        logger.info('Test suite signaled to stop')
        return

    my_sr = GeneralStorageRouter.get_local_storagerouter()

    unused_disks = GeneralDisk.get_unused_disks()
    if not unused_disks:
        logger.info("At least one unused disk should be available for partition testing")
        return

    # Index the local storagerouter's disks by device path, split by medium
    hdds = dict()
    ssds = dict()
    mdisks = GeneralDisk.get_disks()
    for disk in mdisks:
        if disk.storagerouter_guid == my_sr.guid:
            if disk.is_ssd:
                ssds['/dev/' + disk.name] = disk
            else:
                hdds['/dev/' + disk.name] = disk

    all_disks = dict(ssds)
    all_disks.update(hdds)

    # check no partitions are modelled for unused disks
    partitions = GeneralDisk.get_disk_partitions()
    partitions_detected = False
    disk_guid = ''
    for path in unused_disks:
        # @TODO: remove the if when ticket OVS-4503 is solved
        if path in all_disks:
            disk_guid = all_disks[path].guid
            for partition in partitions:
                if partition.disk_guid == disk_guid:
                    partitions_detected = True
    assert partitions_detected is False,\
        'Existing partitions detected on unused disks!'

    # try partition a disk using it's full reported size
    disk = all_disks[unused_disks[0]]
    GeneralDisk.configure_disk(storagerouter=my_sr,
                               disk=disk,
                               offset=0,
                               size=int(disk.size),
                               roles=['WRITE'])

    # lookup partition in model
    mountpoint = None
    partitions = GeneralDisk.get_disk_partitions()
    for partition in partitions:
        if partition.disk_guid == disk.guid and 'WRITE' in partition.roles:
            mountpoint = partition.mountpoint
            break

    # Fix: fail fast here instead of at the very end of the test. If the
    # partition never appeared in the model, the original code still ran
    # 'umount None' and passed a stale loop variable to configure_disk.
    assert mountpoint, 'New partition was not detected in model'

    # strip the role from the partition found above
    GeneralDisk.configure_disk(storagerouter=my_sr,
                               disk=disk,
                               offset=0,
                               partition=partition,
                               size=int(disk.size),
                               roles=[])

    # cleanup disk partition
    cmd = 'umount {0}; rmdir {0}; echo 0'.format(mountpoint)
    General.execute_command_on_node(my_sr.ip, cmd, allow_insecure=True)

    cmd = ['parted', '-s', '/dev/' + disk.name, 'rm', '1']
    General.execute_command_on_node(my_sr.ip, cmd, allow_nonzero=True)

    # wipe partition table to be able to reuse this disk in another test
    GeneralVDisk.write_to_volume(location=disk.aliases[0],
                                 count=64,
                                 bs='1M',
                                 input_type='zero')

    GeneralStorageRouter.sync_with_reality()

    # verify partition no longer exists in ovs model
    # Fix: compare against disk.guid (the disk we actually partitioned).
    # The old code used 'disk_guid', which holds the guid of the LAST
    # unused disk scanned above - not necessarily unused_disks[0] - making
    # the removal check vacuous whenever several unused disks exist.
    is_partition_removed = True
    partitions = GeneralDisk.get_disk_partitions()
    for partition in partitions:
        if partition.disk_guid == disk.guid and 'WRITE' in partition.roles:
            is_partition_removed = False
            break

    assert is_partition_removed is True,\
        'New partition was not deleted successfully from system/model!'