def _test_GET_ihosts_host_id_uppercaseUUID(sysinv_rest):
    """
    Test GET of <resource> with valid authentication and upper case UUID
    values.  RFC 4122 covers the need for uppercase UUID values.

    Args:
        sysinv_rest: sysinv REST client fixture

    Prerequisites: system is running
    Test Setups:
        n/a
    Test Steps:
        - Using requests GET <resource> with proper authentication
        - Determine if expected status_code of 200 is received
    Test Teardown:
        n/a
    """
    rest_client = sysinv_rest
    resource_template = "/ihosts/{}/addresses"

    LOG.info("This test case will FAIL until CGTS-8265 is resolved")
    LOG.info(system_helper.get_hosts())

    for hostname in system_helper.get_hosts():
        host_uuid = system_helper.get_host_values(hostname, 'uuid')[0]
        LOG.tc_step(
            "Using requests GET {} with proper authentication".format(
                resource_template))

        # Issue the GET with the uuid upper-cased; per RFC 4122 the server
        # must accept it just like the lower-case form.
        status_code, text = rest_client.get(
            resource=resource_template.format(host_uuid.upper()), auth=True)
        LOG.info("Retrieved: status_code: {} message: {}".format(
            status_code, text))

        LOG.tc_step("Determine if expected code of 200 is received")
        assert status_code == 200, \
            "Expected code of 200 - received {} and message {}".format(
                status_code, text)
def test_delete_unlocked_node_negative():
    """
    Attempts to delete each unlocked node.
    Fails if one unlocked node does get deleted.

    Test Steps:
        - Creates a list of every unlocked host
        - Iterate through each host and attempt to delete it
        - Verify that each host rejected the delete request
    """
    unlocked_hosts = system_helper.get_hosts(administrative='unlocked')
    wrongly_deleted = []

    for host in unlocked_hosts:
        LOG.tc_step("attempting to delete {}".format(host))
        admin_state = system_helper.get_host_values(
            host, fields='administrative')[0]
        LOG.info("{} state: {}".format(host, admin_state))

        rc, output = cli.system('host-delete', host, fail_ok=True)
        LOG.tc_step("Delete request - result: {}\tout: {}".format(rc, output))
        assert 1 == rc, \
            "FAIL: The delete request for {} was not rejected".format(host)

        LOG.tc_step("Confirming that the node was not deleted")
        rc, output = cli.system('host-show', host, fail_ok=True)
        if 'host not found' in output or rc != 0:
            # The host vanished even though the delete request was rejected
            LOG.tc_step("{} was deleted.".format(host))
            wrongly_deleted.append(host)

    assert not wrongly_deleted, \
        "Fail: Delete request for the following node(s) " \
        "{} was accepted.".format(wrongly_deleted)
def get_suitable_hypervisors():
    """
    Get low latency hypervisors with HT-off

    TODO: following settings should checked, but most of them cannot be
        easily done automatically
    # Processor Configuration
    # Hyper-Threading = Disabled
    # Power & Performance
    # Policy = Performance
    # Workload = Balanced
    # P-States
    # SpeedStep = Enabled
    # Turbo Boost = Enabled
    # Energy Efficient Turbo = Disabled
    # C-States
    # CPU C-State = Disabled
    # Acoustic and Performance
    # Fan Profile = Performance:

    Returns:
        keys view of the module-level testable_hypervisors dict, i.e. the
        names of the qualifying low-latency, HT-off hypervisors
    """
    global testable_hypervisors

    LOG.fixture_step(
        'Check if the lab meets conditions required by this test case')
    hypervisors = host_helper.get_hypervisors()

    for hypervisor in hypervisors:
        personality, subfunc = system_helper.get_host_values(
            hypervisor, ('personality', 'subfunctions'))
        personalities = subfunc + personality
        # Only low-latency hosts qualify for this test
        if not personalities or 'lowlatency' not in personalities:
            continue

        cpu_info, num_threads, vm_cores, num_cores = get_cpu_info(hypervisor)
        if cpu_info and 'topology' in cpu_info and \
                cpu_info['topology']['threads'] == 1:
            if num_threads != 1:
                # Fix: LOG.warn is a deprecated alias of LOG.warning; use
                # warning() consistently (the else-branch below already does)
                LOG.warning(
                    'conflicting info: num_threads={}, while cpu_info.threads={}'
                    .format(num_threads, cpu_info['topology']['threads']))
            testable_hypervisors[hypervisor] = {
                'personalities': personalities,
                'cpu_info': cpu_info,
                'vm_cores': vm_cores,
                'num_cores': num_cores,
                'for_host_test': False,
                'for_vm_test': False,
            }
        else:
            LOG.warning(
                'hypervisor:{} has HT-on, ignore it'.format(hypervisor))

    return testable_hypervisors.keys()
def test_ceilometer_meters_exist(meters):
    """
    Validate ceilometer meters exist
    Verification Steps:
    1. Check via 'openstack metric list' or 'ceilometer event-list'
    2. Check meters for router, subnet, image, and vswitch exists
    """
    # skip('CGTS-10102: Disable TC until US116020 completes')
    install_time = system_helper.get_host_values('controller-1',
                                                 'created_at')[0]
    now_iso = datetime.utcnow().isoformat(sep='T')
    # Creation events age out; nothing to verify on older installs.
    if common.get_timedelta_for_isotimes(install_time, now_iso) > \
            timedelta(hours=24):
        skip("Over a day since install. Meters no longer exist.")

    # Router creation events
    LOG.tc_step(
        "Check number of 'router.create.end' events is at least the number of existing routers"
    )
    router_id = network_helper.get_routers()[0]
    check_event_in_tenant_or_admin(resource_id=router_id,
                                   event_type='router.create.end')

    # Subnet creation events
    LOG.tc_step(
        "Check number of 'subnet.create' meters is at least the number of existing subnets"
    )
    tenant_subnets = network_helper.get_subnets(
        name=Tenant.get_primary().get('tenant'), strict=False)
    chosen_subnet = random.choice(tenant_subnets)
    LOG.info(
        "Subnet to check in ceilometer event list: {}".format(chosen_subnet))
    check_event_in_tenant_or_admin(resource_id=chosen_subnet,
                                   event_type='subnet.create.end')

    # Image size metrics: every glance image should have one
    LOG.tc_step('Check meters for image')
    image_ids = glance_helper.get_images(field='id')
    metric_resources = gnocchi_helper.get_metrics(metric_name='image.size',
                                                  field='resource_id')
    assert set(image_ids) <= set(metric_resources)

    # vSwitch engine metrics: expected only on AVS builds
    LOG.tc_step('Check meters for vswitch')
    metric_resources = gnocchi_helper.get_metrics(
        metric_name='vswitch.engine.util', fail_ok=True, field='resource_id')
    if system_helper.is_avs():
        hypervisors = host_helper.get_hypervisors()
        assert len(hypervisors) <= len(metric_resources), \
            "Each nova hypervisor should have at least one vSwitch core"
    else:
        assert not metric_resources, "vswitch meters found for STX build"
def prev_check(request, check_central_alarms_module):
    """
    Module fixture: ensure the central region and the primary subcloud are
    both on DEFAULT_ZONE, record pre-test 'created_at' timestamps for
    controller-0 in each region, and select two zones for the test to use.

    Args:
        request: pytest fixture request, used to register the revert finalizer
        check_central_alarms_module: dependency fixture (central alarm checks)

    Returns (tuple): (prev_central_time, prev_sub_time, central_zone,
        sub_zone, central_auth, sub_auth, subcloud)
    """
    LOG.fixture_step(
        "(module) Ensure both central and subcloud are configured with {} timezone"
        .format(DEFAULT_ZONE))
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    sub_auth = Tenant.get('admin_platform', dc_region=subcloud)
    system_helper.modify_timezone(timezone=DEFAULT_ZONE,
                                  auth_info=central_auth)
    code = system_helper.modify_timezone(timezone=DEFAULT_ZONE,
                                         auth_info=sub_auth)[0]
    if code == 0:
        # allow sometime for change to apply
        time.sleep(30)

    # Baseline timestamps: the test uses them to detect timezone-driven
    # changes, and the finalizer uses them to confirm the revert completed.
    prev_central_time = system_helper.get_host_values(
        host="controller-0", fields='created_at', auth_info=central_auth)[0]
    prev_sub_time = system_helper.get_host_values(host="controller-0",
                                                  fields='created_at',
                                                  auth_info=sub_auth)[0]
    LOG.fixture_step("prev_time: {}.".format(prev_central_time))
    # presumably returns two zones differing from current_zone — helper not
    # visible here; verify against its definition
    central_zone, sub_zone = __select_two_timezones(current_zone=DEFAULT_ZONE)

    def _revert():
        # Teardown: restore DEFAULT_ZONE in both regions and wait until the
        # displayed created_at timestamps return to their original values.
        LOG.fixture_step(
            "Revert timezone to {} and ensure host created timestamp also reverted"
            .format(DEFAULT_ZONE))
        system_helper.modify_timezone(timezone=DEFAULT_ZONE,
                                      auth_info=central_auth)
        system_helper.modify_timezone(timezone=DEFAULT_ZONE,
                                      auth_info=sub_auth)
        wait_for_timestamp_update(auth_info=central_auth,
                                  expt_time=prev_central_time)
        wait_for_timestamp_update(auth_info=sub_auth,
                                  expt_time=prev_sub_time)

    request.addfinalizer(_revert)

    return prev_central_time, prev_sub_time, central_zone, sub_zone, \
        central_auth, sub_auth, subcloud
def enable_disable_murano_agent(enable=True, con_ssh=None,
                                auth_info=Tenant.get('admin')):
    """
    Enable/Disable Murano service and murano agent on the system

    Args:
        enable: True/False, True for enable, false for disable
        con_ssh (SSHClient):
        auth_info (dict)

    Returns:
        code, msg: return code and msg
    """
    # enable Murano
    msg = "Enabled Murano Service Successfully"

    # Fix: the two branches differed only in the 'value' argument, so the
    # duplicated create_service_parameter calls are collapsed into one.
    # "false" means the agent is NOT disabled (i.e. enabled), and vice versa.
    ret, out = system_helper.create_service_parameter(
        service="murano", section="engine", name="disable_murano_agent",
        value="false" if enable else "true",
        modify_existing=True, con_ssh=con_ssh)

    if ret != 0:
        return 1, out

    if system_helper.get_host_values('controller-0', 'config_status',
                                     con_ssh=con_ssh,
                                     auth_info=auth_info)[0] \
            == 'Config out-of-date':
        # need to lock/unlock standby and swact lock/unlock to apply the
        # changed service parameter
        ret, out = host_helper.lock_unlock_controllers()
        if ret != 0:
            return 1, out
    else:
        # NOTE(review): preserved from the original — a failure-sounding msg
        # is returned with code 0 when no config change was flagged; confirm
        # intent before relying on msg text.
        msg = "Failed to enable/disable Murano engine"

    return 0, msg
def wait_for_timestamp_update(auth_info, prev_timestamp=None, expt_time=None):
    """
    Poll controller-0's 'created_at' timestamp for up to 60 seconds.

    Args:
        auth_info: auth info for the region to query
        prev_timestamp: if given, return as soon as the displayed timestamp
            differs from this value (timezone change applied)
        expt_time: if given, return as soon as the displayed timestamp equals
            this value (timezone change reverted)

    Returns:
        the new timestamp string on success, or None on timeout
    """
    timeout = time.time() + 60
    while time.time() < timeout:
        post_timestamp = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=auth_info)[0]

        # Fix: the original re-tested `prev_timestamp != post_timestamp` in a
        # nested if that was already guaranteed true by the outer condition.
        if prev_timestamp and prev_timestamp != post_timestamp:
            return post_timestamp
        elif expt_time and post_timestamp == expt_time:
            return post_timestamp

        time.sleep(5)

    LOG.info("Timestamp for fm event did not change")
    return None
def test_horizon_host_inventory_display(host_inventory_pg):
    """
    Test the hosts inventory display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > Host Inventory

    Test Steps:
        - Test host tables display

    Teardown:
        - Back to Host Inventory page
        - Logout
    """
    LOG.tc_step('Test host inventory display')
    host_inventory_pg.go_to_hosts_tab()
    host_list = system_helper.get_hosts()
    for host_name in host_list:
        LOG.info("Checking {}...".format(host_name))
        # Mapping of CLI field name -> horizon column header for this host
        headers_map = host_inventory_pg.hosts_table(
            host_name).get_cli_horizon_mapping()
        fields = list(headers_map.keys())
        cli_values = system_helper.get_host_values(host_name, fields,
                                                   rtn_dict=True)
        # Reformat uptime so it is comparable with horizon's rendering
        cli_values['uptime'] = format_uptime(cli_values['uptime'])
        if cli_values.get('peers'):
            # CLI returns 'peers' as a mapping; horizon shows only the name
            cli_values['peers'] = cli_values.get('peers').get('name')
        horizon_vals = host_inventory_pg.horizon_vals(host_name)
        for cli_field in fields:
            cli_val = cli_values[cli_field]
            horizon_field = headers_map[cli_field]
            horizon_val = horizon_vals[horizon_field]
            if cli_field == 'uptime':
                # uptime drifts between the CLI read and the page read, so
                # only the display format is asserted
                assert re.match(r'\d+ [dhm]', horizon_val)
            else:
                # substring match: horizon may decorate the raw CLI value
                assert str(cli_val).lower() in horizon_val.lower(), \
                    '{} {} display incorrectly, expect: {} actual: {}'. \
                    format(host_name, horizon_field, cli_val, horizon_val)
    horizon.test_result = True
def prepare_modify_cpu(request):
    """
    Finds the first unlocked compute node. Creates a cpu profile.

    Returns (tuple): (name of the host, uuid of the host,
        uuid of the new cpu profile)
    """
    unlocked_computes = system_helper.get_computes(
        administrative=HostAdminState.UNLOCKED)
    if not unlocked_computes:
        skip("There were no unlocked compute nodes.")

    target_host = unlocked_computes[0]
    host_uuid = system_helper.get_host_values(host=target_host,
                                              fields='uuid')[0]
    headers = get_headers()

    # Create the cpu profile against the chosen host via the sysinv REST API
    profile_url = html_helper.create_url(IP_ADDR, HTTPPort.SYS_PORT,
                                         HTTPPort.SYS_VER, 'iprofile')
    payload = {'profilename': 'test_compute_profile',
               'profiletype': 'cpu',
               'ihost_uuid': host_uuid}
    resp = html_helper.post_request(profile_url, headers=headers,
                                    data=payload, verify=False)
    iprofile_uuid = resp['uuid']
    LOG.info("The new profile uuid is: {}".format(iprofile_uuid))

    def unlock():
        # Teardown: re-apply the profile to the host, then delete it
        host_helper.apply_host_cpu_profile(target_host, iprofile_uuid)
        url_ = html_helper.create_url(
            IP_ADDR, HTTPPort.SYS_PORT, HTTPPort.SYS_VER,
            'iprofile/{}'.format(iprofile_uuid))
        html_helper.delete_request(url_, headers=headers, verify=False)

    request.addfinalizer(unlock)

    return target_host, host_uuid, iprofile_uuid
def test_GET_ihosts_host_id_invalidUUID(sysinv_rest):
    """
    Test GET of <resource> with valid authentication but an invalid UUID
    value, and verify the request is rejected with a 400.

    Args:
        sysinv_rest: sysinv REST client fixture

    Prerequisites: system is running
    Test Setups:
        n/a
    Test Steps:
        - For each host, mangle its uuid into a value that is not a valid
          UUID
        - Using requests GET <resource> with proper authentication
        - Determine if expected status_code of 400 is received
    Test Teardown:
        n/a
    """
    path = "/ihosts/{}/addresses"
    r = sysinv_rest
    LOG.info(path)
    LOG.info(system_helper.get_hosts())
    for host in system_helper.get_hosts():
        uuid = system_helper.get_host_values(host, 'uuid')[0]
        LOG.info("host: {} uuid: {}".format(host, uuid))
        message = "Using requests GET {} with proper authentication"
        LOG.tc_step(message.format(path))

        # shift a->g, b->h, etc - all to generate invalid uuid
        # (non-letter characters such as digits and '-' are left unchanged)
        shifted_uuid = ''.join(
            map(
                lambda x: chr((ord(x) - ord('a') + 6) % 26 + ord('a'))
                if x in string.ascii_lowercase else x, uuid.lower()))

        status_code, text = r.get(resource=path.format(shifted_uuid),
                                  auth=True)
        message = "Retrieved: status_code: {} message: {}"
        LOG.info(message.format(status_code, text))
        LOG.tc_step("Determine if expected code of 400 is received")
        message = "Expected code of 400 - received {} and message {}"
        assert status_code == 400, message.format(status_code, text)
def test_change_personality_unlock_negative():
    """
    TC1943 Verify that a host's personality can't be changed

    Test Steps:
        - For each host attempt to update its personality
        - Verify that each attempt is rejected
    """
    hosts = system_helper.get_hosts()
    for host in hosts:
        personality = system_helper.get_host_values(host, 'personality')[0]
        # Pick any personality other than the host's current one
        if personality == 'controller':
            change_to = 'worker'
        else:
            change_to = 'controller'

        # Fix: log the TARGET personality (change_to); the original formatted
        # the host's current personality into the message instead.
        LOG.tc_step("Attempting to change {}'s personality to {}".format(
            host, change_to))
        code, out = cli.system('host-update',
                               '{} personality={}'.format(host, change_to),
                               fail_ok=True)

        LOG.tc_step("Verifying that the cli was rejected")
        assert 1 == code, \
            "FAIL: The request to modify {}'s personality was not rejected".format(host)
def teardown():
    """
    If DNS servers are not set, set them. Deprovision internal DNS.
    """
    global UNRESTORED_DNS_SERVERS
    global HOSTS_AFFECTED

    if UNRESTORED_DNS_SERVERS:
        LOG.fixture_step(
            "Restoring DNS entries to: {}".format(UNRESTORED_DNS_SERVERS))
        subnet_list = network_helper.get_subnets(network=mgmt_net_id)
        set_dns_servers(subnet_list, UNRESTORED_DNS_SERVERS, fail_ok=True)
        UNRESTORED_DNS_SERVERS = []

    if system_helper.get_alarms(alarm_id=EventLogID.CONFIG_OUT_OF_DATE):
        LOG.fixture_step(
            "Config out-of-date alarm(s) present, check {} and lock/unlock if host config out-of-date".
            format(HOSTS_AFFECTED))
        # Fix: iterate over a snapshot of the list. The original removed
        # elements from HOSTS_AFFECTED while iterating it directly, which
        # silently skips the element following each removal.
        for host in list(HOSTS_AFFECTED):
            if system_helper.get_host_values(
                    host, 'config_status')[0] == 'Config out-of-date':
                LOG.info("Lock/unlock {} to clear config out-of-date status".format(host))
                host_helper.lock_unlock_hosts(hosts=host)
                HOSTS_AFFECTED.remove(host)
def test_GET_various_host_id_valid(sysinv_rest, path):
    """
    Test GET of <resource> with valid authentication.

    Args:
        sysinv_rest: sysinv REST client fixture
        path: parametrized resource path; '-' characters mark where the
            host uuid should be substituted

    Prerequisites: system is running
    Test Setups:
        n/a
    Test Steps:
        - Using requests GET <resource> with proper authentication
        - Determine if expected status_code of 200 is received
    Test Teardown:
        n/a
    """
    r = sysinv_rest
    # Replace every '-' in the parametrized path with a '{}' placeholder so
    # the host uuid can be formatted in below.
    path = re.sub("-", "{}", path)
    LOG.info(path)
    LOG.info(system_helper.get_hosts())
    for host in system_helper.get_hosts():
        uuid = system_helper.get_host_values(host, 'uuid')[0]
        res = path.format(uuid)
        message = "Using requests GET {} with proper authentication"
        LOG.tc_step(message.format(res))
        status_code, text = r.get(resource=res, auth=True)
        message = "Retrieved: status_code: {} message: {}"
        LOG.info(message.format(status_code, text))
        # A 404 here means the resource type is not present on this system
        # configuration rather than a failure, so the test is skipped.
        if status_code == 404:
            pytest.skip("Unsupported resource in this configuration.")
        else:
            message = "Determine if expected code of 200 is received"
            LOG.tc_step(message)
            message = "Expected code of 200 - received {} and message {}"
            assert status_code == 200, message.format(status_code, text)
def test_lock_active_controller_reject(no_simplex):
    """
    Verify lock unlock active controller. Expected it to fail

    Test Steps:
        - Get active controller
        - Attempt to lock active controller and ensure it's rejected
    """
    LOG.tc_step('Retrieve the active controller from the lab')
    active = system_helper.get_active_controller_name()
    assert active, "No active controller available"

    # Attempt the lock without swacting first; it must be rejected
    LOG.tc_step("Lock active controller and ensure it fail to lock")
    rc, output = host_helper.lock_host(active, fail_ok=True, swact=False,
                                       check_first=False)
    assert rc == 1, 'Expect locking active controller to ' \
                    'be rejected. Actual: {}'.format(output)

    # Double-check the administrative state was left untouched
    admin_state = system_helper.get_host_values(active, 'administrative')[0]
    assert admin_state == 'unlocked', \
        "Fail: The active controller was locked."
def test_dc_modify_timezone(prev_check):
    """
    Test timezone modify on system controller and subcloud. Ensure timezone
    change is not propagated.
    Setups:
        - Ensure both central and subcloud regions are configured with UTC
        - Get the timestamps for host created_at before timezone modify

    Test Steps
        - Change the timezone in central region and wait until the change
            is applied
        - Change the timezone to a different zone in subcloud and wait
            until the change is applied
        - Verify host created_at timestamp updated according to the local
            timezone for the region
        - Swact on subcloud and ensure timezone and host created_at
            timestamp persists locally
        - Swact central controller and ensure timezone and host created_at
            timestamp persists in central and subcloud

    Teardown
        - Change timezone to UTC in both central and subcloud regions
        - Ensure host created_at timestamp is reverted to original
    """
    prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, \
        subcloud_auth, subcloud = prev_check

    LOG.tc_step("Modify timezone to {} in central region".format(central_zone))
    system_helper.modify_timezone(timezone=central_zone,
                                  auth_info=central_auth)

    LOG.tc_step(
        "Waiting for timestamp for host created_at to update in central region"
    )
    post_central_time = wait_for_timestamp_update(
        prev_timestamp=prev_central_time, auth_info=central_auth)
    assert post_central_time != prev_central_time, \
        "host created_at timestamp did not update after timezone changed " \
        "to {} in central region".format(central_zone)

    LOG.tc_step("Modify timezone to {} in {}".format(sub_zone, subcloud))
    system_helper.modify_timezone(timezone=sub_zone, auth_info=subcloud_auth)

    LOG.tc_step(
        "Waiting for timestamp for same host created_at to update in {}".
        format(subcloud))
    post_sub_time = wait_for_timestamp_update(prev_timestamp=prev_sub_time,
                                              auth_info=subcloud_auth)
    assert post_sub_time != prev_sub_time, \
        "host created_at timestamp did not update after timezone changed to {} " \
        "in {}".format(sub_zone, subcloud)
    # Different zones must render different local timestamps, proving the
    # subcloud did not inherit the central region's timezone.
    assert post_sub_time != post_central_time, \
        "Host created_at timestamp is the same on central and {} when configured with different " \
        "timezones".format(subcloud)

    LOG.tc_step(
        "Ensure host created_at timestamp does not change after subcloud sync audit"
    )
    dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True,
                                  timeout=660)
    post_sync_sub_time = system_helper.get_host_values(
        host='controller-0', fields='created_at',
        auth_info=subcloud_auth)[0]
    assert post_sub_time == post_sync_sub_time, \
        "Host created_at timestamp changed after sync audit on {}".format(subcloud)

    if not system_helper.is_aio_simplex():
        # Simplex systems have no standby controller to swact to
        LOG.tc_step(
            "Swact in {} region and verify timezone persists locally".format(
                subcloud))
        host_helper.swact_host(auth_info=subcloud_auth)
        post_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_swact_sub_zone == sub_zone

        post_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_swact_sub_time == post_sub_time

    if system_helper.get_standby_controller_name(auth_info=central_auth):
        LOG.tc_step(
            "Swact in central region, and ensure timezone persists locally in central"
            " and subcloud")
        host_helper.swact_host(auth_info=central_auth)

        # Verify central timezone persists
        post_swact_central_zone = system_helper.get_timezone(
            auth_info=central_auth)
        assert post_swact_central_zone == central_zone
        post_swact_central_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=central_auth)[0]
        assert post_swact_central_time == post_central_time

        # Verify subcloud timezone persists
        post_central_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_central_swact_sub_zone == sub_zone
        post_central_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_central_swact_sub_time == post_sub_time
def test_install_cloned_image(install_clone_setup):
    """
    Install a cloned image on an AIO lab: boot controller-0 from the clone
    USB, verify clone status and hardware, bring up controller-1 on duplex
    systems, customize the cloned system, then run lab setup and verify
    system health.

    Args:
        install_clone_setup: fixture providing hostnames, system_mode and
            the build server for this install
    """
    controller1 = 'controller-1'
    lab = InstallVars.get_install_var('LAB')
    install_output_dir = ProjVar.get_var('LOG_DIR')
    controller0_node = lab['controller-0']
    hostnames = install_clone_setup['hostnames']
    system_mode = install_clone_setup['system_mode']
    lab_name = lab['name']
    LOG.info("Starting install-clone on AIO lab {} .... ".format(lab_name))
    LOG.tc_step("Booting controller-0 ... ")

    if controller0_node.telnet_conn is None:
        controller0_node.telnet_conn = install_helper.open_telnet_session(
            controller0_node, install_output_dir)
        try:
            controller0_node.telnet_conn.login()
        # Fix: bare 'except:' also swallows KeyboardInterrupt/SystemExit;
        # narrowed to Exception (best-effort login retry preserved)
        except Exception:
            LOG.info("Telnet Login failed. Attempting to reset password")
            try:
                controller0_node.telnet_conn.login(reset=True)
            except Exception:
                if controller0_node.telnet_conn:
                    controller0_node.telnet_conn.close()
                    controller0_node.telnet_conn = None

    if controller0_node.telnet_conn:
        install_helper.wipe_disk_hosts(hostnames)

    # power off hosts
    LOG.tc_step("Powring off system hosts ... ")
    install_helper.power_off_host(hostnames)

    install_helper.boot_controller(boot_usb=True, small_footprint=True,
                                   clone_install=True)

    # establish telnet connection with controller
    LOG.tc_step(
        "Establishing telnet connection with controller-0 after install-clone ..."
    )

    # Fix: raw string — '\~' and '\$' are invalid escape sequences in a
    # normal string literal (value is unchanged).
    node_name_in_ini = r'{}.*\~\$ '.format(controller0_node.host_name)
    # normalized_name strips leading zeros from numeric suffixes; kept for
    # the alternate prompt construction preserved in the comment below
    normalized_name = re.sub(r'([^\d])0*(\d+)', r'\1\2', node_name_in_ini)

    # controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(lab['name'].split('_')[0]) \
    #                     + '|' + Prompt.CONTROLLER_0 \
    #                     + '|{}'.format(node_name_in_ini) \
    #                     + '|{}'.format(normalized_name)

    if controller0_node.telnet_conn:
        controller0_node.telnet_conn.close()

    output_dir = ProjVar.get_var('LOG_DIR')
    controller0_node.telnet_conn = install_helper.open_telnet_session(
        controller0_node, output_dir)
    controller0_node.telnet_conn.login()
    controller0_node.telnet_conn.exec_cmd("xterm")

    LOG.tc_step("Verify install-clone status ....")
    install_helper.check_clone_status(
        tel_net_session=controller0_node.telnet_conn)

    LOG.info("Source Keystone user admin environment ...")
    # controller0_node.telnet_conn.exec_cmd("cd; source /etc/platform/openrc")

    LOG.tc_step("Checking controller-0 hardware ....")
    install_helper.check_cloned_hardware_status('controller-0')

    if system_mode == 'duplex':
        LOG.tc_step("Booting controller-1 ... ")
        boot_interfaces = lab['boot_device_dict']
        install_helper.open_vlm_console_thread('controller-1',
                                               boot_interface=boot_interfaces,
                                               vlm_power_on=True,
                                               wait_for_thread=True)

        LOG.info("waiting for {} to boot ...".format(controller1))
        LOG.info("Verifying {} is Locked, Disabled and Online ...".format(
            controller1))
        system_helper.wait_for_hosts_states(
            controller1, check_interval=20, use_telnet=True,
            con_telnet=controller0_node.telnet_conn,
            administrative=HostAdminState.LOCKED,
            operational=HostOperState.DISABLED,
            availability=HostAvailState.ONLINE)

        LOG.info("Unlocking {} ...".format(controller1))
        rc, output = host_helper.unlock_host(
            controller1, use_telnet=True,
            con_telnet=controller0_node.telnet_conn)
        assert rc == 0, "Host {} unlock failed: {}".format(controller1, output)

        LOG.info("Host {} unlocked successfully ...".format(controller1))
        LOG.info("Host controller-1 booted successfully... ")

        LOG.tc_step("Checking controller-1 hardware ....")
        install_helper.check_cloned_hardware_status(controller1)

    # LOG.tc_step("Customizing the cloned system ....")
    LOG.info("Changing the OAM IP configuration ... ")
    install_helper.update_oam_for_cloned_system(system_mode=system_mode)

    LOG.tc_step("Downloading lab specific license, config and scripts ....")
    software_version = system_helper.get_sw_version()
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[software_version]
    install_helper.download_lab_config_files(
        lab, install_clone_setup['build_server'], load_path)

    LOG.tc_step("Running lab cleanup to removed source attributes ....")
    install_helper.run_setup_script(script='lab_cleanup')

    LOG.tc_step(
        "Running lab setup script to upadate cloned system attributes ....")
    rc, output = install_helper.run_lab_setup()
    assert rc == 0, "Lab setup run failed: {}".format(output)

    time.sleep(30)

    LOG.tc_step(
        "Checking config status of controller-0 and perform lock/unlock if necessary..."
    )
    if system_helper.get_host_values(
            'controller-0', 'config_status')[0] == 'Config out-of-date':
        host_helper.lock_unlock_controllers()

    LOG.tc_step("Verifying system health after restore ...")
    system_helper.wait_for_all_alarms_gone(timeout=300)
    rc, failed = system_helper.get_system_health_query()
    assert rc == 0, "System health not OK: {}".format(failed)
def test_horizon_host_details_display(host_inventory_pg, host_name):
    """
    Test the host details display:

    Setups:
        - Login as Admin
        - Go to Admin > Platform > Host Inventory > Controller-0

    Test Steps:
        - Test host controller-0 overview display
        - Test host controller-0 processor display
        - Test host controller-0 memory display
        - Test host controller-0 storage display
        - Test host controller-0 ports display
        - Test host controller-0 lldp display

    Teardown:
        - Logout
    """
    host_table = host_inventory_pg.hosts_table(host_name)
    host_details_pg = host_inventory_pg.go_to_host_detail_page(host_name)

    # OVERVIEW TAB
    LOG.tc_step('Test host: {} overview display'.format(host_name))
    host_details_pg.go_to_overview_tab()
    horizon_vals = host_details_pg.host_detail_overview(
        host_table.driver).get_content()
    fields_map = host_details_pg.host_detail_overview(
        host_table.driver).OVERVIEW_INFO_HEADERS_MAP
    cli_host_vals = system_helper.get_host_values(host_name,
                                                  list(fields_map.keys()),
                                                  rtn_dict=True)
    for field in fields_map:
        horizon_header = fields_map[field]
        cli_host_val = str(cli_host_vals[field])
        horizon_val = horizon_vals.get(horizon_header)
        if horizon_val is None:
            # Horizon renders missing values as the literal string 'None'
            horizon_val = 'None'
            assert cli_host_val == horizon_val, '{} display incorrectly'.format(
                horizon_header)
        else:
            assert cli_host_val.upper() in horizon_val.upper(
            ), '{} display incorrectly'.format(horizon_header)
    LOG.info('Host: {} overview display correct'.format(host_name))

    # PROCESSOR TAB
    LOG.tc_step('Test host {} processor display'.format(host_name))
    host_details_pg.go_to_processor_tab()
    cpu_table = table_parser.table(
        cli.system('host-cpu-list {}'.format(host_name))[1])
    expt_cpu_info = {
        'Processor Model:':
            table_parser.get_values(cpu_table, 'processor_model')[0],
        'Processors:':
            str(len(set(table_parser.get_values(cpu_table, 'processor'))))
    }
    horizon_cpu_info = host_details_pg.inventory_details_processor_info.get_content(
    )
    assert horizon_cpu_info['Processor Model:'] == expt_cpu_info[
        'Processor Model:']
    assert horizon_cpu_info['Processors:'] == expt_cpu_info['Processors:']

    # MEMORY TABLE
    LOG.tc_step('Test host {} memory display'.format(host_name))
    checking_list = ['mem_total(MiB)', 'mem_avail(MiB)']
    host_details_pg.go_to_memory_tab()
    memory_table = table_parser.table(
        cli.system('host-memory-list {}'.format(host_name))[1])
    colume_names = host_details_pg.memory_table.column_names
    processor_list = table_parser.get_values(memory_table, colume_names[0])
    cli_memory_table_dict = table_parser.row_dict_table(memory_table,
                                                        colume_names[0],
                                                        lower_case=False)
    for processor in processor_list:
        horizon_vm_pages_val = host_details_pg.get_memory_table_info(
            processor, colume_names[2])
        horizon_memory_val = host_details_pg.get_memory_table_info(
            processor, 'Memory')
        if cli_memory_table_dict[processor][
                'hugepages(hp)_configured'] == 'False':
            # No hugepages configured: horizon should show no VM pages entry
            assert horizon_vm_pages_val is None, 'Horizon {} display incorrectly'.format(
                colume_names[2])
        else:
            for field in checking_list:
                # NOTE(review): assert message has a '{}' placeholder but no
                # .format(field) call, so it always prints literally
                assert cli_memory_table_dict[processor][
                    field] in horizon_memory_val, 'Memory {} display incorrectly'

    # STORAGE TABLE
    # This test will loop each table and test their display
    # Test may fail in following case:
    # 1. disk table's Size header eg. Size(GiB) used different unit such as
    #    Size (MiB), Size (TiB)
    # 2. lvg table may display different:
    #    Case 1: Name | State | Access | Size (GiB) | Avail Size(GiB) |
    #            Current Physical Volume - Current Logical Volumes
    #    Case 2: Name | State | Access | Size | Current Physical Volume -
    #            Current Logical Volumes
    #    Case 2 Size values in horizon are rounded by 2 digits but in CLI
    #    not rounded
    LOG.tc_step('Test host {} storage display'.format(host_name))
    host_details_pg.go_to_storage_tab()
    cmd_list = [
        'host-disk-list {}'.format(host_name),
        'host-disk-partition-list {}'.format(host_name),
        'host-lvg-list {}'.format(host_name),
        'host-pv-list {}'.format(host_name)
    ]
    table_names = [
        'disk table', 'disk partition table', 'local volume groups table',
        'physical volumes table'
    ]
    horizon_storage_tables = [
        host_details_pg.storage_disks_table,
        host_details_pg.storage_partitions_table,
        host_details_pg.storage_lvg_table, host_details_pg.storage_pv_table
    ]
    cli_storage_tables = []
    for cmd in cmd_list:
        cli_storage_tables.append(table_parser.table(cli.system(cmd)[1]))

    for i in range(len(horizon_storage_tables)):
        horizon_table = horizon_storage_tables[i]
        unique_key = horizon_table.column_names[0]
        horizon_row_dict_table = host_details_pg.get_horizon_row_dict(
            horizon_table, key_header_index=0)
        cli_table = cli_storage_tables[i]
        # Reverse-lookup: map the horizon column header back to the CLI
        # field name used as the row key
        table_dict_unique_key = list(horizon_table.HEADERS_MAP.keys())[list(
            horizon_table.HEADERS_MAP.values()).index(unique_key)]
        cli_row_dict_storage_table = table_parser.row_dict_table(
            cli_table, table_dict_unique_key, lower_case=False)
        for key_header in horizon_row_dict_table:
            for cli_header in horizon_table.HEADERS_MAP:
                horizon_header = horizon_table.HEADERS_MAP[cli_header]
                horizon_row_dict = horizon_row_dict_table[key_header]
                cli_row_dict = cli_row_dict_storage_table[key_header]
                # Solve parser issue: e.g. Size (GiB)' should be '558.029'
                # not ['5589.', '029']
                cli_val = cli_row_dict[cli_header]
                if isinstance(cli_val, list):
                    cli_row_dict[cli_header] = ''.join(cli_val)
                assert horizon_row_dict[horizon_header] == cli_row_dict[cli_header], \
                    'In {}: disk: {} {} display incorrectly'.format(table_names[i], key_header, horizon_header)
        LOG.info('{} display correct'.format(table_names[i]))

    # PORT TABLE
    LOG.tc_step('Test host {} port display'.format(host_name))
    host_details_pg.go_to_ports_tab()
    horizon_port_table = host_details_pg.ports_table()
    cli_port_table = table_parser.table(
        cli.system('host-ethernet-port-list {}'.format(host_name))[1])
    horizon_row_dict_port_table = host_details_pg.get_horizon_row_dict(
        horizon_port_table, key_header_index=0)
    cli_row_dict_port_table = table_parser.row_dict_table(cli_port_table,
                                                          'name',
                                                          lower_case=False)
    for ethernet_name in cli_row_dict_port_table:
        for cli_header in horizon_port_table.HEADERS_MAP:
            horizon_header = horizon_port_table.HEADERS_MAP[cli_header]
            horizon_row_dict = horizon_row_dict_port_table[ethernet_name]
            cli_row_dict = cli_row_dict_port_table[ethernet_name]
            if cli_header not in cli_row_dict and cli_header == 'mac address':
                # CLI column name differs from the horizon mapping key
                cli_val = cli_row_dict['macaddress']
            else:
                cli_val = cli_row_dict[cli_header]
            horizon_val = horizon_row_dict[horizon_header]
            # Solve table parser issue: MAC Address returns list eg:
            # ['a4:bf:01:35:4a:', '32']
            if isinstance(cli_val, list):
                cli_val = ''.join(cli_val)
            assert cli_val in horizon_val, '{} display incorrectly'.format(
                horizon_header)

    # LLDP TABLE
    LOG.tc_step('Test host {} lldp display'.format(host_name))
    host_details_pg.go_to_lldp_tab()
    lldp_list_table = table_parser.table(
        cli.system('host-lldp-neighbor-list {}'.format(host_name))[1])
    lldp_uuid_list = table_parser.get_values(lldp_list_table, 'uuid')
    horizon_lldp_table = host_details_pg.lldp_table()
    cli_row_dict_lldp_table = {}
    horizon_row_dict_lldp_table = host_details_pg.get_horizon_row_dict(
        horizon_lldp_table, key_header_index=1)
    for uuid in lldp_uuid_list:
        cli_row_dict = {}
        lldp_show_table = table_parser.table(
            cli.system('lldp-neighbor-show {}'.format(uuid))[1])
        # Rows are keyed by the neighbor's port identifier
        row_dict_key = table_parser.get_value_two_col_table(
            lldp_show_table, 'port_identifier')
        for cli_header in horizon_lldp_table.HEADERS_MAP:
            horizon_header = horizon_lldp_table.HEADERS_MAP[cli_header]
            horizon_row_dict = horizon_row_dict_lldp_table[row_dict_key]
            cli_row_dict[cli_header] = table_parser.get_value_two_col_table(
                lldp_show_table, cli_header)
            cli_row_dict_lldp_table[row_dict_key] = cli_row_dict
            assert cli_row_dict[cli_header] == horizon_row_dict[horizon_header], \
                'lldp neighbor:{} {} display incorrectly'.format(row_dict_key, horizon_header)

    horizon.test_result = True
def enable_disable_murano(enable=True, enable_disable_agent=False,
                          fail_ok=False, con_ssh=None,
                          auth_info=Tenant.get('admin')):
    """
    Enable/Disable Murano service and murano agent on the system

    Args:
        enable (bool): True to enable the Murano service, False to disable it
        enable_disable_agent (bool): when True, also set the
            "disable_murano_agent" engine service parameter to match the
            requested service state
        fail_ok (bool): whether to return a non-zero code instead of raising
            when some services fail to reach enabled-active state
        con_ssh (SSHClient):
        auth_info (dict)

    Returns (tuple): (code, msg)
        (0, <success message>) - service enabled/disabled successfully
        (1, <error output>)    - a helper call failed (only when fail_ok
                                 allows the helper to return instead of raise)
    """
    if enable:
        msg = "Enabled Murano Service Successfully"
        ret, out = system_helper.enable_service('murano', con_ssh=con_ssh,
                                                auth_info=auth_info,
                                                fail_ok=fail_ok)
        if ret == 1:
            return 1, out

        if enable_disable_agent:
            # value "false" means the murano agent is NOT disabled
            ret, out = system_helper.create_service_parameter(
                service="murano", section="engine",
                name="disable_murano_agent", value="false",
                modify_existing=True)
            if ret != 0:
                return 1, out
    else:
        msg = "Disabled Murano Service Successfully"
        ret, out = system_helper.disable_service('murano', con_ssh=con_ssh,
                                                 auth_info=auth_info,
                                                 fail_ok=fail_ok)
        if ret == 1:
            return 1, out

        if enable_disable_agent:
            ret, out = system_helper.create_service_parameter(
                service="murano", section="engine",
                name="disable_murano_agent", value="true",
                modify_existing=True)
            if ret != 0:
                return 1, out

    # Enabling/disabling a service can leave the controller config
    # out-of-date; lock/unlock standby and swact to apply the new config.
    if ret == 0 and system_helper.get_host_values(
            'controller-0', 'config_status')[0] == 'Config out-of-date':
        ret, out = host_helper.lock_unlock_controllers(alarm_ok=True)
        if ret == 1:
            return 1, out

    # NOTE: the original code overwrote msg with a failure message while
    # still returning code 0; all failure paths already return above, so a
    # return here is always the success case.
    return 0, msg
def is_host_provisioned(host, con_ssh=None):
    """Return True if the host's 'invprovision' field reads 'provisioned'."""
    state = system_helper.get_host_values(host, "invprovision",
                                          con_ssh=con_ssh)[0]
    LOG.info("Host {} is {}".format(host, state))
    return state.strip() == "provisioned"
def upgrade_host(host, timeout=InstallTimeout.UPGRADE, fail_ok=False,
                 con_ssh=None, auth_info=Tenant.get('admin_platform'),
                 lock=False, unlock=False):
    """
    Upgrade given host

    Args:
        host (str):
        timeout (int): MAX seconds to wait for host to become online after
            unlocking
        fail_ok (bool):
        con_ssh (SSHClient):
        auth_info (str):
        lock (bool): lock the host first if it is currently unlocked
        unlock (bool): unlock the host after the upgrade completes

    Returns (tuple):
        (0, "Host is upgraded and in online state.")
        (1, "Cli host upgrade rejected. Applicable only if fail_ok")
        (2, "Host failed data migration. Applicable only if fail_ok")
        (3, "Host did not come online after upgrade. Applicable if fail_ok")
        (4, "Host fail lock before starting upgrade". Applicable if lock arg
            is True and fail_ok")
        (5, "Host fail to unlock after host upgrade. Applicable if unlock
            arg is True and fail_ok")
        (6, "Host unlocked after upgrade, but alarms are not cleared after
            120 seconds. Applicable if unlock arg is True and fail_ok")
    """
    LOG.info("Upgrading host {}...".format(host))

    if lock:
        if system_helper.get_host_values(
                host, 'administrative',
                con_ssh=con_ssh)[0] == HostAdminState.UNLOCKED:
            message = "Host is not locked. Locking host before starting " \
                      "upgrade"
            LOG.info(message)
            rc, output = host_helper.lock_host(host, con_ssh=con_ssh,
                                               fail_ok=True)
            # rc of -1 indicates host was already locked; treat as success
            if rc != 0 and rc != -1:
                err_msg = "Host {} fail on lock before starting upgrade: " \
                          "{}".format(host, output)
                if fail_ok:
                    return 4, err_msg
                else:
                    raise exceptions.HostError(err_msg)

    if system_helper.is_aio_simplex():
        # AIO simplex follows a dedicated upgrade procedure
        exitcode, output = simplex_host_upgrade(con_ssh=con_ssh)
        return exitcode, output

    exitcode, output = cli.system('host-upgrade', host, ssh_client=con_ssh,
                                  fail_ok=True, auth_info=auth_info,
                                  timeout=timeout)
    if exitcode == 1:
        err_msg = "Host {} cli upgrade host failed: {}".format(host, output)
        if fail_ok:
            return 1, err_msg
        else:
            raise exceptions.HostError(err_msg)

    # sleep for 180 seconds to let host be re-installed with upgrade release
    time.sleep(180)

    if not system_helper.wait_for_host_values(
            host, timeout=timeout, check_interval=60,
            availability=HostAvailState.ONLINE, con_ssh=con_ssh,
            fail_ok=fail_ok):
        err_msg = "Host {} did not become online after upgrade".format(host)
        if fail_ok:
            return 3, err_msg
        else:
            raise exceptions.HostError(err_msg)

    if host.strip() == "controller-1":
        # controller-1 upgrade includes the upgrade data migration step
        rc, output = _wait_for_upgrade_data_migration_complete(
            timeout=timeout, auth_info=auth_info, fail_ok=fail_ok,
            con_ssh=con_ssh)
        if rc != 0:
            err_msg = "Host {} upgrade data migration failure: {}".format(
                host, output)
            if fail_ok:
                return 2, err_msg
            else:
                raise exceptions.HostError(err_msg)

    if unlock:
        rc, output = host_helper.unlock_host(host, fail_ok=True,
                                             available_only=True)
        if rc != 0:
            # fix: original format string had no placeholder for output,
            # so the unlock failure details were silently dropped
            err_msg = "Host {} fail to unlock after host upgrade: " \
                      "{}".format(host, output)
            if fail_ok:
                return 5, err_msg
            else:
                raise exceptions.HostError(err_msg)

        # wait until 400.001 alarms get cleared
        if not system_helper.wait_for_alarm_gone("400.001", fail_ok=True):
            err_msg = "Alarms did not clear after host {} upgrade and " \
                      "unlock: ".format(host)
            if fail_ok:
                return 6, err_msg
            else:
                raise exceptions.HostError(err_msg)

    LOG.info("Upgrading host {} complete ...".format(host))
    return 0, None
def make_sure_all_hosts_locked(con_ssh, max_tries=5):
    """
    Make sure all the hosts are locked before doing system restore.

    Args:
        con_ssh: ssh connection to the target lab
        max_tries: number of times to try before failing the entire test
            case when any hosts keep failing to lock.

    Return:
        None

    Raises:
        AssertionError: if some hosts could not be locked (or force-locked)
            within max_tries attempts.
    """
    LOG.info('System restore procedure requires to lock all nodes except the '
             'active controller/controller-0')
    LOG.info('current host list before trying to lock them')
    cli.system('host-list')

    base_cmd = 'host-lock'
    locked_offline = {
        'administrative': HostAdminState.LOCKED,
        'availability': HostAvailState.OFFLINE
    }

    for tried in range(1, max_tries + 1):
        hosts = [
            h for h in system_helper.get_hosts(administrative='unlocked',
                                               con_ssh=con_ssh)
            if h != 'controller-0'
        ]
        if not hosts:
            LOG.info(
                'all hosts all locked except the controller-0 after tried:{}'.
                format(tried))
            break

        # plain lock on the first attempt; force-lock (-f) on retries
        cmd = base_cmd if tried == 1 else base_cmd + ' -f'

        locking = []
        already_locked = 0
        for host in hosts:
            LOG.info('try:{} locking:{}'.format(tried, host))
            admin_state = system_helper.get_host_values(host,
                                                        'administrative',
                                                        con_ssh=con_ssh)[0]
            if admin_state != 'locked':
                code, output = cli.system(cmd + ' ' + host,
                                          ssh_client=con_ssh,
                                          fail_ok=True)
                if 0 != code:
                    LOG.warn('Failed to lock host:{} using CLI:{}'.format(
                        host, cmd))
                else:
                    locking.append(host)
            else:
                already_locked += 1

        if locking:
            # typo fix: 'Wating' -> 'Waiting'
            LOG.info(
                'Waiting for those accepted locking instructions to be '
                'locked: try:{}'.format(tried))
            system_helper.wait_for_hosts_states(locking, con_ssh=con_ssh,
                                                timeout=600,
                                                **locked_offline)
        elif already_locked == len(hosts):
            LOG.info(
                'all hosts all locked except the controller-0 after tried:{}'.
                format(tried))
            break
        else:
            LOG.info('All hosts were rejecting to lock after tried:{}'.format(
                tried))
    else:
        # loop exhausted without break: some hosts never locked
        cli.system('host-list', ssh_client=con_ssh)
        LOG.info('Failed to lock or force-lock some of the hosts')
        assert False, 'Failed to lock or force-lock some of the hosts after tried:{} times'.\
            format(max_tries)

    cli.system('host-list', ssh_client=con_ssh)
    code, output = cli.system('host-list', ssh_client=con_ssh, fail_ok=True)
    LOG.debug('code:{}, output:{}'.format(code, output))