def test_remote_cli():
    LOG.info("Download openrc files from horizon")
    horizon_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon')
    tenant1 = Tenant.get('tenant1')['tenant']
    tenant2 = Tenant.get('tenant2')['tenant']
    admin_openrc = '{}-openrc.sh'.format(Tenant.get('admin')['tenant'])
    tenant1_openrc = '{}-openrc.sh'.format(tenant1)
    tenant2_openrc = '{}-openrc.sh'.format(tenant2)

    from utils.horizon.pages import loginpage
    driver = HorizonDriver.get_driver()
    login_pg = loginpage.LoginPage(driver)
    login_pg.go_to_target_page()
    home_pg = login_pg.login('admin', 'Li69nux*')

    # Download the openrc file for each project in turn and verify that
    # each file lands in the horizon log directory.
    home_pg.download_rc_v3()
    assert os.path.exists(os.path.join(horizon_dir, admin_openrc)), \
        "{} not found after download".format(admin_openrc)

    home_pg.change_project(name=tenant1)
    home_pg.download_rc_v3()
    assert os.path.exists(os.path.join(horizon_dir, tenant1_openrc)), \
        "{} not found after download".format(tenant1_openrc)

    home_pg.change_project(name=tenant2)
    home_pg.download_rc_v3()
    assert os.path.exists(os.path.join(horizon_dir, tenant2_openrc)), \
        "{} not found after download".format(tenant2_openrc)

    # Sanity-check a few read-only helpers through the remote CLI client.
    RemoteCLIClient.get_remote_cli_client()
    nova_helper.get_basic_flavor()
    cinder_helper.get_volume_qos_list()
    glance_helper.get_images()
    system_helper.get_computes()
    ceilometer_helper.get_alarms()
    keystone_helper.is_https_enabled()

def get_host():
    """Pick a target host based on the system type: controller-0 on AIO
    simplex, the standby controller on AIO duplex, otherwise the first
    available compute node."""
    if system_helper.is_aio_simplex():
        hostname = 'controller-0'
    elif system_helper.is_aio_duplex():
        hostname = system_helper.get_standby_controller_name()
    else:
        hostname = system_helper.get_computes(
            availability=HostAvailState.AVAILABLE)[0]
    return hostname

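# For illustration, a minimal sketch of how get_host() might feed the SSH
# helper used later in this module. The test name below is hypothetical;
# host_helper.ssh_to_host and exec_cmd are the same helpers used in
# test_detect_failed_compute further down.
def test_check_host_uptime():
    host = get_host()
    with host_helper.ssh_to_host(host) as node_ssh:
        # exec_cmd returns (rc, output); keep the output only
        uptime = node_ssh.exec_cmd(cmd='uptime', get_exit_code=False)[1]
        LOG.info("{} uptime: {}".format(host, uptime))
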
def prepare_modify_cpu(request):
    """
    Finds the first unlocked compute node and creates a cpu profile for it
    via the sysinv REST API.

    Returns (tuple): (name of the host, uuid of the host,
                      uuid of the new cpu profile)
    """
    computes = system_helper.get_computes(
        administrative=HostAdminState.UNLOCKED)
    if not computes:
        skip("There were no unlocked compute nodes.")

    host = computes[0]
    uuid = system_helper.get_host_values(host=host, fields='uuid')[0]

    headers = get_headers()
    url = html_helper.create_url(IP_ADDR, HTTPPort.SYS_PORT,
                                 HTTPPort.SYS_VER, 'iprofile')
    data = {
        'profilename': 'test_compute_profile',
        'profiletype': 'cpu',
        'ihost_uuid': uuid
    }
    resp = html_helper.post_request(url, headers=headers, data=data,
                                    verify=False)
    iprofile_uuid = resp['uuid']
    LOG.info("The new profile uuid is: {}".format(iprofile_uuid))

    def cleanup():
        # Re-apply the profile to restore the host, then delete the profile.
        host_helper.apply_host_cpu_profile(host, iprofile_uuid)
        url_ = html_helper.create_url(
            IP_ADDR, HTTPPort.SYS_PORT, HTTPPort.SYS_VER,
            'iprofile/{}'.format(iprofile_uuid))
        html_helper.delete_request(url_, headers=headers, verify=False)

    request.addfinalizer(cleanup)

    return host, uuid, iprofile_uuid

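# A minimal sketch of how a test might consume prepare_modify_cpu(). The
# fixture wrapper and test below are hypothetical (assuming `fixture` is
# imported from pytest, as `skip` is above); the returned tuple and the
# finalizer (re-apply + delete of the profile) match the code above.
@fixture(scope='function')
def modify_cpu_setup(request):
    return prepare_modify_cpu(request)


def test_modify_cpu(modify_cpu_setup):
    host, host_uuid, profile_uuid = modify_cpu_setup
    LOG.info("Applying cpu profile {} to host {}".format(profile_uuid, host))
    host_helper.apply_host_cpu_profile(host, profile_uuid)
    # Cleanup runs automatically via the finalizer registered in
    # prepare_modify_cpu.
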
def test_lock_unlock_compute_hosts(no_simplex, no_duplex):
    """
    Lock and unlock each compute host, waiting for the host and the
    stx-openstack application to recover after each step.
    """
    compute_hosts = system_helper.get_computes()
    LOG.info("Compute nodes found: {}".format(len(compute_hosts)))
    for host in compute_hosts:
        LOG.info("Compute Host: {}".format(host))

        # Lock the host, wait for it to settle, then wait for the
        # stx-openstack app to return to the applied state.
        host_helper.lock_host(host=host, fail_ok=False)
        host_helper.wait_for_hosts_ready(hosts=host)
        container_helper.wait_for_apps_status(apps="stx-openstack",
                                              status=AppStatus.APPLIED,
                                              timeout=600,
                                              check_interval=60)

        # Unlock the host and wait for it to come back ready.
        host_helper.unlock_host(host=host, fail_ok=False)
        host_helper.wait_for_hosts_ready(hosts=host)

def test_detect_failed_compute(no_simplex, no_duplex):
    con_ssh = ssh.ControllerClient.get_active_controller()
    active_controller = system_helper.get_active_controller_name()
    compute_host = system_helper.get_computes(
        administrative=HostAdminState.UNLOCKED,
        operational=HostOperState.ENABLED,
        availability=HostAvailState.AVAILABLE)[0]

    compute_su_prompt = r'.*compute\-([0-9]){1,}\:/home/sysadmin#'
    cmd_get_offset = ("ntpq -p | grep {} -A1 | "
                      "tail -1 | awk '{{print$8}}'".format(active_controller))
    cmd_magic_keys_enable = "echo 1 > /proc/sys/kernel/sysrq"
    cmd_get_start_date = ("python -c \"import datetime; "
                          "print str(datetime.datetime.now())[:-3]\"")
    cmd_get_end_date = ("cat /var/log/mtcAgent.log | "
                        "grep \"{} MNFA new candidate\" | "
                        "tail -1 | awk '{{print$1}}'".format(compute_host))
    cmd_trigger_reboot = "echo b > /proc/sysrq-trigger"

    res = list()
    for i in range(20):
        LOG.tc_step("Start of iter {}".format(i))
        with host_helper.ssh_to_host(compute_host) as node_ssh:
            # ntpq reports the offset in milliseconds; convert to seconds.
            offset = float(
                node_ssh.exec_cmd(cmd=cmd_get_offset,
                                  get_exit_code=False)[1]) / 1000
            # Become root, enable the magic sysrq keys, record the start
            # time, then force an immediate reboot via sysrq-trigger.
            node_ssh.send_sudo(cmd="su")
            node_ssh.expect(compute_su_prompt)
            node_ssh.send_sudo(cmd=cmd_magic_keys_enable)
            node_ssh.expect(compute_su_prompt)
            st = node_ssh.exec_cmd(cmd=cmd_get_start_date,
                                   get_exit_code=False,
                                   blob=compute_su_prompt)[1]
            node_ssh.exec_sudo_cmd(cmd_trigger_reboot, get_exit_code=False)

        system_helper.wait_for_hosts_states(
            compute_host, check_interval=20,
            availability=HostAvailState.AVAILABLE)
        pods_health = kube_helper.wait_for_pods_healthy(
            check_interval=20, timeout=HostTimeout.REBOOT)
        assert pods_health is True, "Check PODs health has failed"

        # Adjust the recorded start time by the NTP offset so it is
        # comparable with the controller's mtcAgent timestamp.
        st_date = datetime.datetime.fromtimestamp(
            datetime.datetime.strptime(
                st, '%Y-%m-%d %H:%M:%S.%f').timestamp() - offset)
        et = con_ssh.exec_cmd(cmd=cmd_get_end_date, get_exit_code=False)[1]
        et_date = datetime.datetime.strptime(et, '%Y-%m-%dT%H:%M:%S.%f')
        diff = et_date - st_date
        LOG.info("\noffset = {}\nstart time = {}\nend time = {}".format(
            offset, st, et))
        LOG.info("\ndiff = {}".format(diff))
        res.append(diff)

    def calc_avg(lst):
        total = datetime.timedelta()
        for idx, item in enumerate(lst):
            LOG.info("Iter {}: {}".format(idx, item))
            total += item
        return total / len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))
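
# Side note: calc_avg() above could also be written with the built-in
# sum(), which accepts a timedelta start value; a minimal equivalent
# sketch (per-iteration logging omitted):
def calc_avg_compact(lst):
    return sum(lst, datetime.timedelta()) / len(lst)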