def wait_for_upgrade_states(states, timeout=60, check_interval=6,
                            fail_ok=False):
    """
    Wait for the upgrade state to change to the given value.

    Args:
        states (str): expected upgrade state
        timeout (int): max seconds to wait for the state
        check_interval (int): seconds between checks
        fail_ok (bool): if True, return False on timeout instead of raising

    Returns (bool): True if the expected state is reached, False otherwise
        (only when fail_ok=True)

    """
    end_time = time.time() + timeout
    if not states:
        raise ValueError("Expected host state(s) has to be specified via "
                         "keyword argument states")

    state_match = False
    while time.time() < end_time:
        table_ = system_upgrade_show()[1]
        act_state = table_parser.get_value_two_col_table(table_, "state")
        if act_state == states:
            state_match = True
            break

        time.sleep(check_interval)

    msg = "{} state was not reached".format(states)
    if state_match:
        return True
    if fail_ok:
        LOG.warning(msg)
        return False
    raise exceptions.TimeoutException(msg)

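# Usage sketch (illustrative only; the state string below is an assumed
# example value, not defined in this module):
#     if not wait_for_upgrade_states('data-migration-complete', timeout=300,
#                                    check_interval=10, fail_ok=True):
#         LOG.warning("Upgrade state was not reached; aborting the test step")
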
def wait_for_image_status(image_id, status='active',
                          timeout=ImageTimeout.STATUS_CHANGE,
                          check_interval=3, fail_ok=True, con_ssh=None,
                          auth_info=None):
    actual_status = None
    end_time = time.time() + timeout
    while time.time() < end_time:
        actual_status = get_image_values(image_id, fields='status',
                                         auth_info=auth_info,
                                         con_ssh=con_ssh)[0]
        if status.lower() == actual_status.lower():
            LOG.info("Image {} has reached status: {}".format(
                image_id, status))
            return True

        time.sleep(check_interval)
    else:
        msg = "Timed out waiting for image {} status to change to {}. " \
              "Actual status: {}".format(image_id, status, actual_status)
        if fail_ok:
            LOG.warning(msg)
            return False
        raise exceptions.TimeoutException(msg)

def _wait_for_images_deleted(images, timeout=ImageTimeout.STATUS_CHANGE,
                             fail_ok=True, check_interval=3, con_ssh=None,
                             auth_info=Tenant.get('admin')):
    """
    Wait for the given image(s) to disappear from the openstack image list.

    Args:
        images (list|str): image id(s) to check
        timeout (int):
        fail_ok (bool):
        check_interval (int):
        con_ssh:
        auth_info (dict):

    Returns (tuple): (<all_deleted> (bool), <deleted_images> (tuple))
        True and all given images if they are removed from the image list
        within the timeout period; False and the images deleted so far
        otherwise.

    """
    if isinstance(images, str):
        images = [images]

    imgs_to_check = list(images)
    imgs_deleted = []
    end_time = time.time() + timeout
    while time.time() < end_time:
        existing_imgs = get_images(con_ssh=con_ssh, auth_info=auth_info)
        # iterate over a copy so items can be removed from imgs_to_check
        for img in list(imgs_to_check):
            if img not in existing_imgs:
                imgs_to_check.remove(img)
                imgs_deleted.append(img)

        if not imgs_to_check:
            return True, tuple(imgs_deleted)

        time.sleep(check_interval)
    else:
        if fail_ok:
            return False, tuple(imgs_deleted)
        raise exceptions.TimeoutException(
            "Timed out waiting for all given images to be removed from "
            "openstack image list. Given images: {}. Images still exist: "
            "{}.".format(images, imgs_to_check))

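# Usage sketch (illustrative; the image ids are made-up placeholders):
#     all_deleted, deleted = _wait_for_images_deleted(
#         ['img-uuid-1', 'img-uuid-2'], fail_ok=True)
#     if not all_deleted:
#         LOG.warning("Some images were not removed from the image list")
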
def wait_for_upgrade_activate_complete(timeout=300, check_interval=60,
                                       fail_ok=False):
    upgrade_state = ''
    end_time = time.time() + timeout
    while time.time() < end_time:
        upgrade_state = get_upgrade_state()
        if "activation-complete" in upgrade_state:
            LOG.info('Upgrade activation-complete')
            return True

        time.sleep(check_interval)

    err_msg = "Upgrade activation did not complete after waiting for {} " \
              "seconds. Current state is {}".format(timeout, upgrade_state)
    if fail_ok:
        LOG.warning(err_msg)
        return False
    raise exceptions.TimeoutException(err_msg)

def import_load(load_path, timeout=120, con_ssh=None, fail_ok=False,
                upgrade_ver=None):
    # TODO: Need to support remote_cli. i.e., no hardcoded load_path, etc
    home_dir = HostLinuxUser.get_home()
    if upgrade_ver >= '17.07':
        load_path = '{}/bootimage.sig'.format(HostLinuxUser.get_home())
        rc, output = cli.system(
            'load-import {}/bootimage.iso'.format(home_dir), load_path,
            ssh_client=con_ssh, fail_ok=True)
    else:
        rc, output = cli.system('load-import', load_path,
                                ssh_client=con_ssh, fail_ok=True)

    if rc == 0:
        table_ = table_parser.table(output)
        id_ = table_parser.get_values(table_, "Value", Property='id').pop()
        soft_ver = table_parser.get_values(
            table_, "Value", Property='software_version').pop()
        LOG.info('Waiting to finish importing load id {} version {}'.format(
            id_, soft_ver))

        end_time = time.time() + timeout
        while time.time() < end_time:
            state = get_imported_load_state(id_, load_version=soft_ver,
                                            con_ssh=con_ssh)
            LOG.info("Import state {}".format(state))
            if "imported" in state:
                LOG.info("Importing load {} is completed".format(soft_ver))
                return [rc, id_, soft_ver]

            time.sleep(3)

        err_msg = "Timeout waiting to complete importing load {}".format(
            soft_ver)
        LOG.warning(err_msg)
        if fail_ok:
            return [1, err_msg]
        else:
            raise exceptions.TimeoutException(err_msg)
    else:
        err_msg = "CLI command rejected: {}".format(output)
        if fail_ok:
            return [1, err_msg]
        else:
            raise exceptions.CLIRejected(err_msg)

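# Usage sketch (illustrative; the path and version are assumed example
# values, not taken from this module):
#     code, *load_info = import_load('/home/sysadmin/bootimage.iso',
#                                    upgrade_ver='18.03', fail_ok=True)
#     if code == 0:
#         load_id, sw_version = load_info
#     else:
#         LOG.warning("Load import failed: {}".format(load_info[0]))
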
def wait_for_delete_imported_load(load_id, timeout=120, check_interval=5,
                                  fail_ok=False, con_ssh=None,
                                  auth_info=Tenant.get('admin_platform')):
    LOG.info("Waiting for imported load {} to be deleted from the "
             "load-list".format(load_id))
    end_time = time.time() + timeout
    while time.time() < end_time:
        table_ = table_parser.table(
            cli.system('load-list', ssh_client=con_ssh,
                       auth_info=auth_info)[1])
        table_ = table_parser.filter_table(table_, **{'id': load_id})
        if len(table_parser.get_values(table_, 'id')) == 0:
            return True
        else:
            if 'deleting' in table_parser.get_column(table_, 'state'):
                cli.system('load-delete', load_id, ssh_client=con_ssh,
                           fail_ok=True)

        time.sleep(check_interval)
    else:
        err_msg = "Timed out waiting for load {} to get deleted".format(
            load_id)
        if fail_ok:
            LOG.warning(err_msg)
            return False
        else:
            raise exceptions.TimeoutException(err_msg)

def _rsync_files_to_con1(con_ssh=None, central_region=False,
                         file_to_check=None):
    region = 'RegionOne' if central_region else None
    auth_info = Tenant.get('admin_platform', dc_region=region)
    if less_than_two_controllers(auth_info=auth_info, con_ssh=con_ssh):
        LOG.info("Less than two controllers on system. Skip copying file to "
                 "controller-1.")
        return

    LOG.info("rsync test files from controller-0 to controller-1 if not "
             "already done")
    stx_home = HostLinuxUser.get_home()
    if not file_to_check:
        file_to_check = '{}/images/tis-centos-guest.img'.format(stx_home)
    try:
        with host_helper.ssh_to_host("controller-1",
                                     con_ssh=con_ssh) as con_1_ssh:
            if con_1_ssh.file_exists(file_to_check):
                LOG.info("Test files already exist on controller-1. "
                         "Skip rsync.")
                return
    except Exception as e:
        LOG.error("Cannot ssh to controller-1. Skip rsync. "
                  "\nException caught: {}".format(e.__str__()))
        return

    cmd = "rsync -avr -e 'ssh -o UserKnownHostsFile=/dev/null -o " \
          "StrictHostKeyChecking=no ' " \
          "{}/* controller-1:{}".format(stx_home, stx_home)

    timeout = 1800
    with host_helper.ssh_to_host("controller-0",
                                 con_ssh=con_ssh) as con_0_ssh:
        LOG.info("rsync files from controller-0 to controller-1...")
        con_0_ssh.send(cmd)

        end_time = time.time() + timeout
        while time.time() < end_time:
            index = con_0_ssh.expect(
                [con_0_ssh.prompt, PASSWORD_PROMPT, Prompt.ADD_HOST],
                timeout=timeout, searchwindowsize=100)

            if index == 2:
                con_0_ssh.send('yes')

            if index == 1:
                con_0_ssh.send(HostLinuxUser.get_password())

            if index == 0:
                output = int(con_0_ssh.exec_cmd('echo $?')[1])
                if output in [0, 23]:
                    LOG.info("Test files are successfully copied to "
                             "controller-1 from controller-0")
                    break
                else:
                    raise exceptions.SSHExecCommandFailed(
                        "Failed to rsync files from controller-0 to "
                        "controller-1")
        else:
            raise exceptions.TimeoutException(
                "Timed out rsync files to controller-1")

def test_detection_of_failed_instance(launch_instances):
    con_ssh = ssh.ControllerClient.get_active_controller()
    start_date_cmd = ("python -c \"import datetime; "
                      "print str(datetime.datetime.now())[:-3]\"")
    kill_cmd = (start_date_cmd + "&& sudo pkill -SIGKILL qemu")
    vm_host = vm_helper.get_vm_host(launch_instances)
    vm_name = vm_helper.get_vm_name_from_id(launch_instances)
    end_date_cmd = ("grep -r \"{}\" /var/log/nfv-vim.log | "
                    "grep \"powering-off\" | "
                    "tail -1 | "
                    "awk '{{print$1}}'".format(vm_name))
    res = list()

    for i in range(20):
        LOG.tc_step("Start of iter {}".format(i))
        try:
            st = str()
            et = str()

            vm_helper.get_vms()
            with host_helper.ssh_to_host(vm_host,
                                         con_ssh=con_ssh) as con_0_ssh:
                end_time = time.time() + 120
                while time.time() < end_time:
                    con_0_ssh.send(cmd="pgrep qemu")
                    con_0_ssh.expect()
                    matches = re.findall("\n([0-9]+)\n",
                                         con_0_ssh.cmd_output)
                    time.sleep(5)
                    if matches:
                        break
                else:
                    raise exceptions.TimeoutException(
                        "Timed out waiting for qemu process")

                # NOTE: the block below is reconstructed; the original lines
                # between the expect() call and the end-time timeout were
                # redacted in the source. The prompt pattern, start-time
                # capture, and timestamp formats are assumptions.
                con_0_ssh.send(cmd=kill_cmd)
                index = con_0_ssh.expect(["Password:", con_0_ssh.prompt])
                if index == 0:
                    # sudo asked for a password before killing qemu
                    con_0_ssh.send(cmd=HostLinuxUser.get_password())
                    con_0_ssh.expect()
                # start timestamp printed by start_date_cmd
                st = con_0_ssh.cmd_output.splitlines()[-1].strip()

            # wait for nfv-vim to log the powering-off event for the VM
            end_time = time.time() + 120
            while time.time() < end_time:
                et = con_ssh.exec_cmd(end_date_cmd)[1].strip()
                if et:
                    break
                time.sleep(5)
            else:
                raise exceptions.TimeoutException(
                    "Timed out waiting for end time")

            # timestamp formats below are assumptions about the controller's
            # datetime output and the nfv-vim.log timestamp field
            st_date = datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S.%f")
            et_date = datetime.datetime.strptime(et.replace('T', ' '),
                                                 "%Y-%m-%d %H:%M:%S.%f")

            diff = et_date - st_date
            LOG.info("\nstart time = {}\nend time = {}".format(st, et))
            LOG.info("\ndiff = {}".format(diff))
            res.append(diff)
        finally:
            time.sleep(5)
            vm_helper.start_vms(launch_instances)

    def calc_avg(lst):
        rtrn_sum = datetime.timedelta()
        for i in lst:
            LOG.info("Iter {}: {}".format(lst.index(i), i))
            rtrn_sum += i
        return rtrn_sum / len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))