def remove():
    """Teardown: apply an empty iptables rules file and verify all custom
    ports are closed.

    Checks each custom port from the NATBox against the active
    controller, then (when a standby exists) swacts and re-checks on the
    newly-active controller.
    """
    LOG.fixture_step("Removing custom firewall rules")
    rules_dir = ProjVar.get_var('USER_FILE_DIR')
    empty_rules = rules_dir + "iptables-empty.rules"
    cli_client = get_cli_client(central_region=True)
    cli_client.exec_cmd('touch {}'.format(empty_rules))
    _modify_firewall_rules(empty_rules)

    active, standby = system_helper.get_active_standby_controllers()
    controller_ssh = ControllerClient.get_active_controller()

    def _check_custom_ports_closed(host):
        # Ports listed in the custom iptables file must be closed
        LOG.fixture_step("Verify custom ports on {}".format(host))
        for tcp_port in custom_ports:
            _verify_port_from_natbox(controller_ssh, tcp_port,
                                     port_expected_open=False)

    _check_custom_ports_closed(active)
    if standby:
        LOG.fixture_step("Swact {}".format(active))
        host_helper.swact_host(active)
        _check_custom_ports_closed(standby)
def get_user_data_file():
    """Return the path of a userdata file that restarts sshd on boot.

    Workaround for the VM evacuation testcase where the sshd daemon
    fails to start: the generated cloud-config userdata restarts sshd
    via runcmd. The file is created on the CLI client the first time
    this is called; later calls reuse the existing file.

    Returns (str): the file path of the userdata text file
    """
    auth_info = Tenant.get_primary()
    tenant = auth_info['tenant']
    user_data_file = "{}/userdata/{}_test_userdata.txt".format(
        ProjVar.get_var('USER_FILE_DIR'), tenant)
    client = get_cli_client()
    cmd = "test -e {}".format(user_data_file)
    rc = client.exec_cmd(cmd)[0]
    if rc != 0:
        # Generate cloud-config userdata that restarts sshd after boot
        cmd = "cat <<EOF > {}\n" \
              "#cloud-config\n\nruncmd: \n - /etc/init.d/sshd restart\n" \
              "EOF".format(user_data_file)
        # Use the logger instead of a leftover debug print to stdout
        LOG.debug(cmd)
        code, output = client.exec_cmd(cmd)
        LOG.info("Code: {} output: {}".format(code, output))
    return user_data_file
def get_custom_heat_files(file_name, file_dir=HEAT_CUSTOM_TEMPLATES,
                          cli_client=None):
    """Ensure a custom heat template exists on the client; download it from
    the test server when absent.

    Args:
        file_name (str): heat template file name
        file_dir (str): directory the template should live in
        cli_client: CLI/ssh client to check and download with

    Returns (str): full path of the heat template file

    Raises:
        exceptions.CommonError: if the file is still missing after download
    """
    if cli_client is None:
        cli_client = get_cli_client()

    template_path = '{}/{}'.format(file_dir, file_name)
    if cli_client.file_exists(file_path=template_path):
        return template_path

    LOG.debug('Create userdata directory if not already exists')
    cli_client.exec_cmd('mkdir -p {}'.format(file_dir), fail_ok=False)

    remote_source = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name
    downloaded = common.scp_from_test_server_to_user_file_dir(
        source_path=remote_source, dest_dir=file_dir, dest_name=file_name,
        timeout=300, con_ssh=cli_client)
    if downloaded is None:
        raise exceptions.CommonError(
            "Heat template file {} does not exist after download".format(
                template_path))

    return template_path
def scp_from_test_server_to_user_file_dir(source_path, dest_dir,
                                          dest_name=None, timeout=900,
                                          con_ssh=None, central_region=False):
    """Copy a file from the test file server to the user file directory.

    When USER_FILE_DIR equals the local TEMP_DIR (remote CLI setup) the
    file is scp'd to localhost; otherwise the copy is delegated to
    scp_from_test_server_to_active_controller.

    Args:
        source_path (str): path on the test file server
        dest_dir (str): destination directory
        dest_name (str|None): destination file name; defaults to the
            basename of source_path
        timeout (int): scp timeout in seconds
        con_ssh: client to copy with; defaults to the CLI client
        central_region (bool): passed through when picking the default client

    Returns (str): destination path of the copied file
    """
    if con_ssh is None:
        con_ssh = get_cli_client(central_region=central_region)
    if dest_name is None:
        dest_name = source_path.split(sep='/')[-1]

    if ProjVar.get_var('USER_FILE_DIR') != ProjVar.get_var('TEMP_DIR'):
        LOG.info("Copy file from test server to active controller")
        return scp_from_test_server_to_active_controller(
            source_path=source_path, dest_dir=dest_dir, dest_name=dest_name,
            timeout=timeout, con_ssh=con_ssh)

    LOG.info("Copy file from test server to localhost")
    dest_path = os.path.join(dest_dir, dest_name) if dest_name else dest_dir

    LOG.info('Check if file already exists on TiS')
    if con_ssh.file_exists(file_path=dest_path):
        LOG.info('dest path {} already exists. Return existing path'.format(
            dest_path))
        return dest_path

    os.makedirs(dest_dir, exist_ok=True)
    con_ssh.scp_on_dest(source_user=TestFileServer.USER,
                        source_ip=TestFileServer.SERVER,
                        source_path=source_path, dest_path=dest_path,
                        source_pswd=TestFileServer.PASSWORD,
                        timeout=timeout)
    return dest_path
def collect_object_files(request, ceph_backend_installed):
    """Stage *.sh object files into TEST_OBJ_DIR; remove them on teardown.

    Copies the shell scripts from the home directory into TEST_OBJ_DIR and
    registers a finalizer that deletes both the staged objects and any
    downloaded copies.
    """
    stage_cmd = "cd; mkdir {}; cp *.sh {}".format(TEST_OBJ_DIR, TEST_OBJ_DIR)
    obj_dir = get_obj_dir()
    cli_client = get_cli_client()
    cli_client.exec_cmd(stage_cmd)

    def _cleanup():
        staged_path = '{}/{}'.format(obj_dir, TEST_OBJ_DIR)
        downloads_path = '{}/downloads'.format(obj_dir)
        delete_object_file(staged_path, rm_dir=True, client=cli_client)
        delete_object_file(downloads_path, rm_dir=True, client=cli_client)

    request.addfinalizer(_cleanup)
def delete_file(get_custom_firewall_rule, request):
    """Build paths for the invalid iptables rules files and register a
    teardown that removes the invalid rules file.

    Returns (tuple): (invalid_rules_file, invalid_rules_path,
        firewall_rules_path, cli_client)
    """
    base_dir = ProjVar.get_var('USER_FILE_DIR')
    invalid_rules_file = '{}iptables.rules.invalid.file'.format(base_dir)
    invalid_rules_path = '{}iptables.rules.invalid'.format(base_dir)
    firewall_rules_path = get_custom_firewall_rule
    cli_client = get_cli_client()

    def _remove_invalid_file():
        LOG.fixture_step("Cleanup Remove file: {}".format(invalid_rules_file))
        cli_client.exec_cmd("rm {}".format(invalid_rules_file))

    request.addfinalizer(_remove_invalid_file)

    return (invalid_rules_file, invalid_rules_path, firewall_rules_path,
            cli_client)
def delete_object_file(object_path, rm_dir=False, client=None):
    """Delete the file(s) at object_path on the CLI client.

    When not using remote CLI, the same deletion is repeated on the
    standby controller as well.

    Args:
        object_path (str): path (or glob) to remove
        rm_dir (bool): also remove directories (adds -r to rm)
        client: client to run the deletion on; defaults to the CLI client

    Returns (bool): True
    """
    def _remove_via(ssh_client):
        # Only attempt removal when the path actually lists something
        rc, listing = ssh_client.exec_cmd("ls {}".format(object_path))
        if rc == 0:
            ssh_client.exec_cmd(
                'rm {} {}'.format('-r' if rm_dir else '', object_path))
            LOG.info("Files deleted {}: {}".format(object_path, listing))

    if not client:
        client = get_cli_client()
    _remove_via(client)

    if not ProjVar.get_var('REMOTE_CLI'):
        standby = system_helper.get_standby_controller_name()
        with host_helper.ssh_to_host(
                standby, username=HostLinuxUser.get_user(),
                password=HostLinuxUser.get_password()) as standby_ssh:
            _remove_via(standby_ssh)

    return True
def get_test_obj_file_names(directory=TEST_OBJ_DIR, pattern='.sh'):
    """List [name, size] pairs for object files under the user file dir.

    Creates the directory (populated with the home dir's *.sh scripts)
    if it does not exist yet.

    Args:
        directory (str): directory under USER_FILE_DIR to list
        pattern (str): grep pattern used to filter file names

    Returns (list): list of [file_name, file_size] lists
    """
    con_ssh = get_cli_client()
    rc, output = con_ssh.exec_cmd("test -d {}/{}".format(
        ProjVar.get_var('USER_FILE_DIR'), directory))
    if rc != 0:
        con_ssh.exec_cmd("cd; mkdir {}; cp *.sh {}".format(directory,
                                                           directory))

    # awk prints "<size> <name>" per matching entry
    rc, output = con_ssh.exec_cmd(
        "cd; ls -l {} | grep {} | awk ' {{print $5 \" \" $9}}'".format(
            directory, pattern))
    obj_files = []
    if rc == 0:
        for entry in output.splitlines():
            size, name = entry.split(' ')[0:2]
            obj_files.append([name.strip(), size.strip()])
    LOG.debug("obj_files: {}".format(obj_files))
    return obj_files
def get_large_img_file():
    """Download the large win_2012 guest image if there is enough space.

    Skips the download (returns None) when running against a local CLI
    client and the home filesystem reports less than 9G available.

    Returns (list|None): [name, size] info for the downloaded image
        file(s), or None when there is insufficient disk space (or the
        free-space check could not be parsed)
    """
    client = get_cli_client()
    if not ProjVar.get_var('REMOTE_CLI'):
        cmd = "df -h ~ | awk ' {print $4}'"
        rc, output = client.exec_cmd(cmd)
        if rc != 0:
            return None
        lines = output.split('\n')
        if len(lines) < 2:
            # Unexpected df output - treat as insufficient space
            return None
        avail = lines[1]
        # avail looks like e.g. '50G'. Parse with float() instead of
        # eval() - never evaluate command output as Python code.
        unit, size_str = avail[-1:], avail[:-1]
        try:
            avail_gb = float(size_str)
        except ValueError:
            return None
        # Require roughly 9G of headroom before downloading the image
        if unit != 'G' or avail_gb - 8 < 1:
            return None

    obj_dir = get_obj_dir()
    dest_dir = '{}/{}'.format(obj_dir, TEST_OBJ_DIR)
    glance_helper.scp_guest_image(img_os='win_2012', dest_dir=dest_dir,
                                  timeout=300, con_ssh=client)
    large_filename = GuestImages.IMAGE_FILES['win_2012'][2]
    large_file_info = get_test_obj_file_names(pattern=large_filename)
    return large_file_info
def is_image_storage_sufficient(img_file_path=None, guest_os=None,
                                min_diff=0.05, con_ssh=None,
                                image_host_ssh=None):
    """Check whether glance image storage can hold a new image created
    from the given image file.

    Args:
        img_file_path (str): e.g., ~/images/tis-centos-guest.img
        guest_os (str): used if img_file_path is not provided. e.g.,
            ubuntu_14, ge_edge, cgcs-guest, etc
        min_diff: minimum headroom required between available space and
            the image size. e.g., 0.1G
        con_ssh (SSHClient): tis active controller ssh client
        image_host_ssh (SSHClient): e.g., test server ssh where the
            image file is stored

    Returns (tuple): (is_sufficient (bool), file_size, avail_size|None)
        avail_size is None when ceph-backed image storage is detected
        (assumed sufficient).
    """
    if image_host_ssh is None:
        image_host_ssh = get_cli_client(central_region=True)
    file_size = get_image_size(img_file_path=img_file_path, guest_os=guest_os,
                               ssh_client=image_host_ssh)

    if con_ssh is None:
        region = 'RegionOne' if ProjVar.get_var('IS_DC') else None
        con_ssh = ControllerClient.get_active_controller(name=region)

    # Ceph-backed image storage is assumed to always have enough room
    if 0 == con_ssh.exec_cmd('ceph df')[0]:
        return True, file_size, None

    avail_size = get_avail_image_space(con_ssh=con_ssh)
    return avail_size - file_size >= min_diff, file_size, avail_size
def scp_guest_image(img_os='ubuntu_14', dest_dir=None, timeout=3600,
                    con_ssh=None):
    """Download a guest image file to dest_dir.

    Args:
        img_os (str): guest image os type. valid values: ubuntu,
            centos_7, centos_6
        dest_dir (str): where to save the downloaded image. Default is
            '~/images'
        timeout (int)
        con_ssh (SSHClient):

    Returns (str): full file name of downloaded image. e.g.,
        '~/images/ubuntu_14.qcow2'

    Raises:
        ValueError: if img_os is not a known image type
        exceptions.CommonError: if the image is missing after download
    """
    supported = list(GuestImages.IMAGE_FILES.keys())
    if img_os not in supported:
        raise ValueError(
            "Invalid guest image OS type provided. Valid values: {}".format(
                supported))

    if not dest_dir:
        dest_dir = GuestImages.DEFAULT['image_dir']

    LOG.info("Downloading guest image from test server...")
    dest_name = GuestImages.IMAGE_FILES[img_os][2]
    ts_source_name = GuestImages.IMAGE_FILES[img_os][0]
    if con_ssh is None:
        con_ssh = get_cli_client(central_region=True)

    if ts_source_name:
        # Image lives on the test server - scp it from there
        source_path = '{}/{}'.format(
            GuestImages.DEFAULT['image_dir_file_server'], ts_source_name)
        dest_path = common.scp_from_test_server_to_user_file_dir(
            source_path=source_path, dest_dir=dest_dir, dest_name=dest_name,
            timeout=timeout, con_ssh=con_ssh)
    else:
        # Image only exists on the TiS system - scp to localhost if needed
        dest_path = '{}/{}'.format(dest_dir, dest_name)
        if ProjVar.get_var('REMOTE_CLI') and not con_ssh.file_exists(
                dest_path):
            tis_source_path = '{}/{}'.format(
                GuestImages.DEFAULT['image_dir'], dest_name)
            common.scp_from_active_controller_to_localhost(
                source_path=tis_source_path, dest_path=dest_path,
                timeout=timeout)

    if not con_ssh.file_exists(dest_path):
        raise exceptions.CommonError(
            "image {} does not exist after download".format(dest_path))

    LOG.info("{} image downloaded successfully and saved to {}".format(
        img_os, dest_path))
    return dest_path
def create_image(name=None, image_id=None, source_image_file=None, volume=None,
                 visibility='public', force=None, store=None, disk_format=None,
                 container_format=None, min_disk=None, min_ram=None, tags=None,
                 protected=None, project=None, project_domain=None,
                 timeout=ImageTimeout.CREATE, con_ssh=None,
                 auth_info=Tenant.get('admin'), fail_ok=False,
                 ensure_sufficient_space=True, sys_con_for_dc=True,
                 wait_for_subcloud_sync=True, cleanup=None, hw_vif_model=None,
                 **properties):
    """
    Create an image with given criteria.

    Args:
        name (str): string to be included in image name
        image_id (str): id for the image to be created
        source_image_file (str|None): local image file to create image
            from. DefaultImage will be used if unset
        volume (str)
        disk_format (str): One of these: ami, ari, aki, vhd, vmdk, raw,
            qcow2, vdi, iso
        container_format (str):  One of these: ami, ari, aki, bare, ovf
        min_disk (int): Minimum size of disk needed to boot image
            (in gigabytes)
        min_ram (int):  Minimum amount of ram needed to boot image
            (in megabytes)
        visibility (str): public|private|shared|community
        protected (bool): Prevent image from being deleted.
        store (str): Store to upload image to
        force (bool)
        tags (str|tuple|list)
        project (str|None)
        project_domain (str|None)
        timeout (int): max seconds to wait for cli return
        con_ssh (SSHClient):
        auth_info (dict|None):
        fail_ok (bool):
        ensure_sufficient_space (bool)
        sys_con_for_dc (bool): create image on system controller if it's
            distributed cloud
        wait_for_subcloud_sync (bool)
        cleanup (str|None): add to teardown list. 'function', 'class',
            'module', 'session', or None
        hw_vif_model (None|str): if this is set, 'hw_vif_model' in
            properties will be overridden
        **properties: key=value pair(s) of properties to associate with
            the image

    Returns (tuple): (rtn_code(int), image_id(str), message(str))
        # 1, 2, 3 only applicable if fail_ok=True
        - (0, <id>, "Image <id> is created successfully")
        - (1, <id or ''>, <stderr>)    # openstack image create cli rejected
        - (2, <id>, "Image status is not active.")
        - (3, <id>, <msg>)    # created image id differs from requested
          image_id
    """
    # Use source image url if url is provided. Else use local img file.
    default_guest_img = GuestImages.IMAGE_FILES[
        GuestImages.DEFAULT['guest']][2]

    file_path = source_image_file
    if not file_path and not volume:
        # Fall back to the default guest image file when neither a source
        # file nor a volume is given
        img_dir = GuestImages.DEFAULT['image_dir']
        file_path = "{}/{}".format(img_dir, default_guest_img)

    if file_path:
        if file_path.startswith('~/'):
            file_path = file_path.replace('~', HostLinuxUser.get_home(), 1)
        file_path = os.path.normpath(file_path)
        # Infer image properties from the file name unless caller set them
        if 'win' in file_path and 'os_type' not in properties:
            properties['os_type'] = 'windows'
        elif 'ge_edge' in file_path and 'hw_firmware_type' not in properties:
            properties['hw_firmware_type'] = 'uefi'

    if hw_vif_model:
        # Explicit hw_vif_model overrides any value already in properties
        properties[ImageMetadata.VIF_MODEL] = hw_vif_model

    if sys_con_for_dc and ProjVar.get_var('IS_DC'):
        # Distributed cloud: create the image on the system controller
        # using SystemController-scoped credentials
        con_ssh = ControllerClient.get_active_controller('RegionOne')
        create_auth = Tenant.get(tenant_dictname=auth_info['tenant'],
                                 dc_region='SystemController').copy()
        image_host_ssh = get_cli_client(central_region=True)
    else:
        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller()
        image_host_ssh = get_cli_client()
        create_auth = auth_info

    if ensure_sufficient_space and not volume:
        # Skip (not fail) the calling test when glance storage is too small
        if not is_image_storage_sufficient(img_file_path=file_path,
                                           con_ssh=con_ssh,
                                           image_host_ssh=image_host_ssh)[0]:
            skip('Insufficient image storage for creating glance image '
                 'from {}'.format(file_path))

    source_str = file_path if file_path else ''
    known_imgs = [
        'cgcs-guest', 'tis-centos-guest', 'ubuntu', 'cirros', 'opensuse',
        'rhel', 'centos', 'win', 'ge_edge', 'vxworks',
        'debian-8-m-agent'
    ]
    name = name if name else 'auto'
    # Prefix the image name with a recognized guest type taken from either
    # the requested name or the source file path; otherwise derive a
    # prefix from the source file name (for/else runs when no match found)
    for img_str in known_imgs:
        if img_str in name:
            break
        elif img_str in source_str:
            name = img_str + '_' + name
            break
    else:
        if source_str:
            name_prefix = str(source_str.split(sep='/')[-1]).split(sep='.')[0]
            name = name_prefix + '_' + name

    name = common.get_unique_name(name_str=name, existing_names=get_images(),
                                  resource_type='image')
    LOG.info("Creating glance image: {}".format(name))

    if not disk_format:
        if not source_image_file:
            # default tis-centos-guest image is raw
            disk_format = 'raw'
        else:
            disk_format = 'qcow2'

    # None-valued entries are dropped by common.parse_args
    args_dict = {
        '--id': image_id,
        '--store': store,
        '--disk-format': disk_format,
        '--container-format': container_format if container_format else
        'bare',
        '--min-disk': min_disk,
        '--min-ram': min_ram,
        '--file': file_path,
        '--force': True if force else None,
        '--protected': True if protected else None,
        '--unprotected': True if protected is False else None,
        '--tag': tags,
        '--property': properties,
        '--project': project,
        '--project-domain': project_domain,
        '--volume': volume,
    }
    if visibility:
        # visibility is passed as a flag, e.g. --public
        args_dict['--{}'.format(visibility)] = True

    args_ = '{} {}'.format(
        common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name)

    try:
        LOG.info("Creating image {} with args: {}".format(name, args_))
        code, output = cli.openstack('image create', args_, ssh_client=con_ssh,
                                     fail_ok=fail_ok, auth_info=create_auth,
                                     timeout=timeout)
    except:
        # This is added to help debugging image create failure in case of
        # insufficient space; the original exception is re-raised
        con_ssh.exec_cmd('df -h', fail_ok=True, get_exit_code=False)
        raise

    table_ = table_parser.table(output)
    # Volume-based creation reports 'image_id' instead of 'id'
    field = 'image_id' if volume else 'id'
    actual_id = table_parser.get_value_two_col_table(table_, field)
    if cleanup and actual_id:
        # Register the image for teardown even if later checks fail
        ResourceCleanup.add('image', actual_id, scope=cleanup)

    if code > 1:
        # cli rejected (only reachable with fail_ok=True)
        return 1, actual_id, output

    in_active = wait_for_image_status(actual_id, con_ssh=con_ssh,
                                      auth_info=create_auth,
                                      fail_ok=fail_ok)
    if not in_active:
        return 2, actual_id, "Image status is not active."

    if image_id and image_id != actual_id:
        msg = "Actual image id - {} is different than requested id - {}.".\
            format(actual_id, image_id)
        if fail_ok:
            return 3, actual_id, msg
        raise exceptions.ImageError(msg)

    if wait_for_subcloud_sync:
        wait_for_image_sync_on_subcloud(image_id=actual_id)

    msg = "Image {} is created successfully".format(actual_id)
    LOG.info(msg)
    return 0, actual_id, msg