def _generate_tims_case_id(logical_id, tims_testplan_folder): ''' A tims case_id would link the uploaded results to the previously uploaded testcases that are already residing in TIMS. This function generates the case_id based on testcase logical_id ''' if not tims_testplan_folder: raise Exception("Tims testplan folder in not provided.") folder_xml_data = requests.get(TIMS_URL.\ format(tims_testplan_folder)) folder_json_data = xmltodict.parse(folder_xml_data.text) data_dq = Dq(folder_json_data) dict_of_test_cases = data_dq.contains('Case').\ contains('@docID|LogicalID', regex=True).reconstruct() list_of_test_cases = dict_of_test_cases['Tims']['Case'] for case in list_of_test_cases: if logical_id in case['LogicalID']: return case['@docID']
def get_interface_logical_output_bps(device,
                                     logical_interface,
                                     interface=None,
                                     extensive=False,
                                     output_dict=None):
    """Get logical output bps of a logical interface

    Args:
        device ('obj'): device object
        logical_interface ('str'): Logical interface to check output bps
        interface ('str'): interface name to pass in show command
        extensive ('bool'): Use extensive in show command
        output_dict ('dict'): Pass if dictionary already exist

    Returns:
        output-bps value of the logical interface, or None if the parser
        output is empty or the interface/counter is not found

    Raises:
        None
    """
    # Fix over previous revision: the parse call was wrapped in two nested
    # try/except blocks catching the same SchemaEmptyParserError, and an
    # unused local `result = True` was set; both removed.
    if output_dict:
        out = output_dict
    else:
        if interface:
            cmd = 'show interfaces {interface}'.format(interface=interface)
        else:
            cmd = 'show interfaces'
        if extensive:
            cmd = '{cmd} extensive'.format(cmd=cmd)
        try:
            out = device.parse(cmd)
        except SchemaEmptyParserError:
            # Empty parser output means no data for this interface.
            return None

    # Get first interface inorder to compare output-bps with other interfaces
    physical_intf_check = out.q.contains(
        '{interface}|.*output-bps.*'.format(interface=logical_interface),
        regex=True)

    # To handle list within list
    logical_interface_check = Dq(physical_intf_check.reconstruct())
    logical_intf_list = logical_interface_check.contains(
        'name|output-bps', regex=True).get_values('logical-interface')

    for l_i_dict in logical_intf_list:
        name = l_i_dict.get('name', None)
        if not name or name != logical_interface:
            continue

        # Counter lives under transit-traffic-statistics; bail out if
        # either the container or the counter itself is missing/zero.
        transit_traffic_statistic = l_i_dict.get('transit-traffic-statistics',
                                                 0)
        if not transit_traffic_statistic:
            return None

        output_bps = transit_traffic_statistic.get('output-bps', 0)
        if not output_bps:
            return None

        return output_bps

    return None
def free_up_disk_space(device,
                       destination,
                       required_size,
                       skip_deletion,
                       protected_files,
                       compact=False,
                       min_free_space_percent=None,
                       dir_output=None):
    '''Delete files to create space on device except protected files
        Args:
            device ('Obj') : Device object
            destination ('str') : Destination directory, i.e bootflash:/
            required_size ('int') : Check if enough space to fit given size in
                                    bytes. If this number is negative it will
                                    be assumed the required size is not
                                    available.
            skip_deletion ('bool') : Only performs checks, no deletion
            protected_files ('list') : List of file patterns that wont be
                                       deleted
            compact ('bool'): Compact option for n9k, used for size
                              estimation, default False
            min_free_space_percent ('int'): Minimum acceptable free disk
                                            space %. Optional,
            dir_output ('str'): Output of 'dir' command
                                if not provided, executes the cmd on device
        Returns:
            True if there is enough space after the operation, False otherwise
    '''
    # Check correct arguments provided
    if (min_free_space_percent is None and required_size is None):
        raise ValueError("Either 'required_size' or 'min_free_space_percent' "
                         "must be provided to perform disk space verification")

    # For n9k compact copy:
    # observationally, depending on release, the compacted image is 36-48% the
    # size of the original image. For now we'll use 60% as a conservative
    # estimate.
    if compact:
        required_size *= .6

    # Parse directory output to check
    dir_out = dir_output or device.execute('dir {}'.format(destination))

    # Get available free space on device
    available_space = device.api.get_available_space(directory=destination,
                                                     output=dir_out)

    # Check if available space is sufficient
    if min_free_space_percent:
        # Get total space
        total_space = device.api.get_total_space(directory=destination,
                                                 output=dir_out)
        # Get current available space in %
        avail_percent = available_space / total_space * 100
        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".\
                 format(avail=round(avail_percent, 2),
                        compare='less' if \
                        avail_percent < min_free_space_percent else 'greater',
                        target=min_free_space_percent))
        # calculate the required free space in bytes relative to the total disk
        # space based on given percentage if required size is also provided,
        # take the larger value of the two
        if required_size:
            required_size = round(
                max(required_size, min_free_space_percent * .01 * total_space))
        else:
            required_size = round(min_free_space_percent * .01 * total_space)

    # If there're not enough space, delete non-protected files
    if device.api.verify_enough_disk_space(required_size=required_size,
                                           directory=destination,
                                           dir_output=dir_out):
        if required_size < 0:
            log.info("Required disk space is unknown, will not delete files")
        else:
            log.info("Verified there is enough space on the device. "
                     "No files are deleted")
        return True

    if skip_deletion:
        log.error(
            "'skip_deletion' is set to True and there isn't enough space "
            "on the device, files cannot be deleted.")
        return False
    else:
        log.info("Deleting unprotected files to free up some space")
        log.info("Sending 'show version' to learn the current running images")
        # Running image(s) must never be deleted - add them to protected list.
        image = device.api.get_running_image()
        if isinstance(image, list):
            protected_files.extend([os.path.basename(i) for i in image])
        else:
            protected_files.extend([os.path.basename(image)])
        # convert to set for O(1) lookup
        protected_files = set(protected_files)
        parsed_dir_out = device.parse('dir {}'.format(destination),
                                      output=dir_out)
        dq = Dq(parsed_dir_out)

        # turn parsed dir output to a list of files for sorting
        # Large files are given priority when deleting
        file_list = []
        for file in dq.get_values('files'):
            file_list.append(
                (file, int(dq.contains(file).get_values('size')[0])))
        file_list.sort(key=lambda x: x[1], reverse=True)

        # append files to delete list until the deleted file sizes reaches the
        # target,
        to_delete = []
        # remaining_size tracks how many bytes are still required; once it
        # drops below the already-available space, deletion would free enough.
        remaining_size = required_size
        for file, size in file_list:
            # break if we reach the target
            if remaining_size < available_space:
                break
            # if the file is protected, move skip and check next one in the
            # list
            elif file in protected_files:
                continue
            to_delete.append(file)
            remaining_size -= size
        # if target can not be reached, aka loop is not broken, fail
        else:
            if min_free_space_percent:
                log.error(
                    'It is not possible to reach the target free space percentage after deleting all '
                    'unprotected files. Operation will be aborted and no file has been deleted.'
                )
                return False

        device.api.delete_unprotected_files(directory=destination,
                                            protected=protected_files,
                                            files_to_delete=to_delete,
                                            dir_output=dir_out)

        # after deletion verify again fail if still not enough space,
        # execute dir again since files are changed
        dir_out_after = device.execute('dir {}'.format(destination))
        if min_free_space_percent:
            available_space = device.api.get_available_space(
                directory=destination, output=dir_out_after)
            total_space = device.api.get_total_space(directory=destination,
                                                     output=dir_out_after)
            available_percent = available_space / total_space * 100
            log.info(
                "There are {available}% of free space on the disk, which is {compare} than "
                "the target of {target}%.".format(
                    available=round(available_percent, 2),
                    compare='less'
                    if available_percent < min_free_space_percent else
                    'greater',
                    target=min_free_space_percent))
        if not device.api.verify_enough_disk_space(
                required_size, destination, dir_output=dir_out_after):
            log.error(
                'There is still not enough space on the device after deleting '
                'unprotected files.')
            return False
        else:
            log.info(
                "Verified there is enough space on the device after deleting "
                "unprotected files.")
            return True
def free_up_disk_space(device,
                       destination,
                       required_size,
                       skip_deletion,
                       protected_files,
                       compact=False,
                       min_free_space_percent=None,
                       dir_output=None,
                       allow_deletion_failure=False):
    '''Delete files to create space on device except protected files
        Args:
            device ('Obj') : Device object
            destination ('str') : Destination directory, i.e bootflash:/
            required_size ('int') : Check if enough space to fit given size in
                                    bytes. If this number is negative it will
                                    be assumed the required size is not
                                    available.
            skip_deletion ('bool') : Only performs checks, no deletion
            protected_files ('list') : List of file patterns that wont be
                                       deleted
            compact ('bool'): Compact option for n9k, used for size
                              estimation, default False
            min_free_space_percent ('int'): Minimum acceptable free disk
                                            space %. Optional,
            dir_output ('str'): Output of 'dir' command
                                if not provided, executes the cmd on device
            allow_deletion_failure (bool, optional): Allow the deletion of a
                file to silently fail. Defaults to False
        Returns:
            True if there is enough space after the operation, False otherwise
    '''
    # For n9k compact copy:
    # observationally, depending on release, the compacted image is 36-48% the
    # size of the original image. For now we'll use 60% as a conservative
    # estimate.
    if compact:
        required_size *= .6

    # Parse directory output to check
    dir_out = dir_output or device.execute('dir {}'.format(destination))

    # Get available free space on device
    available_space = device.api.get_available_space(directory=destination,
                                                     output=dir_out)
    log.debug('available_space: {avs}'.format(avs=available_space))

    # Check if available space is sufficient
    if min_free_space_percent:
        # Get total space
        total_space = device.api.get_total_space(directory=destination,
                                                 output=dir_out)
        # Get current available space in %
        avail_percent = available_space / total_space * 100
        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".\
                 format(avail=round(avail_percent, 2),
                        compare='less' if \
                        avail_percent < min_free_space_percent else 'greater',
                        target=min_free_space_percent))
        # get bigger of required_space or min_free_space_percent
        # NOTE(review): unlike the earlier revision, there is no guard for
        # required_size being None here - max(None, ...) would raise.
        # Confirm callers always pass a numeric required_size.
        required_size = round(
            max(required_size, min_free_space_percent * .01 * total_space))

    # If there's not enough space, delete non-protected files
    if device.api.verify_enough_disk_space(required_size=required_size,
                                           directory=destination,
                                           dir_output=dir_out):
        if required_size < 0:
            log.info("Required disk space is unknown, will not delete files")
        else:
            log.info("Verified there is enough space on the device. "
                     "No files are deleted")
        return True

    if skip_deletion:
        log.error(
            "'skip_deletion' is set to True and there isn't enough space "
            "on the device, files cannot be deleted.")
        return False
    else:
        log.info("Deleting unprotected files to free up some space")
        running_images = []
        log.info("Sending 'show version' to learn the current running images")
        running_image = device.api.get_running_image()
        if isinstance(running_image, list):
            for image in running_image:
                running_images.append(os.path.basename(image))
        else:
            running_images.append(os.path.basename(running_image))
        # convert to set for O(1) lookup
        protected_files = set(protected_files)
        parsed_dir_out = device.parse('dir {}'.format(destination),
                                      output=dir_out)
        dq = Dq(parsed_dir_out)

        # turn parsed dir output to a list of files for sorting
        # Large files are given priority when deleting
        file_list = []
        running_image_list = []
        for file in dq.get_values('files'):
            # separate running image from other files
            if any(file in image for image in running_images):
                running_image_list.append(
                    (file, int(dq.contains(file).get_values('size')[0])))
            else:
                file_list.append(
                    (file, int(dq.contains(file).get_values('size')[0])))
        file_list.sort(key=lambda x: x[1], reverse=True)
        # add running images to the end so they are deleted as a last resort
        file_list.extend(running_image_list)
        log.debug('file_list: {fl}'.format(fl=file_list))

        # Delete files one at a time, re-checking free space after each
        # deletion so we stop as soon as the requirement is met.
        for file, size in file_list:
            device.api.delete_unprotected_files(
                directory=destination,
                protected=protected_files,
                files_to_delete=[file],
                dir_output=dir_out,
                allow_failure=allow_deletion_failure)
            if device.api.verify_enough_disk_space(required_size, destination):
                log.info("Verified there is enough space on the device after "
                         "deleting unprotected files.")
                return True

        # Exhausted list of files - still not enough space
        log.error('There is still not enough space on the device after '
                  'deleting unprotected files.')
        return False
def _select_health(self, section, data, search_keywords, arg_name):
    """check if pyATS Health Check processor meets criteria
    via `health_tc_sections`, `health_tc_uids` and `health_tc_groups`

    Arguments:
        section (`obj`) : Aetest Subsection object.
        data (`dict`) : data of section
        search_keywords (`list`) : list of search keywords
        arg_name (`str`) : `health_tc_sections`, `health_tc_uids` or
                           `health_tc_groups`

    Returns:
        new_data (`list`) : Updated data which meets args criteria
                            Otherwise, return empty list
    """
    # initialize
    new_data = []
    search_target = ''
    # replicate search_keywords for further loop
    search_keywords = copy.deepcopy(search_keywords)
    for search_keyword in search_keywords:
        # save original `search_keyword` in `pre_search_keyword`
        # which has `%VARIABLES{}`
        pre_search_keyword = search_keyword
        # load `%VARIABLES{}` and replace in `search_keyword`
        _, search_keyword = _load_saved_variable(self,
                                                 section=section,
                                                 val=search_keyword)
        if 'type:' in search_keyword:
            search_class = search_keyword.replace('type:', '')
            # Fix: SECTION_CLASS_MAPPING.get() returns None for an unknown
            # class name and isinstance(section, None) raises TypeError.
            # Guard so an unknown `type:` keyword simply does not match.
            section_class = SECTION_CLASS_MAPPING.get(search_class)
            if section_class is not None and isinstance(section,
                                                        section_class):
                new_data.append(data)
        else:
            # get search_target such as section.uid, section.groups from
            # section
            search_target = self._find_search_target(section, arg_name,
                                                     search_keyword,
                                                     search_keywords)
            log.debug('search_target: {st}'.format(st=search_target))
            if re.search(search_keyword, search_target):
                # when args exist, don't need to do `contains` because
                # args will affect to all items in pyATS Health
                if getattr(runtime.args, arg_name):
                    dq_item = self._find_item_by_search_keyword(
                        section, data, arg_name, search_target)
                else:
                    # in `data`, %VARIABLES doesn't need to be converted
                    # so, need to use `pre_search_keyword`
                    data_dq = Dq(data)
                    dq_item = data_dq.contains(pre_search_keyword,
                                               regex=True,
                                               level=1).reconstruct()
                    # for the case regex is used. need to do exact match
                    # without `regex=True`
                    if not dq_item:
                        dq_item = data_dq.contains(pre_search_keyword,
                                                   level=1).reconstruct()
                if dq_item and dq_item not in new_data:
                    new_data.append(dq_item)
    log.debug("new_data: {}".format(new_data))
    return new_data
def _pre_post_processors(self,
                         testbed,
                         processor,
                         section,
                         data,
                         name,
                         reconnect,
                         processor_targets,
                         processor_type,
                         pre_processor_result=Passed,
                         health_settings=None):
    """execute pre/post processors and return if pre-processor runs and
    processor result

    Arguments:
        testbed (`obj`): testbed object
        processor (`obj`): Aetest Processor object
        section (`obj`): Aetest Section object
        data (`list`) : data of section
        name (`str`) : name of section in health yaml
        reconnect (`dict` or None) : parameters for reconnect
        processor_targets (`list`) : list of `processor_flag which ones
                                     will be run as pre/post processors
        processor_type (`str`) : processor type `pre` or `post`
        pre_processor_result (`ob`) : result object. Default to `Passed`
        health_settings (`dict` or None) : health settings; read for
                                           `force_all_connected`

    Returns:
        pre_processor_run (`bool`) : if pre processor runs or not
        pre_processor_result (`obj`) : return processor result (Result obj)
    """
    devices_connected = []
    new_data_dict = {}
    selected_options = 0
    list_of_args = []
    # store reasons why processor is skipped
    reasons = []
    # flag if health args are given to pyats command
    args_flag = False
    # flag if health args are defined under action in health yaml
    args_in_yaml_flag = False
    log.debug(
        'data:\n{d}'.format(d=json.dumps(data, indent=2, sort_keys=True)))
    # keep an untouched copy; `data` is repeatedly overridden below
    orig_data = copy.deepcopy(data)

    # check if health arguments are given to pyats command
    for arg_name in [
            'health_tc_sections', 'health_tc_uids', 'health_tc_groups',
            'health_sections', 'health_uids', 'health_groups'
    ]:
        if getattr(runtime.args, arg_name):
            args_flag = True
        for item in self._get_actions(data, processor_targets):
            if Dq(item).contains(
                    'health_tc_sections|health_tc_uids|health_tc_groups|health_sections|health_uids|health_groups',
                    regex=True):
                args_in_yaml_flag = True

    # for each health argument, collect actions whose criteria match either
    # the CLI argument value or the value defined in the health yaml
    for arg_name in [
            'health_tc_sections', 'health_tc_uids', 'health_tc_groups',
            'health_sections', 'health_uids', 'health_groups'
    ]:
        log.debug('Checking {an}'.format(an=arg_name))
        selected = None
        selected_options = 0
        for item in self._get_actions(data, processor_targets):
            # from argument
            arg_search_keyword = getattr(runtime.args, arg_name)
            if arg_search_keyword:
                args_flag = True
                selected = self._select_health(section, item,
                                               arg_search_keyword.split(' '),
                                               arg_name)
                selected_options += 1
                list_of_args.append(arg_name)
            if selected:
                new_data_dict.setdefault(arg_name, {}).setdefault(
                    selected_options, selected)
            if not args_flag:
                # from datafile
                search_keywords = []
                search_keywords = getattr(
                    runtime.args, arg_name) or Dq(item).get_values(arg_name)
                if not isinstance(search_keywords, list):
                    search_keywords = [search_keywords]
                if search_keywords == []:
                    # if args are given to one of actions, other actions
                    # will run to all sections by default. To do so,
                    # adding `.*` as search_keywords
                    # ex.)
                    # - api:            # only section1
                    #     function: func1
                    #     health_tc_sections: section1
                    # - api:            # all sections
                    #     function: func2
                    if (args_in_yaml_flag and arg_name in [
                            'health_tc_sections', 'health_sections'
                    ] and ((not Dq(item).get_values('health_tc_sections')
                            or not Dq(item).get_values('health_sections')) and
                           (not Dq(item).get_values('health_tc_uids')
                            or not Dq(item).get_values('health_uids')))):
                        search_keywords = ['.*']
                    else:
                        search_keywords = None
                log.debug(
                    "arg_name, search_keywords: {sel_name}, {sel}".format(
                        sel_name=arg_name, sel=search_keywords))
                if search_keywords:
                    selected_options += 1
                    list_of_args.append(arg_name)
                    selected = self._select_health(section, item,
                                                   search_keywords, arg_name)
                    if selected:
                        new_data_dict.setdefault(arg_name, {}).setdefault(
                            selected_options, selected)

    if args_flag:
        # check for the case which multiple `arg_name`s given and check the
        # same among the `arg_name`s. if same between `arg_name`s, data will
        # be overwittern by one of new_data_dict value to execute selected
        # ones
        new_data_flag = False
        if new_data_dict:
            value = ''
            log.debug(
                'num of health args: {n}'.format(n=len(set(list_of_args))))
            log.debug(
                'num of new_data_dict: {n}'.format(n=len(new_data_dict)))
            if len(set(list_of_args)) == len(new_data_dict):
                for key, value_ in new_data_dict.items():
                    if value == value_:
                        new_data_flag = True
                    else:
                        new_data_flag = False
                    # remember the first value to compare the rest against
                    if not value:
                        value = value_
            if len(new_data_dict) == 1:
                new_data_flag = True
    else:
        new_data_flag = len(set(list_of_args)) == len(new_data_dict)

    log.debug('new_data_flag: {f}'.format(f=new_data_flag))
    log.debug('new_data_dict: {ndd}'.format(
        ndd=json.dumps(new_data_dict, indent=2, sort_keys=True)))

    if new_data_flag:
        temp_data = []
        # override data because meeting criteria by `arg_name`s
        for key, value__ in new_data_dict.items():
            for idx in value__:
                # data from each health arg should be same
                # so remove redundant data by overwriting
                temp_data = [new_data_dict[key][idx].pop()]
        data = temp_data
    elif (not new_data_dict or len(set(list_of_args)) != len(new_data_dict)
          ) and len(set(list_of_args)) != 0:
        reasons.append(
            f"health arg {set(list_of_args)-set(new_data_dict.keys())} does not meet criteria"
        )
        data = []

    # processor start message
    log.debug('{type}-processor {name} started'.format(
        name=name, type=processor_type.capitalize()))
    pre_processor_run = True

    # check if `processor` tag matches processor_targets and
    # if device for action is connected
    # create temp_data with matched actions and override data by temp_data
    temp_data = []
    # list of checked devices. flag to ignore checked device
    device_checked = []
    # None if no device is defined in any actions
    all_devices_connected = None
    common_api = False

    if new_data_dict and new_data_flag:
        # get connected devices list
        devices_connected = self._check_all_devices_connected(
            testbed, data, reconnect)
        devices_connected = [dev for dev in devices_connected if dev != '']

    actions = self._get_actions(data, processor_targets)
    if not actions:
        # check processor in action and put in proc_in_action
        proc_in_action = []
        if isinstance(data, list):
            for each_data in data:
                for each_proc in Dq(each_data).get_values('processor'):
                    proc_in_action.append(each_proc)
        else:
            for each_proc in Dq(data).get_values('processor'):
                proc_in_action.append(each_proc)
        proc_in_action = set(proc_in_action)
        if proc_in_action:
            reasons.append(
                f"processor {proc_in_action} does not meet criteria {processor_targets}"
            )

    for each_data in actions:
        for key in each_data:
            # get processor key from action. by default, `both`
            each_data_dq = Dq(each_data)
            processor_from_yaml = each_data_dq.contains(key).get_values(
                'processor', 0)
            if not processor_from_yaml:
                processor_from_yaml = 'both'
            log.debug(
                'processor_targets: {pt}'.format(pt=processor_targets))
            log.debug('processor: {p}'.format(p=processor_from_yaml))
            # find `common_api` key and return True/False
            common_api = any(each_data_dq.get_values('common_api'))
            if processor_from_yaml in processor_targets:
                # check if device for action is connected
                all_devices_connected = None
                devices_not_connected = []
                for uut in self._get_device_names(orig_data, each_data):
                    if uut not in device_checked:
                        device_checked.append(uut)
                        # `uut` may be a device name (str) or a device object
                        if isinstance(uut, str):
                            if (testbed.devices[uut].name
                                    in devices_connected) or (
                                        testbed.devices[uut].alias
                                        in devices_connected):
                                all_devices_connected = True
                            else:
                                all_devices_connected = False
                                devices_not_connected.append(uut)
                        elif (uut.name in devices_connected) or (
                                uut.alias in devices_connected):
                            all_devices_connected = True
                        else:
                            all_devices_connected = False
                            devices_not_connected.append(uut)
                if devices_not_connected:
                    log.warning("devices are not connected: {}".format(
                        devices_not_connected))
                force_all_connected = health_settings.get(
                    'force_all_connected', True)
                if device_checked and not force_all_connected and devices_connected:
                    log.warning(
                        "force_all_connected is False. Executing even though some of devices might not be connected."
                    )
                # data will be created if all devices are connected or
                # if force_all_connected == False and one of devices is
                # connected
                if (all_devices_connected == True
                        or all_devices_connected is None) or (
                            force_all_connected == False
                            and devices_connected):
                    temp_data.append(each_data)
                else:
                    log.warning(
                        'health check is blocked due to force_all_connected is True.'
                    )

    # until here, data contains only actions
    # for cases like `parallel`, `loop`, need to put the headers
    # from original data `orig_data`
    if 'actions' in orig_data and data and temp_data:
        data = copy.deepcopy(orig_data)
        if temp_data:
            data['actions'] = temp_data
            data = [{'loop': data}]
        else:
            data = []
    elif isinstance(orig_data, list):
        if len(orig_data
               ) > 0 and 'parallel' in orig_data[0] and data and temp_data:
            data = copy.deepcopy(orig_data)[0]
            if temp_data:
                data['parallel'] = temp_data
                data = [data]
            else:
                data = []
        elif len(orig_data) > 0 and 'run_condition' in orig_data[
                0] and data and temp_data:
            data = copy.deepcopy(orig_data)[0]
            data = [data]
        else:
            data = temp_data
    else:
        data = temp_data

    # remove section if no data
    removed_section = False
    # set reason in case device is not connected
    if (not devices_connected and not common_api) and not reasons:
        reasons.append('Device is not connected')
    if not data or reasons:
        processor.result = Skipped
        processor.reporter.remove_section(id_list=processor.uid.list)
        removed_section = True

    # if any device is not connected, processor will be skipped
    # if common_api is True, will execute
    if devices_connected or common_api:
        # instantiate Steps() to reset step number
        steps = Steps()
        # execute dispatcher in Blitz
        result = self.dispatcher(steps, testbed, section, data, name)
        # NOTE(review): both branches below inspect data[0] only; the
        # generator variable `each_data` is unused - confirm whether
        # each_data was intended instead of data[0].
        if isinstance(data, list):
            hide_processor = any(
                Dq(data[0]).get_values('hide_processor', 0) == True
                for each_data in data)
        else:
            hide_processor = Dq(data[0]).get_values('hide_processor', 0)
        if hide_processor and not removed_section:
            removed_section = self._remove_section(processor)
        try:
            log.debug('Blitz section return:\n{result}'.format(
                result=json.dumps(result, indent=2, sort_keys=True)))
        except TypeError:
            # result may not be JSON-serializable
            log.debug('Blitz section return:\n{result}'.format(
                result=format_output(result)))
        # check section result
        log.debug('section result: {section_result}'.format(
            section_result=section.result.name))
        log.debug('steps result: {steps_result}'.format(
            steps_result=steps.result.name))
        # if section is skipped by run_condition, remove section
        if (isinstance(result, dict) and 'run_condition_skipped' in result
                and not removed_section
                and result['run_condition_skipped'] == True):
            processor.result = Skipped
            removed_section = self._remove_section(processor)
        if processor_type == 'pre' and steps.result != Passed and steps.result != Passx:
            log.info(
                "Pre-processor pyATS Health {name} was failed, but continue section and Post-processor"
                .format(name=name))
            # save pre-processor result
            pre_processor_result = steps.result
            return pre_processor_run, pre_processor_result
        elif processor_type == 'post':
            # refrect processor results to section
            processor.result += steps.result
            section.result = section.result + processor.result + self.pre_processor_result
            # return processor.result to raise the result
            # at end of context post processor
            return pre_processor_run, processor.result
    elif processor_type == 'pre':
        pre_processor_run = False
        # processor is skipped
        log.info(
            f"Pre-processor pyATS Health '{name}' is skipped due to: {reasons}"
        )
        if pre_processor_result == Passed:
            # processor.skipped()
            pre_processor_result = Skipped
        return pre_processor_run, pre_processor_result
    elif processor_type == 'post':
        # for the case only pre-processors runs
        if section.result == pre_processor_result:
            log.info('Only Pre-processor runs. Section result and '
                     'Pre-processor result are different.Reflecting '
                     'Post-processor result to Section.')
            # reflect processor results to section
            section.result = section.result + processor.result + self.pre_processor_result
        # processor is skipped
        log.info(
            f"Post-processor pyATS Health '{name}' was skipped due to: {reasons}"
        )
        if pre_processor_result == Passed:
            # processor.skipped()
            pre_processor_result = Skipped
        # return processor.result to raise the result
        # at end of context post processor
        return pre_processor_run, processor.result
    return pre_processor_run, pre_processor_result
def free_up_disk_space(device,
                       destination,
                       required_size,
                       skip_deletion,
                       protected_files=None,
                       min_free_space_percent=None,
                       dir_output=None):
    '''Delete files to create space on device except protected files
        Args:
            device ('Obj') : Device object
            destination ('str') : Destination directory, i.e bootflash:/
            required_size ('int') : Check if enough space to fit given size in
                                    bytes. If this number is negative it will
                                    be assumed the required size is not
                                    available.
            skip_deletion ('bool') : Only performs checks, no deletion
            protected_files ('list') : List of file patterns that wont be
                                       deleted.
            min_free_space_percent ('int'): Minimum acceptable free disk
                                            space %. Optional,
            dir_output ('str'): Output of 'dir' command
                                if not provided, executes the cmd on device
        Returns:
            True if there is enough space after the operation, False otherwise
    '''
    if not destination:
        log.warning('No destination provided, cannot verify available space')
        return True

    # APIC is a Linux-based box: free space comes from `df`, not `dir`
    df_info = device.parse('df {}'.format(destination), output=dir_output)
    dir_df_info = df_info['directory'].values()
    if dir_df_info:
        dir_df_info = list(dir_df_info)[0]
        free_space = dir_df_info.get('available')
    # `or` short-circuits, so free_space is only read when dir_df_info is
    # truthy (i.e. it was assigned above)
    if not dir_df_info or not free_space:
        log.error('Unable to determine available space')
        return True

    # Check if available space is sufficient
    if min_free_space_percent:
        # Get total space
        total_space = dir_df_info.get('total')
        # Get current available space in %
        # NOTE(review): assumes the parser always provides a numeric
        # 'use_percentage' - confirm against the df parser schema
        avail_percent = 100 - dir_df_info.get('use_percentage')
        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".format(
                     avail=round(avail_percent, 2),
                     compare='less'
                     if avail_percent < min_free_space_percent else 'greater',
                     target=min_free_space_percent))
        # get bigger of required_space or min_free_space_percent
        required_size = round(
            max(required_size, min_free_space_percent * .01 * total_space))

    if free_space > required_size:
        log.info('APIC: enough free space available: {}'.format(free_space))
        return True

    log.warning(
        'APIC: not enough free space, required: {}, available: {}'.format(
            required_size, free_space))

    ls_output = device.execute('ls -l {}'.format(destination))
    file_info = device.parse('ls -l {}'.format(destination), output=ls_output)
    dq = Dq(file_info)

    # turn parsed dir output to a list of files for sorting
    # Large files are given priority when deleting
    file_list = []
    for file in dq.get_values('files'):
        file_list.append((file, int(dq.contains(file).get_values('size')[0])))
    file_list.sort(key=lambda x: x[1], reverse=True)

    # create list of filenames
    files_to_be_deleted = set(x[0] for x in file_list)
    # filter list and get list of unprotected files
    _, unprotected_files = _protected_and_unprotected_files(
        files_to_be_deleted, protected_files)
    # create ordered list of unprotected files
    to_be_deleted = [x[0] for x in file_list if x[0] in unprotected_files]
    log.info('Files to be deleted: {}'.format(to_be_deleted))

    # NOTE(review): this loop iterates over file_list (all files), not the
    # filtered to_be_deleted list; protected files are presumably skipped
    # inside delete_unprotected_files via the `protected` argument - confirm.
    for file, size in file_list:
        device.api.delete_unprotected_files(directory=destination,
                                            protected=protected_files,
                                            files_to_delete=[file],
                                            dir_output=ls_output)
        if device.api.verify_enough_disk_space(required_size, destination):
            log.info("Verified there is enough space on the device after "
                     "deleting unprotected files.")
            return True

    # Exhausted list of files - still not enough space
    log.error('There is still not enough space on the device after '
              'deleting unprotected files.')
    return False