def verify_interface_errors(device, interface, expected_value, input=False,
                            output=False, max_time=30, check_interval=10):
    """ Verify interface input and output errors

        Polls 'show interface <interface>' until the selected error
        counter(s) equal ``expected_value`` or ``max_time`` expires.

        Args:
            device (`obj`): Device object
            interface (`str`): Pass interface in show command
            expected_value (`int`): Expected errors values
            input (`bool`, Optional): True if input errors to verify. Default to False.
            output (`bool`, Optional): True if output errors to verify. Default to False.
            max_time (`int`, Optional): Max time, default: 30 seconds
            check_interval (`int`, Optional): Check interval, default: 10 seconds
        Returns:
            result (`bool`): Verified result
        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            cmd = 'show interface {interface}'.format(interface=interface)
            out = device.parse(cmd)
        except SchemaEmptyParserError:
            # no parsable output yet; wait and retry
            timeout.sleep()
            continue

        # Relevant portion of the parser schema:
        # Any(): {
        #     Optional('counters'): {
        #         Optional('in_errors'): int,
        # NOTE: this rebinds the `interface` parameter to the first key of
        # the parsed output (the parser returns {interface_name: data})
        interface, data = next(iter(out.items()))
        data = Dq(data)
        if input and output:
            # both counters must match the expected value
            input_errors = data.get_values("in_errors", 0)
            output_errors = data.get_values("out_errors", 0)
            if input_errors == output_errors == expected_value:
                return True
        elif input:
            input_errors = data.get_values("in_errors", 0)
            if input_errors == expected_value:
                return True
        elif output:
            output_errors = data.get_values("out_errors", 0)
            if output_errors == expected_value:
                return True
        # not matched yet (or neither flag selected); retry until timeout
        timeout.sleep()
        continue
    return False
def free_up_disk_space(device, destination, required_size, skip_deletion,
                       protected_files, compact=False,
                       min_free_space_percent=None, dir_output=None):
    '''Delete files to create space on device except protected files

    Args:
        device ('obj') : Device object
        destination ('str') : Destination directory, i.e bootflash:/
        required_size ('int') : Check if enough space to fit given size in
            bytes. If this number is negative it will be assumed
            the required size is not available. May be None when
            min_free_space_percent is provided.
        skip_deletion ('bool') : Only performs checks, no deletion
        protected_files ('list') : List of file patterns that won't be deleted
        compact ('bool'): Compact option for n9k, used for size estimation,
            default False
        min_free_space_percent ('int'): Minimum acceptable free disk space %.
            Optional
        dir_output ('str'): Output of 'dir' command
            if not provided, executes the cmd on device

    Returns:
        True if there is enough space after the operation, False otherwise

    Raises:
        ValueError: if neither 'required_size' nor 'min_free_space_percent'
            is provided
    '''
    # Check correct arguments provided
    if (min_free_space_percent is None and required_size is None):
        raise ValueError("Either 'required_size' or 'min_free_space_percent' "
                         "must be provided to perform disk space verification")

    # For n9k compact copy:
    # observationally, depending on release, the compacted image is 36-48% the
    # size of the original image. For now we'll use 60% as a conservative estimate.
    # Guard against required_size=None (allowed when only
    # min_free_space_percent is given); 'None * .6' would raise TypeError.
    if compact and required_size:
        required_size *= .6

    # Parse directory output to check
    dir_out = dir_output or device.execute('dir {}'.format(destination))

    # Get available free space on device
    available_space = device.api.get_available_space(directory=destination,
                                                     output=dir_out)

    # Check if available space is sufficient
    if min_free_space_percent:

        # Get total space
        total_space = device.api.get_total_space(directory=destination,
                                                 output=dir_out)

        # Get current available space in %
        avail_percent = available_space / total_space * 100

        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".\
                 format(avail=round(avail_percent, 2), compare='less' if \
                        avail_percent < min_free_space_percent else 'greater',
                        target=min_free_space_percent))

        # calculate the required free space in bytes relative to the total disk
        # space based on given percentage if required size is also provided,
        # take the larger value of the two
        if required_size:
            required_size = round(
                max(required_size, min_free_space_percent * .01 * total_space))
        else:
            required_size = round(min_free_space_percent * .01 * total_space)

    # If there's already enough space, nothing to delete
    if device.api.verify_enough_disk_space(required_size=required_size,
                                           directory=destination,
                                           dir_output=dir_out):
        if required_size < 0:
            log.info("Required disk space is unknown, will not delete files")
        else:
            log.info("Verified there is enough space on the device. "
                     "No files are deleted")
        return True

    if skip_deletion:
        log.error(
            "'skip_deletion' is set to True and there isn't enough space "
            "on the device, files cannot be deleted.")
        return False
    else:
        log.info("Deleting unprotected files to free up some space")
        log.info("Sending 'show version' to learn the current running images")
        image = device.api.get_running_image()
        # protect the running image(s) from deletion; build a new set
        # instead of extending (and thereby mutating) the caller's list
        if isinstance(image, list):
            running = [os.path.basename(i) for i in image]
        else:
            running = [os.path.basename(image)]
        # convert to set for O(1) lookup
        protected_files = set(protected_files) | set(running)

        parsed_dir_out = device.parse('dir {}'.format(destination),
                                      output=dir_out)
        dq = Dq(parsed_dir_out)

        # turn parsed dir output to a list of files for sorting
        # Large files are given priority when deleting
        file_list = []
        for file in dq.get_values('files'):
            file_list.append(
                (file, int(dq.contains(file).get_values('size')[0])))
        file_list.sort(key=lambda x: x[1], reverse=True)

        # append files to delete list until the deleted file sizes reaches
        # the target
        to_delete = []
        remaining_size = required_size
        for file, size in file_list:
            # break if we reach the target
            if remaining_size < available_space:
                break
            # if the file is protected, skip and check next one in the list
            elif file in protected_files:
                continue
            to_delete.append(file)
            remaining_size -= size
        # for/else: runs only when the loop was NOT broken, i.e. the
        # target could not be reached even after selecting every
        # unprotected file
        else:
            if min_free_space_percent:
                log.error(
                    'It is not possible to reach the target free space percentage after deleting all '
                    'unprotected files. Operation will be aborted and no file has been deleted.'
                )
                return False

        device.api.delete_unprotected_files(directory=destination,
                                            protected=protected_files,
                                            files_to_delete=to_delete,
                                            dir_output=dir_out)

        # after deletion verify again fail if still not enough space,
        # execute dir again since files are changed
        dir_out_after = device.execute('dir {}'.format(destination))
        if min_free_space_percent:
            available_space = device.api.get_available_space(
                directory=destination, output=dir_out_after)
            total_space = device.api.get_total_space(directory=destination,
                                                     output=dir_out_after)
            available_percent = available_space / total_space * 100
            log.info(
                "There are {available}% of free space on the disk, which is {compare} than "
                "the target of {target}%.".format(
                    available=round(available_percent, 2),
                    compare='less'
                    if available_percent < min_free_space_percent else
                    'greater',
                    target=min_free_space_percent))

        if not device.api.verify_enough_disk_space(required_size,
                                                   destination,
                                                   dir_output=dir_out_after):
            log.error(
                'There is still not enough space on the device after deleting '
                'unprotected files.')
            return False
        else:
            log.info(
                "Verified there is enough space on the device after deleting "
                "unprotected files.")
            return True
def verify_services_accounting_aggregation(device,
                                           name,
                                           expected_source_address=None,
                                           expected_destination_address=None,
                                           expected_snmp_interface=None,
                                           expected_mpls_label1=None,
                                           expected_mpls_label2=None,
                                           max_time=60,
                                           check_interval=10):
    """ Verify flow entries in
        'show services accounting aggregation template template-name {name} extensive'

        Polls the command until one entry matches ALL of the expected
        values that were provided, or the timeout expires.

        Args:
            device (obj): Device object
            name ('str'): Template name used in the show command
            expected_source_address ('str'): expected source address
            expected_destination_address ('str'): expected destination address
            expected_snmp_interface ('str'): expected snmp interface
            expected_mpls_label1 ('str'): expected mpls label1
            expected_mpls_label2 ('str'): expected mpls label2
            max_time (int, optional): Maximum timeout time. Defaults to 60.
            check_interval (int, optional): Check interval. Defaults to 10.
        Returns:
            Boolean
        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)

    while timeout.iterate():
        out = None
        try:
            out = device.parse(
                'show services accounting aggregation template template-name {name} extensive'
                .format(name=name))
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # Example of the parsed structure being walked below:
        #"flow-aggregate-template-detail": {
        #    "flow-aggregate-template-detail-ipv4": {
        #        "detail-entry": [{
        #            "source-address": "27.93.202.64",
        #            "destination-address": "106.187.14.158",
        #            "source-port": "8",
        #            "destination-port": "0",
        #            "protocol": {"#text": "1"},
        #            "tos": "0",
        #            "tcp-flags": "0",
        #            "source-mask": "32",
        #            "destination-mask": "30",
        #            "input-snmp-interface-index": "618",
        #            "output-snmp-interface-index": "620",
        #            "start-time": "79167425",
        #            "end-time": "79167425",
        #            "packet-count": "1",
        #            "byte-count": "84",
        #        }]
        #    }
        #}
        for entry in out['services-accounting-information']\
            ['flow-aggregate-template-detail']['flow-aggregate-template-detail-ipv4']\
            ['detail-entry']:
            # passflag stays True only if every provided expectation matches
            passflag = True
            entry = Dq(entry)
            if expected_source_address and \
                entry.get_values('source-address', 0) != str(expected_source_address):
                passflag = False
            if expected_destination_address and \
                entry.get_values('destination-address', 0) != str(expected_destination_address):
                passflag = False
            if expected_snmp_interface and \
                entry.get_values('input-snmp-interface-index', 0) != str(expected_snmp_interface):
                passflag = False
            if expected_mpls_label1 and \
                entry.get_values('mpls-label-1', 0) != str(expected_mpls_label1):
                passflag = False
            if expected_mpls_label2 and \
                entry.get_values('mpls-label-2', 0) != str(expected_mpls_label2):
                passflag = False

            # succeed only when at least one expectation was supplied
            # and this entry satisfied all of them
            if (expected_source_address or expected_destination_address
                    or expected_snmp_interface or expected_mpls_label1
                    or expected_mpls_label2) and passflag:
                return True

        timeout.sleep()
    return False
def free_up_disk_space(device, destination, required_size, skip_deletion,
                       protected_files, compact=False,
                       min_free_space_percent=None, dir_output=None,
                       allow_deletion_failure=False):
    '''Delete files to create space on device except protected files

    Args:
        device ('obj') : Device object
        destination ('str') : Destination directory, i.e bootflash:/
        required_size ('int') : Check if enough space to fit given size in
            bytes. If this number is negative it will be assumed
            the required size is not available. May be None when
            min_free_space_percent is provided.
        skip_deletion ('bool') : Only performs checks, no deletion
        protected_files ('list') : List of file patterns that won't be deleted
        compact ('bool'): Compact option for n9k, used for size estimation,
            default False
        min_free_space_percent ('int'): Minimum acceptable free disk space %.
            Optional
        dir_output ('str'): Output of 'dir' command
            if not provided, executes the cmd on device
        allow_deletion_failure (bool, optional): Allow the deletion of a file
            to silently fail. Defaults to False

    Returns:
        True if there is enough space after the operation, False otherwise

    Raises:
        ValueError: if neither 'required_size' nor 'min_free_space_percent'
            is provided
    '''
    # Check correct arguments provided (kept consistent with the other
    # free_up_disk_space implementations)
    if min_free_space_percent is None and required_size is None:
        raise ValueError("Either 'required_size' or 'min_free_space_percent' "
                         "must be provided to perform disk space verification")

    # For n9k compact copy:
    # observationally, depending on release, the compacted image is 36-48% the
    # size of the original image. For now we'll use 60% as a conservative estimate.
    # Guard against required_size=None, which would raise TypeError here.
    if compact and required_size:
        required_size *= .6

    # Parse directory output to check
    dir_out = dir_output or device.execute('dir {}'.format(destination))

    # Get available free space on device
    available_space = device.api.get_available_space(
        directory=destination, output=dir_out)
    log.debug('available_space: {avs}'.format(avs=available_space))

    # Check if available space is sufficient
    if min_free_space_percent:

        # Get total space
        total_space = device.api.get_total_space(
            directory=destination, output=dir_out)

        # Get current available space in %
        avail_percent = available_space / total_space * 100

        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".\
                 format(avail=round(avail_percent, 2), compare='less' if \
                        avail_percent < min_free_space_percent else 'greater',
                        target=min_free_space_percent))

        # get bigger of required_space or min_free_space_percent
        # (required_size may be None when only a percentage target is given)
        if required_size:
            required_size = round(
                max(required_size, min_free_space_percent * .01 * total_space))
        else:
            required_size = round(min_free_space_percent * .01 * total_space)

    # If there's already enough space, nothing to delete
    if device.api.verify_enough_disk_space(required_size=required_size,
                                           directory=destination,
                                           dir_output=dir_out):
        if required_size < 0:
            log.info("Required disk space is unknown, will not delete files")
        else:
            log.info("Verified there is enough space on the device. "
                     "No files are deleted")
        return True

    if skip_deletion:
        log.error("'skip_deletion' is set to True and there isn't enough space "
                  "on the device, files cannot be deleted.")
        return False
    else:
        log.info("Deleting unprotected files to free up some space")

        running_images = []

        log.info("Sending 'show version' to learn the current running images")
        running_image = device.api.get_running_image()
        if isinstance(running_image, list):
            for image in running_image:
                running_images.append(os.path.basename(image))
        else:
            running_images.append(os.path.basename(running_image))

        # convert to set for O(1) lookup
        protected_files = set(protected_files)

        parsed_dir_out = device.parse('dir {}'.format(destination),
                                      output=dir_out)
        dq = Dq(parsed_dir_out)

        # turn parsed dir output to a list of files for sorting
        # Large files are given priority when deleting
        file_list = []
        running_image_list = []
        for file in dq.get_values('files'):
            # separate running image from other files
            if any(file in image for image in running_images):
                running_image_list.append(
                    (file, int(dq.contains(file).get_values('size')[0])))
            else:
                file_list.append(
                    (file, int(dq.contains(file).get_values('size')[0])))
        file_list.sort(key=lambda x: x[1], reverse=True)
        # add running images to the end so they are deleted as a last resort
        file_list.extend(running_image_list)
        log.debug('file_list: {fl}'.format(fl=file_list))

        # delete one file at a time, re-checking available space after each
        for file, size in file_list:
            device.api.delete_unprotected_files(
                directory=destination,
                protected=protected_files,
                files_to_delete=[file],
                dir_output=dir_out,
                allow_failure=allow_deletion_failure)

            if device.api.verify_enough_disk_space(required_size, destination):
                log.info("Verified there is enough space on the device after "
                         "deleting unprotected files.")
                return True

        # Exhausted list of files - still not enough space
        log.error('There is still not enough space on the device after '
                  'deleting unprotected files.')
        return False
def _pre_post_processors(self,
                         testbed,
                         processor,
                         section,
                         data,
                         name,
                         reconnect,
                         processor_targets,
                         processor_type,
                         pre_processor_result=Passed,
                         health_settings=None):
    """ execute pre/post processors and return if pre-processor runs and
        processor result

    Arguments:
        testbed (`obj`): testbed object
        processor (`obj`): Aetest Processor object
        section (`obj`): Aetest Section object
        data (`list`) : data of section
        name (`str`) : name of section in health yaml
        reconnect (`dict` or None) : parameters for reconnect
        processor_targets (`list`) : list of `processor_flag which ones
                                     will be run as pre/post processors
        processor_type (`str`) : processor type `pre` or `post`
        pre_processor_result (`ob`) : result object. Default to `Passed`
        health_settings (`dict` or None) : health settings; read below for
                                           the 'force_all_connected' flag

    Returns:
        pre_processor_run (`bool`) : if pre processor runs or not
        pre_processor_result (`obj`) : return processor result (Result obj)
    """
    devices_connected = []
    new_data_dict = {}
    selected_options = 0
    list_of_args = []
    # store reasons why processor is skipped
    reasons = []
    # flag if health args are given to pyats command
    args_flag = False
    # flag if health args are defined under action in health yaml
    args_in_yaml_flag = False
    log.debug(
        'data:\n{d}'.format(d=json.dumps(data, indent=2, sort_keys=True)))
    # keep an untouched copy; `data` is rewritten several times below
    orig_data = copy.deepcopy(data)

    # check if health arguments are given to pyats command
    for arg_name in [
            'health_tc_sections', 'health_tc_uids', 'health_tc_groups',
            'health_sections', 'health_uids', 'health_groups'
    ]:
        if getattr(runtime.args, arg_name):
            args_flag = True
        for item in self._get_actions(data, processor_targets):
            if Dq(item).contains(
                    'health_tc_sections|health_tc_uids|health_tc_groups|health_sections|health_uids|health_groups',
                    regex=True):
                args_in_yaml_flag = True

    for arg_name in [
            'health_tc_sections', 'health_tc_uids', 'health_tc_groups',
            'health_sections', 'health_uids', 'health_groups'
    ]:
        log.debug('Checking {an}'.format(an=arg_name))
        selected = None
        selected_options = 0
        for item in self._get_actions(data, processor_targets):
            # from argument
            arg_search_keyword = getattr(runtime.args, arg_name)
            if arg_search_keyword:
                args_flag = True
                # select actions that match the keyword(s) from CLI args
                selected = self._select_health(
                    section, item, arg_search_keyword.split(' '), arg_name)
                selected_options += 1
                list_of_args.append(arg_name)
            if selected:
                new_data_dict.setdefault(arg_name, {}).setdefault(
                    selected_options, selected)
            if not args_flag:
                # from datafile
                search_keywords = []
                search_keywords = getattr(
                    runtime.args, arg_name) or Dq(item).get_values(arg_name)
                if not isinstance(search_keywords, list):
                    search_keywords = [search_keywords]
                if search_keywords == []:
                    # if args are given to one of actions, other actions
                    # will run to all sections by default. To do so,
                    # adding `.*` as search_keywords
                    # ex.)
                    # - api:             # only section1
                    #     function: func1
                    #     health_tc_sections: section1
                    # - api:             # all sections
                    #     function: func2
                    if (args_in_yaml_flag and arg_name in
                        ['health_tc_sections', 'health_sections'] and
                        ((not Dq(item).get_values('health_tc_sections')
                          or not Dq(item).get_values('health_sections')) and
                         (not Dq(item).get_values('health_tc_uids')
                          or not Dq(item).get_values('health_uids')))):
                        search_keywords = ['.*']
                    else:
                        search_keywords = None
                log.debug(
                    "arg_name, search_keywords: {sel_name}, {sel}".format(
                        sel_name=arg_name, sel=search_keywords))
                if search_keywords:
                    selected_options += 1
                    list_of_args.append(arg_name)
                    selected = self._select_health(section, item,
                                                   search_keywords,
                                                   arg_name)
                if selected:
                    new_data_dict.setdefault(arg_name, {}).setdefault(
                        selected_options, selected)

    if args_flag:
        # check for the case which multiple `arg_name`s given and check the same
        # among the `arg_name`s. if same between `arg_name`s, data will be overwittern
        # by one of new_data_dict value to execute selected ones
        new_data_flag = False
        if new_data_dict:
            value = ''
            log.debug(
                'num of health args: {n}'.format(n=len(set(list_of_args))))
            log.debug(
                'num of new_data_dict: {n}'.format(n=len(new_data_dict)))
            if len(set(list_of_args)) == len(new_data_dict):
                for key, value_ in new_data_dict.items():
                    if value == value_:
                        new_data_flag = True
                    else:
                        new_data_flag = False
                    if not value:
                        value = value_
            if len(new_data_dict) == 1:
                new_data_flag = True
    else:
        new_data_flag = len(set(list_of_args)) == len(new_data_dict)
    log.debug('new_data_flag: {f}'.format(f=new_data_flag))
    log.debug('new_data_dict: {ndd}'.format(
        ndd=json.dumps(new_data_dict, indent=2, sort_keys=True)))

    if new_data_flag:
        temp_data = []
        # override data because meeting criteria by `arg_name`s
        for key, value__ in new_data_dict.items():
            for idx in value__:
                # data from each health arg should be same
                # so remove redundant data by overwriting
                temp_data = [new_data_dict[key][idx].pop()]
        data = temp_data
    elif (not new_data_dict or len(set(list_of_args)) != len(new_data_dict)
          ) and len(set(list_of_args)) != 0:
        reasons.append(
            f"health arg {set(list_of_args)-set(new_data_dict.keys())} does not meet criteria"
        )
        data = []

    # processor start message
    log.debug('{type}-processor {name} started'.format(
        name=name, type=processor_type.capitalize()))
    pre_processor_run = True

    # check if `processor` tag matches processor_targets and
    # if device for action is connected
    # create temp_data with matched actions and override data by temp_data
    temp_data = []
    # list of checked devices. flag to ignore checked device
    device_checked = []
    # None if no device is defined in any actions
    all_devices_connected = None
    common_api = False

    if new_data_dict and new_data_flag:
        # get connected devices list
        devices_connected = self._check_all_devices_connected(
            testbed, data, reconnect)
        devices_connected = [dev for dev in devices_connected if dev != '']

    actions = self._get_actions(data, processor_targets)
    if not actions:
        # check processor in action and put in proc_in_action
        proc_in_action = []
        if isinstance(data, list):
            for each_data in data:
                for each_proc in Dq(each_data).get_values('processor'):
                    proc_in_action.append(each_proc)
        else:
            for each_proc in Dq(data).get_values('processor'):
                proc_in_action.append(each_proc)
        proc_in_action = set(proc_in_action)
        if proc_in_action:
            reasons.append(
                f"processor {proc_in_action} does not meet criteria {processor_targets}"
            )

    for each_data in actions:
        for key in each_data:
            # get processor key from action. by default, `both`
            each_data_dq = Dq(each_data)
            processor_from_yaml = each_data_dq.contains(key).get_values(
                'processor', 0)
            if not processor_from_yaml:
                processor_from_yaml = 'both'

            log.debug(
                'processor_targets: {pt}'.format(pt=processor_targets))
            log.debug('processor: {p}'.format(p=processor_from_yaml))

            # find `common_api` key and return True/False
            common_api = any(each_data_dq.get_values('common_api'))

            if processor_from_yaml in processor_targets:
                # check if device for action is connected
                all_devices_connected = None
                devices_not_connected = []
                for uut in self._get_device_names(orig_data, each_data):
                    if uut not in device_checked:
                        device_checked.append(uut)
                        # device may be a name string or a device object
                        if isinstance(uut, str):
                            if (testbed.devices[uut].name
                                    in devices_connected) or (
                                        testbed.devices[uut].alias
                                        in devices_connected):
                                all_devices_connected = True
                            else:
                                all_devices_connected = False
                                devices_not_connected.append(uut)
                        elif (uut.name in devices_connected) or (
                                uut.alias in devices_connected):
                            all_devices_connected = True
                        else:
                            all_devices_connected = False
                            devices_not_connected.append(uut)
                if devices_not_connected:
                    log.warning("devices are not connected: {}".format(
                        devices_not_connected))
                # NOTE(review): health_settings=None (the default) would
                # raise AttributeError here; callers presumably always
                # pass a dict — confirm
                force_all_connected = health_settings.get(
                    'force_all_connected', True)
                if device_checked and not force_all_connected and devices_connected:
                    log.warning(
                        "force_all_connected is False. Executing even though some of devices might not be connected."
                    )
                # data will be created if all devices are connected or
                # if force_all_connected == False and one of devices is connected
                if (all_devices_connected == True
                        or all_devices_connected is None) or (
                            force_all_connected == False
                            and devices_connected):
                    temp_data.append(each_data)
                else:
                    log.warning(
                        'health check is blocked due to force_all_connected is True.'
                    )

    # until here, data contains only actions
    # for cases like `parallel`, `loop`, need to put the headers
    # from original data `orig_data`
    if 'actions' in orig_data and data and temp_data:
        data = copy.deepcopy(orig_data)
        if temp_data:
            data['actions'] = temp_data
            data = [{'loop': data}]
        else:
            data = []
    elif isinstance(orig_data, list):
        if len(orig_data
               ) > 0 and 'parallel' in orig_data[0] and data and temp_data:
            data = copy.deepcopy(orig_data)[0]
            if temp_data:
                data['parallel'] = temp_data
                data = [data]
            else:
                data = []
        elif len(orig_data) > 0 and 'run_condition' in orig_data[
                0] and data and temp_data:
            data = copy.deepcopy(orig_data)[0]
            data = [data]
        else:
            data = temp_data
    else:
        data = temp_data

    # remove section if no data
    removed_section = False
    # set reason in case device is not connected
    if (not devices_connected and not common_api) and not reasons:
        reasons.append('Device is not connected')
    if not data or reasons:
        processor.result = Skipped
        processor.reporter.remove_section(id_list=processor.uid.list)
        removed_section = True

    # if any device is not connected, processor will be skipped
    # if common_api is True, will execute
    if devices_connected or common_api:
        # instantiate Steps() to reset step number
        steps = Steps()
        # execute dispatcher in Blitz
        result = self.dispatcher(steps, testbed, section, data, name)

        # NOTE(review): both branches read data[0]; the generator below
        # never uses each_data — possibly intended to check only the
        # first item; confirm
        if isinstance(data, list):
            hide_processor = any(
                Dq(data[0]).get_values('hide_processor', 0) == True
                for each_data in data)
        else:
            hide_processor = Dq(data[0]).get_values('hide_processor', 0)
        if hide_processor and not removed_section:
            removed_section = self._remove_section(processor)

        try:
            log.debug('Blitz section return:\n{result}'.format(
                result=json.dumps(result, indent=2, sort_keys=True)))
        except TypeError:
            # result is not JSON-serializable; fall back to format_output
            log.debug('Blitz section return:\n{result}'.format(
                result=format_output(result)))

        # check section result
        log.debug('section result: {section_result}'.format(
            section_result=section.result.name))
        log.debug('steps result: {steps_result}'.format(
            steps_result=steps.result.name))

        # if section is skipped by run_condition, remove section
        if (isinstance(result, dict) and 'run_condition_skipped' in result
                and not removed_section
                and result['run_condition_skipped'] == True):
            processor.result = Skipped
            removed_section = self._remove_section(processor)

        if processor_type == 'pre' and steps.result != Passed and steps.result != Passx:
            log.info(
                "Pre-processor pyATS Health {name} was failed, but continue section and Post-processor"
                .format(name=name))
            # save pre-processor result
            pre_processor_result = steps.result
            return pre_processor_run, pre_processor_result
        elif processor_type == 'post':
            # refrect processor results to section
            processor.result += steps.result
            section.result = section.result + processor.result + self.pre_processor_result
            # return processor.result to raise the result
            # at end of context post processor
            return pre_processor_run, processor.result
    elif processor_type == 'pre':
        pre_processor_run = False
        # processor is skipped
        log.info(
            f"Pre-processor pyATS Health '{name}' is skipped due to: {reasons}"
        )
        if pre_processor_result == Passed:
            # processor.skipped()
            pre_processor_result = Skipped
        return pre_processor_run, pre_processor_result
    elif processor_type == 'post':
        # for the case only pre-processors runs
        if section.result == pre_processor_result:
            log.info('Only Pre-processor runs. Section result and '
                     'Pre-processor result are different.Reflecting '
                     'Post-processor result to Section.')
            # reflect processor results to section
            section.result = section.result + processor.result + self.pre_processor_result
        # processor is skipped
        log.info(
            f"Post-processor pyATS Health '{name}' was skipped due to: {reasons}"
        )
        if pre_processor_result == Passed:
            # processor.skipped()
            pre_processor_result = Skipped
        # return processor.result to raise the result
        # at end of context post processor
        return pre_processor_run, processor.result

    return pre_processor_run, pre_processor_result
def health_dispatcher(self,
                      steps,
                      section,
                      data,
                      testbed,
                      processor,
                      name='',
                      **kwargs):
    """ excute health yaml based on Blitz logic. This will be calling
    Blitz's `dispacher` to execute all the actions in health yaml

    `data` contains all the items under a section in health yaml

    example of `data`:
    [
      {
        'parallel': [
          {
            'api': {
              'device': 'uut',
              'function': 'get_platform_cpu_load',
              'arguments': {
                'command': 'show processes cpu',
                'processes': ['BGP I/O']
              },
              'save': [
                {
                  'variable_name': 'cpu'
                }
              ]
            }
          },
          {
            'api': {
              'device': 'uut',
              (snip)

    `data` is List, so store the `data` as dict to `data_dict` for Dq

    Arguments:
        steps (`obj`) : Aetest Steps object
        section (`obj`) : Aetest Section object
        data (`list`) : data of section
        testbed (`obj`) : testbed object
        processor (`obj`) : Aetest processor object
        name (`str`) : name of section in health yaml
                       Default to ``

    Returns:
        None
    """
    # ---------------------
    # pre-context processor
    # ---------------------

    data_dict, processor_flag = self._check_processor_tag(data=data)
    log.debug('processor_flag: {flag}'.format(flag=processor_flag))

    # check if all devices are connected
    data_dict_dq = Dq(data_dict)
    devices_connected = self._check_all_devices_connected(
        testbed, data_dict_dq)

    # execute pre-processor and received result in self.pre_processor_result
    # NOTE(review): `devices_connected` and `processor_flag` are passed
    # positionally — confirm they line up with the parameters of the
    # `_pre_post_processors` signature in use
    self.pre_processor_run, self.pre_processor_result = self._pre_post_processors(
        testbed,
        processor,
        section,
        data,
        name,
        devices_connected,
        processor_flag,
        processor_targets=['pre', 'both'],
        processor_type='pre')

    try:
        # hand control back to the wrapped section body
        yield
    except Exception as e:
        # for case section gets Exception
        section.errored(e)

    # ----------------------
    # post-context processor
    # ----------------------

    # check `post_if_pre_execute` and if pre-processor is executed
    if (data_dict_dq.get_values('processor', 0) == 'post_if_pre_execute'
            and not self.pre_processor_run):
        log.info(
            "Post-processor pyATS Health '{name}' was skipped because required Pre-processor was not executed."
            .format(name=name))
    else:
        # check if all devices are connected
        data_dict_dq = Dq(data_dict)
        devices_connected = self._check_all_devices_connected(
            testbed, data_dict_dq)
        # execute post-processor
        self._pre_post_processors(
            testbed,
            processor,
            section,
            data,
            name,
            devices_connected,
            processor_flag,
            processor_targets=['post', 'post_if_pre_execute', 'both'],
            processor_type='post',
            pre_processor_result=self.pre_processor_result)
def free_up_disk_space(device, destination, required_size, skip_deletion,
                       protected_files=None, min_free_space_percent=None,
                       dir_output=None):
    '''Delete files to create space on device except protected files

    Args:
        device ('obj') : Device object
        destination ('str') : Destination directory, i.e bootflash:/
        required_size ('int') : Check if enough space to fit given size in
            bytes. If this number is negative it will be assumed
            the required size is not available.
        skip_deletion ('bool') : Only performs checks, no deletion
        protected_files ('list') : List of file patterns that wont be deleted.
        min_free_space_percent ('int'): Minimum acceptable free disk space %.
            Optional
        dir_output ('str'): Output of 'dir' command
            if not provided, executes the cmd on device

    Returns:
        True if there is enough space after the operation, False otherwise
    '''
    if not destination:
        log.warning('No destination provided, cannot verify available space')
        return True

    # 'df' reports free/total/use% for the filesystem backing destination
    df_info = device.parse('df {}'.format(destination), output=dir_output)

    # only read the entry if the parser actually returned one; reading
    # 'available' off an empty values() view would raise AttributeError
    dir_df_info = df_info['directory'].values()
    free_space = None
    if dir_df_info:
        dir_df_info = list(dir_df_info)[0]
        free_space = dir_df_info.get('available')

    # best-effort: if free space cannot be determined, do not block the caller
    if not dir_df_info or not free_space:
        log.error('Unable to determine available space')
        return True

    # Check if available space is sufficient
    if min_free_space_percent:
        # Get total space
        total_space = dir_df_info.get('total')

        # Get current available space in %
        avail_percent = 100 - dir_df_info.get('use_percentage')

        log.info("There is {avail} % of free space on the disk, which is "
                 "{compare} than the target of {target} %.".format(
                     avail=round(avail_percent, 2),
                     compare='less'
                     if avail_percent < min_free_space_percent else 'greater',
                     target=min_free_space_percent))

        # get bigger of required_space or min_free_space_percent
        required_size = round(
            max(required_size, min_free_space_percent * .01 * total_space))

    if free_space > required_size:
        log.info('APIC: enough free space available: {}'.format(free_space))
        return True

    log.warning(
        'APIC: not enough free space, required: {}, available: {}'.format(
            required_size, free_space))

    # honor skip_deletion (consistent with the other free_up_disk_space
    # implementations): only report, never delete
    if skip_deletion:
        log.error(
            "'skip_deletion' is set to True and there isn't enough space "
            "on the device, files cannot be deleted.")
        return False

    ls_output = device.execute('ls -l {}'.format(destination))
    file_info = device.parse('ls -l {}'.format(destination), output=ls_output)

    dq = Dq(file_info)
    # turn parsed dir output to a list of files for sorting
    # Large files are given priority when deleting
    file_list = []
    for file in dq.get_values('files'):
        file_list.append((file, int(dq.contains(file).get_values('size')[0])))
    file_list.sort(key=lambda x: x[1], reverse=True)

    # create list of filenames
    files_to_be_deleted = set(x[0] for x in file_list)
    # filter list and get list of unprotected files
    _, unprotected_files = _protected_and_unprotected_files(
        files_to_be_deleted, protected_files)
    # create ordered list of unprotected files (largest first)
    to_be_deleted = [x[0] for x in file_list if x[0] in unprotected_files]
    log.info('Files to be deleted: {}'.format(to_be_deleted))

    # delete one file at a time until enough space is available; iterate
    # the filtered list so protected files are never attempted
    for file in to_be_deleted:
        device.api.delete_unprotected_files(directory=destination,
                                            protected=protected_files,
                                            files_to_delete=[file],
                                            dir_output=ls_output)

        if device.api.verify_enough_disk_space(required_size, destination):
            log.info("Verified there is enough space on the device after "
                     "deleting unprotected files.")
            return True

    # Exhausted list of files - still not enough space
    log.error('There is still not enough space on the device after '
              'deleting unprotected files.')
    return False
def health_dispatcher(self, steps, section, data, testbed, name='', **kwargs):
    """ Execute health yaml actions as pre/post context processors around
        a section, delegating the actual action execution to
        `_pre_post_processors`.

        This is a generator: code before `yield` runs as the pre-context
        processor, code after runs as the post-context processor.

        Arguments:
            steps (`obj`) : Aetest Steps object
            section (`obj`) : Aetest Section object
            data (`list`) : data of section
            testbed (`obj`) : testbed object
            name (`str`) : name of section in health yaml. Default to ``

        Returns:
            None
    """
    # pre-context processor

    # `data` contains all the items under a section in Blitz yaml
    #
    # example of `data`:
    # [
    #   {
    #     'parallel': [
    #       {
    #         'api': {
    #           'device': 'uut',
    #           'function': 'get_platform_cpu_load',
    #           'arguments': {
    #             'command': 'show processes cpu',
    #             'processes': ['BGP I/O']
    #           },
    #           'save': [
    #             {
    #               'variable_name': 'cpu'
    #             }
    #           ]
    #         }
    #       },
    #       {
    #         'api': {
    #           'device': 'uut',
    #           (snip)
    #
    # `data` is List, so store the `data` as dict to `data_dict` for Dq

    # check `processor` and return the value in processor_flag
    data_dict, processor_flag = self._check_processor_tag(data=data)
    log.debug('processor_flag: {flag}'.format(flag=processor_flag))

    # check if all devices are connected
    data_dict_dq = Dq(data_dict)
    devices_connected = self._check_all_devices_connected(
        testbed, data_dict_dq)

    # execute pre-processor and received result in self.pre_processor_result
    self.pre_processor_run, self.pre_processor_result = self._pre_post_processors(
        testbed,
        section,
        data,
        name,
        devices_connected,
        processor_flag,
        processors=['pre', 'both'],
        processor_type='pre')

    try:
        # hand control back to the wrapped section body
        yield
    except Exception as e:
        # for case section gets Exception
        section.errored(e)

    # post-context processor

    # check `post_after_pre` and if pre-processor is executed
    if (data_dict_dq.get_values('processor', 0) == 'post_after_pre'
            and not self.pre_processor_run):
        log.info(
            "Post-processor pyATS Health '{name}' was skipped because required Pre-processor was not executed."
            .format(name=name))
    else:
        # check if all devices are connected
        data_dict_dq = Dq(data_dict)
        devices_connected = self._check_all_devices_connected(
            testbed, data_dict_dq)
        # execute post-processor
        self._pre_post_processors(
            testbed,
            section,
            data,
            name,
            devices_connected,
            processor_flag,
            processors=['post', 'post_after_pre', 'both'],
            processor_type='post',
            pre_processor_result=self.pre_processor_result)