def clear_cores(device, core_list, crashreport_list):

    # Create dialog for response
    dialog = Dialog([
        Statement(pattern=r'Delete.*',
                  action='sendline()',
                  loop_continue=True,
                  continue_timer=False),
    ])

    # Init
    status = OK

    # preparing the full list to iterate over
    full_list = core_list + crashreport_list

    # Delete cores from the device
    for item in full_list:
        try:
            # Execute delete command for this core
            cmd = 'delete {location}/{core}'.format(
                core=item['core'], location=item['location'])
            output = device.execute(cmd, timeout=300, reply=dialog)

            # Log to user
            meta_info = 'Successfully deleted {location}/{core}'.format(
                core=item['core'], location=item['location'])
            logger.info(meta_info)
            status += OK(meta_info)
        except Exception as e:
            # Handle exception
            logger.warning(e)
            meta_info = 'Unable to delete {location}/{core}'.format(
                core=item['core'], location=item['location'])
            logger.error(meta_info)
            status += ERRORED(meta_info)

    return status
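# Illustrative sketch only (not part of the plugin above): shows the entry
# shape clear_cores() expects in core_list/crashreport_list and the delete
# command it would issue. The location and file name below are sample values
# taken from the IOS-XE check_cores() comments, not real device output.
def _sketch_clear_cores_command():
    sample_item = {'location': 'bootflash:/core',
                   'core': 'kernel.rp_RP-EDISON_0_20180705125020.core.flat.gz'}
    cmd = 'delete {location}/{core}'.format(core=sample_item['core'],
                                            location=sample_item['location'])
    # -> 'delete bootflash:/core/kernel.rp_RP-EDISON_0_20180705125020.core.flat.gz'
    return cmd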
def execution(self, device, **kwargs):

    # Init
    status = OK
    message = ''

    # Execute command to check for tracebacks - timeout set to 5 mins
    try:
        output = device.execute(self.show_cmd,
                                timeout=self.args.alignmentcheck_timeout)
    except Exception as e:
        status += CRITICAL(str(e))
        return status

    if not output:
        return ERRORED('No output from {cmd}'.format(cmd=self.show_cmd))

    # Check for alignment errors. Hex values = problems.
    if '0x' in output:
        message = "Device {d} Alignment error detected: '{o}'"\
            .format(d=device.name, o=output)
        status += CRITICAL(message)
        logger.error(banner(message))

    # Log message to user
    if not message:
        message = "***** No alignment error found *****"
        status += OK(message)
        logger.info(banner(message))

    # Final status
    return status
def check_cores(device, core_list, **kwargs):

    # Init
    status = OK

    # Check if device is VDC
    try:
        output = device.parse('show vdc current-vdc')
    except Exception as e:
        logger.warning(e)
        meta_info = ("Unable to execute 'show vdc current-vdc' to check "
                     "if device is VDC")
        logger.error(meta_info)
        status = ERRORED(meta_info)
        return status

    # Check if device is VDC
    if 'current_vdc' in output and output['current_vdc']['id'] != '1':
        cmd = 'show cores'
    else:
        cmd = 'show cores vdc-all'

    # Execute command to check for cores
    header = ["VDC", "Module", "Instance", r"Process\-name", "PID",
              r"Date\(Year\-Month\-Day Time\)"]
    output = oper_fill_tabular(device=device,
                               show_command=cmd,
                               header_fields=header,
                               index=[5])

    if not output.entries:
        meta_info = "No cores found!"
        logger.info(meta_info)
        return OK(meta_info)

    # Parse through output to collect core information (if any)
    for k in sorted(output.entries.keys(), reverse=True):
        row = output.entries[k]
        date = row.get(r"Date\(Year\-Month\-Day Time\)", None)
        if not date:
            continue
        date_ = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')

        # Save core info
        core_info = dict(module=row['Module'],
                         pid=row['PID'],
                         instance=row['Instance'],
                         process=row[r'Process\-name'],
                         date=date.replace(" ", "_"))
        core_list.append(core_info)

        meta_info = "Core dump generated for process '{}' at {}".format(
            row[r'Process\-name'], date_)
        logger.error(meta_info)
        status += CRITICAL(meta_info)

    return status
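# Hypothetical sketch (not in the original module): illustrates how a single
# 'show cores vdc-all' row, keyed by the header fields used above, maps to the
# core_info dict that check_cores() appends to core_list. The row values are
# made up for illustration only.
def _sketch_nxos_core_row():
    from datetime import datetime

    row = {'VDC': '1', 'Module': '27', 'Instance': '1',
           r'Process\-name': 'bgp', 'PID': '1316',
           r'Date\(Year\-Month\-Day Time\)': '2017-10-16 15:56:59'}

    date = row[r'Date\(Year\-Month\-Day Time\)']
    date_ = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')  # parsed timestamp

    core_info = dict(module=row['Module'],
                     pid=row['PID'],
                     instance=row['Instance'],
                     process=row[r'Process\-name'],
                     date=date.replace(" ", "_"))
    return core_info, date_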
def execution(self, device):

    # Plugin parser results are always stored as 'self.args'
    if self.args.print_timestamp:
        self.execution_start = datetime.datetime.now()
        logger.info('Current time is: %s' % self.execution_start)

    logger.info('Execution %s: Hello World!' % device.name)

    return OK("Plugin completed")
def execution(self, device, **kwargs):

    # Init
    status = OK

    # create timeout object
    timeout = Timeout(max_time=int(self.args.cpucheck_timeout),
                      interval=int(self.args.cpucheck_interval))

    # loop status
    loop_stat_ok = True

    if not hasattr(self, 'PARSER_MODULE'):
        return WARNING('Does not have CPU related parsers to check')

    while timeout.iterate():
        # Execute command to get five minutes usage percentage
        try:
            cpu_dict = self.PARSER_MODULE(device).parse(sort_time='5min',
                                                        key_word='CPU')
        except Exception as e:
            return ERRORED(
                'No output from show processes cpu\n{}'.format(e))

        # Check 5 minutes percentage smaller than cpucheck_fivemin_pcnt
        if int(cpu_dict['five_min_cpu']) >= int(
                self.args.cpucheck_fivemin_pcnt):
            message = "****** Device {d} *****\n".format(d=device.name)
            message += "Excessive CPU utilization detected for 5 min interval\n"
            message += "Allowed: {e}%\n".format(
                e=self.args.cpucheck_fivemin_pcnt)
            message += "Measured: FiveMin: {r}%".format(
                r=cpu_dict['five_min_cpu'])
            loop_stat_ok = False
            timeout.sleep()
        else:
            message = "***** CPU usage is expected ***** \n"
            message += "Allowed threshold: {e} \n".format(
                e=self.args.cpucheck_fivemin_pcnt)
            message += "Measured from device: {r}".format(
                r=cpu_dict['five_min_cpu'])
            loop_stat_ok = True
            status += OK(message)
            logger.info(banner(message))
            break

    if not loop_stat_ok:
        status += CRITICAL(message)
        logger.error(banner(message))

    # Final status
    return status
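# Minimal sketch (assumption: Timeout here is genie.utils.timeout.Timeout, the
# same polling helper used by execution() above). It shows the iterate()/sleep()
# retry pattern in isolation, with a made-up user-supplied condition callable.
def _sketch_timeout_polling(check, max_time=60, interval=10):
    from genie.utils.timeout import Timeout

    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        if check():           # hypothetical callable returning True when done
            return True
        timeout.sleep()       # wait 'interval' seconds before the next attempt
    return False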
def check_cores(device, core_list, **kwargs):

    # Init
    status = OK
    timeout = kwargs['timeout']

    # Execute command to check for cores
    for location in ['disk0:', 'disk0:core', 'harddisk:']:
        try:
            output = device.execute('dir {}'.format(location),
                                    timeout=timeout)
        except Exception as e:
            # Handle exception
            logger.warning(e)
            logger.warning(
                "Location '{}' does not exist on device".format(location))
            continue

        if 'Invalid input detected' in output:
            logger.warning(
                "Location '{}' does not exist on device".format(location))
            continue
        elif not output:
            meta_info = "Unable to check for cores"
            logger.error(meta_info)
            return ERRORED(meta_info)

        # 24 -rwxr--r-- 1 18225345 Oct 23 05:15 ipv6_rib_9498.by.11.20170624-014425.xr-vm_node0_RP0_CPU0.237a0.core.gz
        pattern1 = r'(?P<number>(\d+)) +(?P<permissions>(\S+)) +(?P<other_number>(\d+)) +(?P<filesize>(\d+)) +(?P<month>(\S+)) +(?P<date>(\d+)) +(?P<time>(\S+)) +(?P<core>(.*core\.gz))'

        # 12089255 -rwx 23596201 Tue Oct 31 05:16:50 2017 ospf_14495.by.6.20171026-060000.xr-vm_node0_RP0_CPU0.328f3.core.gz
        pattern2 = r'(?P<number>(\d+)) +(?P<permissions>(\S+)) +(?P<filesize>(\d+)) +(?P<day>(\S+)) +(?P<month>(\S+)) +(?P<date>(\d+)) +(?P<time>(\S+)) +(?P<year>(\d+)) +(?P<core>(.*core\.gz))'

        # Parse through output to collect core information (if any)
        for line in output.splitlines():
            match = re.search(pattern1, line, re.IGNORECASE) or \
                    re.search(pattern2, line, re.IGNORECASE)
            if match:
                core = match.groupdict()['core']
                meta_info = "Core dump generated:\n'{}'".format(core)
                logger.error(meta_info)
                status += CRITICAL(meta_info)
                core_info = dict(location=location, core=core)
                core_list.append(core_info)

        if not core_list:
            meta_info = "No cores found at location: {}".format(location)
            logger.info(meta_info)
            status += OK(meta_info)

    return status
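# Self-contained sketch (not part of the plugin): exercises pattern1 from the
# XR check_cores() above against the sample 'dir' line quoted in its comments,
# to show that the named 'core' group carries the core file name.
def _sketch_xr_dir_pattern():
    import re

    pattern1 = r'(?P<number>(\d+)) +(?P<permissions>(\S+)) +(?P<other_number>(\d+)) +(?P<filesize>(\d+)) +(?P<month>(\S+)) +(?P<date>(\d+)) +(?P<time>(\S+)) +(?P<core>(.*core\.gz))'
    line = ('24 -rwxr--r-- 1 18225345 Oct 23 05:15 '
            'ipv6_rib_9498.by.11.20170624-014425.xr-vm_node0_RP0_CPU0.237a0.core.gz')

    match = re.search(pattern1, line, re.IGNORECASE)
    # -> 'ipv6_rib_9498.by.11.20170624-014425.xr-vm_node0_RP0_CPU0.237a0.core.gz'
    return match.groupdict()['core']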
def clear_cores(device, core_list, crashreport_list, **kwargs):

    # Execute command to delete cores
    try:
        device.execute('clear cores')
        meta_info = "Successfully cleared cores on device"
        logger.info(meta_info)
        status = OK(meta_info)
    except Exception as e:
        # Handle exception
        logger.warning(e)
        meta_info = "Unable to clear cores on device"
        logger.error(meta_info)
        status = ERRORED(meta_info)

    return status
def upload_to_server(device, core_list, *args, **kwargs):

    # Init
    status = OK

    # Get info
    port = kwargs['port']
    server = kwargs['server']
    timeout = kwargs['timeout']
    destination = kwargs['destination']
    protocol = kwargs['protocol']
    username = kwargs['username']
    password = kwargs['password']

    # Check values are not None
    for item in kwargs:
        if item in ['protocol', 'server', 'destination', 'username',
                    'password'] and kwargs[item] is None:
            meta_info = "Unable to upload core dump - parameter `{}` not provided."\
                        " Required parameters are: `protocol`, `server`, "\
                        "`destination`, `username`, `password`".format(item)
            return ERRORED(meta_info)

    # Append the port to the server address (once, if provided)
    if port:
        server = '{server}:{port}'.format(server=server, port=port)

    # Upload each core found
    for core in core_list:
        # Sample command:
        # copy core://<module-number>/<process-id>[/instance-num]
        #      tftp:[//server[:port]][/path] vrf management
        path = '{dest}/core_{pid}_{process}_{date}_{time}'.format(
            dest=destination,
            pid=core['pid'],
            process=core['process'],
            date=core['date'],
            time=time.time())

        # Include the instance number in the process-id (if present)
        pid = core['pid']
        if 'instance' in core:
            pid = '{pid}/{instance}'.format(pid=core['pid'],
                                            instance=core['instance'])

        message = "Core dump upload attempt from module {} to {} via server {}".\
            format(core['module'], destination, server)

        # construct the module/pid for the copy process
        core['core'] = '{module}/{pid}'.format(module=core['module'], pid=pid)

        try:
            # Check if filetransfer has been added to device before or not
            if not hasattr(device, 'filetransfer'):
                device.filetransfer = FileUtils.from_device(device)

            to_URL = '{protocol}://{address}/{path}'.format(
                protocol=protocol, address=server, path=path)
            from_URL = 'core://{core_path}'.format(core_path=core['core'])
            device.filetransfer.copyfile(device=device,
                                         source=from_URL,
                                         destination=to_URL)
        except Exception as e:
            if 'Tftp operation failed' in str(e):
                meta_info = "Core dump upload operation failed: {}".format(
                    message)
                logger.error(meta_info)
                status += ERRORED(meta_info)
            else:
                # Handle exception
                logger.error(e)
                status += ERRORED("Failed: {}".format(message))
        else:
            meta_info = "Core dump upload operation passed: {}".format(message)
            logger.info(meta_info)
            status += OK(meta_info)

    return status
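# Illustrative sketch only (not part of the plugin): builds the source and
# destination URLs that the NXOS upload_to_server() above would hand to
# filetransfer.copyfile() for one core entry, following the documented
# 'copy core://<module>/<pid>[/instance] tftp:...' syntax. The server address,
# destination path, core details, and timestamp are all made up.
def _sketch_nxos_copy_urls():
    core = {'module': '27', 'pid': '1316', 'instance': '1',
            'process': 'bgp', 'date': '2017-10-16_15:56:59'}
    server = '10.1.1.1:21'
    destination = '/tftpboot/cores'

    path = '{dest}/core_{pid}_{process}_{date}_{time}'.format(
        dest=destination, pid=core['pid'], process=core['process'],
        date=core['date'], time=1234567890.0)

    from_URL = 'core://{module}/{pid}/{instance}'.format(
        module=core['module'], pid=core['pid'], instance=core['instance'])
    to_URL = 'tftp://{address}/{path}'.format(address=server, path=path)
    return from_URL, to_URL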
def execution(self, device, **kwargs):

    # Init
    status = OK
    matched_lines_dict = {}
    match_patterns = None
    lookup = Lookup.from_device(device)

    # Execute command to check for tracebacks - timeout set to 5 mins
    output = lookup.libs.utils.check_tracebacks(
        device, timeout=self.args.tracebackcheck_timeout)
    if not output:
        message = "No output found for '{cmd}'".format(cmd=self.show_cmd)
        status += OK(message)
        logger.info(message)
        return status

    # Set match pattern to search 'show logging logfile'
    if not self.args.tracebackcheck_logic_pattern:
        match_patterns = logic_str("And('Traceback')")
    else:
        # Check if it is a logic pattern or a plain string
        if 'And' in self.args.tracebackcheck_logic_pattern or \
           'Not' in self.args.tracebackcheck_logic_pattern or \
           'Or' in self.args.tracebackcheck_logic_pattern:
            if not self.args.tracebackcheck_disable_traceback:
                match_patterns = logic_str("Or('Traceback', {})".format(
                    self.args.tracebackcheck_logic_pattern))
            else:
                match_patterns = logic_str(
                    self.args.tracebackcheck_logic_pattern)
        else:
            logic_string = ""
            # Check if user wants to disable 'Traceback' check
            if not self.args.tracebackcheck_disable_traceback:
                logic_string = "'Traceback', "
            # Add patterns to create a logic string
            for item in self.args.tracebackcheck_logic_pattern.split(', '):
                logic_string += "'{}', ".format(item.strip())
            # Create logic pattern to match in 'show logging logfile' output
            match_patterns = logic_str("Or({})".format(
                logic_string.rstrip(", ")))

    # Parse 'show logging logfile' output for keywords
    matched_lines_dict['matched_lines'] = []
    logger.info('Patterns to search for: {}'.format(match_patterns))
    for line in output.splitlines():
        if match_patterns(line):
            matched_lines_dict['matched_lines'].append(line)
            message = "Matched pattern in line: '{line}'".format(line=line)
            status += CRITICAL(message)
            status += CRITICAL(matched_lines_dict)
            logger.error(message)

    # Log message to user
    if not matched_lines_dict['matched_lines']:
        message = "No patterns {patterns} matched".format(
            patterns=match_patterns)
        status += OK(message)
        logger.info(message)

    # Clear logging (if user specified)
    if self.args.tracebackcheck_clean_up:
        try:
            output = lookup.libs.utils.clear_tracebacks(
                device, timeout=self.args.tracebackcheck_timeout)
            message = "Successfully cleared logging"
            status += OK(message)
            logger.info(message)
        except Exception as e:
            # Handle exception
            logger.warning(e)
            message = "Clear logging execution failed"
            logger.error(message)
            status += ERRORED(message)

    # Final status
    return status
def check_cores(device, core_list, crashreport_list, timeout, crash_type=None):

    # Init
    status = OK

    # Construct the core pattern to be parsed later
    # 1613827 -rw- 56487348 Oct 17 2017 15:56:59 +17:00 PE1_RP_0_x86_64_crb_linux_iosd-universalk9-ms_15866_20171016-155604-PDT.core.gz
    # 7763 -rw- 107847329 Jul 5 2018 12:53:55 +00:00 kernel.rp_RP-EDISON_0_20180705125020.core.flat.gz
    # 7761 -rw- 36003 Jul 5 2018 12:50:20 +00:00 kernel.rp_RP-EDISON_0_20180705125020.txt
    core_pattern = re.compile(r'(?P<number>\d+) '
                              r'+(?P<permissions>[rw\-]+) +(?P<filesize>\d+) '
                              r'+(?P<month>\w+) +(?P<date>\d+) +(?P<year>\d+) '
                              r'+(?P<time>[\w\:]+) +(?P<timezone>(\S+)) '
                              r'+(?P<core>((.*\.core\.gz)|(.*\.core\.flat\.gz)|(.*\.txt)))$',
                              re.IGNORECASE)

    # Construct the crashreport pattern to be parsed later
    # 62 -rw- 125746 Jul 30 2016 05:47:28 +00:00 crashinfo_RP_00_00_20160730-054724-UTC
    crashinfo_pattern = re.compile(r'(?P<number>\d+) '
                                   r'+(?P<permissions>[rw\-]+) +(?P<filesize>\d+) '
                                   r'+(?P<month>\w+) +(?P<date>\d+) +(?P<year>\d+) '
                                   r'+(?P<time>[\w\:]+) +(?P<timezone>(\S+)) '
                                   r'+(?P<core>(crashinfo.*))$',
                                   re.IGNORECASE)

    # define default checking dir locations
    locations = ['flash:/core', 'bootflash:/core', 'harddisk:/core',
                 'crashinfo:']

    # if provided
    if crash_type:
        for crash_string in crash_type.split(','):
            if crash_string:
                locations.append('flash:{}*'.format(crash_string.strip()))

    # Execute command to check for cores and crashinfo reports
    for location in locations:
        try:
            output = device.execute('dir {}'.format(location),
                                    timeout=timeout)
        except Exception as e:
            if any(isinstance(item, TimeoutError) for item in e.args):
                # Handle exception
                logger.warning(e)
                logger.warning(banner(
                    "dir {} execution exceeded the timeout value {}".format(
                        location, timeout)))
            else:
                # Handle exception
                logger.warning(e)
                logger.warning(banner(
                    "Location '{}' does not exist on device".format(location)))
            continue

        if 'Invalid input detected' in output or 'No such file' in output:
            logger.warning(
                "Location '{}' does not exist on device".format(location))
            continue
        elif not output:
            meta_info = "Unable to check for cores"
            logger.error(meta_info)
            return ERRORED(meta_info)

        for line in output.splitlines():
            line = line.strip()

            m = core_pattern.match(line)
            if m:
                core = m.groupdict()['core']
                meta_info = "Core dump generated:\n'{}'".format(core)
                logger.error(meta_info)
                status += CRITICAL(meta_info)
                core_info = dict(location=location, core=core)
                core_list.append(core_info)
                continue

            m = crashinfo_pattern.match(line)
            if m:
                crashreport = m.groupdict()['core']
                meta_info = "Crashinfo report generated:\n'{}'".format(
                    crashreport)
                logger.error(meta_info)
                status += CRITICAL(meta_info)
                crashreport_info = dict(location=location, core=crashreport)
                crashreport_list.append(crashreport_info)
                continue

            # find user defined crashed files other than crashinfo
            pattern = location.split(':')[1]
            if pattern and '/' not in pattern:
                m = re.compile(r'{}'.format(pattern)).match(line)
                if m:
                    crashreport = line
                    meta_info = "Crashinfo report generated:\n'{}' on device {}".\
                        format(line, device.name)
                    logger.error(meta_info)
                    status += CRITICAL(meta_info)
                    crashreport_info = dict(location=location,
                                            core=crashreport)
                    crashreport_list.append(crashreport_info)
                    continue

        if not core_list:
            meta_info = "No cores found at location: {}".format(location)
            logger.info(meta_info)
            status += OK(meta_info)

        if not crashreport_list:
            meta_info = "No crashreports found at location: {}".format(
                location)
            logger.info(meta_info)
            status += OK(meta_info)

    return status
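# Self-contained sketch (not part of the plugin): checks the IOS-XE core
# pattern from check_cores() above against the sample 'dir' line quoted in its
# comments, showing that the 'core' group captures the core file name.
def _sketch_iosxe_dir_pattern():
    import re

    core_pattern = re.compile(r'(?P<number>\d+) '
                              r'+(?P<permissions>[rw\-]+) +(?P<filesize>\d+) '
                              r'+(?P<month>\w+) +(?P<date>\d+) +(?P<year>\d+) '
                              r'+(?P<time>[\w\:]+) +(?P<timezone>(\S+)) '
                              r'+(?P<core>((.*\.core\.gz)|(.*\.core\.flat\.gz)|(.*\.txt)))$',
                              re.IGNORECASE)

    line = ('1613827 -rw- 56487348 Oct 17 2017 15:56:59 +17:00 '
            'PE1_RP_0_x86_64_crb_linux_iosd-universalk9-ms_15866_20171016-155604-PDT.core.gz')

    m = core_pattern.match(line)
    # -> 'PE1_RP_0_x86_64_crb_linux_iosd-universalk9-ms_15866_20171016-155604-PDT.core.gz'
    return m.groupdict()['core']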
def upload_to_server(device, core_list, crashreport_list, **kwargs):

    # Init
    status = OK

    # Get info
    port = kwargs['port']
    server = kwargs['server']
    timeout = kwargs['timeout']
    destination = kwargs['destination']
    protocol = kwargs['protocol']
    username = kwargs['username']
    password = kwargs['password']

    # Check values are not None
    for item in kwargs:
        if item in ['protocol', 'server', 'destination', 'username',
                    'password'] and kwargs[item] is None:
            meta_info = "Unable to upload core dump - parameter `{}` not provided."\
                        " Required parameters are: `protocol`, `server`, "\
                        "`destination`, `username`, `password`".format(item)
            return ERRORED(meta_info)

    # preparing the full list to iterate over
    full_list = core_list + crashreport_list

    if port:
        server = '{server}:{port}'.format(server=server, port=port)

    # Upload each core/crashinfo report found
    for item in full_list:
        if 'crashinfo' in item['core']:
            file_type = 'Crashreport'
        else:
            file_type = 'Core'

        message = "{} upload attempt from {} to {} via server {}".format(
            file_type, item['location'], destination, server)

        try:
            # Check if filetransfer has been added to device before or not
            if not hasattr(device, 'filetransfer'):
                device.filetransfer = FileUtils.from_device(device)

            to_URL = '{protocol}://{address}/{path}'.format(
                protocol=protocol, address=server, path=destination)
            from_URL = '{location}//{core_path}'.format(
                location=item['location'], core_path=item['core'])
            device.filetransfer.copyfile(device=device,
                                         source=from_URL,
                                         destination=to_URL)
        except Exception as e:
            if 'Tftp operation failed' in str(e):
                meta_info = "{} upload operation failed: {}".format(
                    file_type, message)
                logger.error(meta_info)
                status += ERRORED(meta_info)
            else:
                # Handle exception
                logger.warning(e)
                status += ERRORED("Failed: {}".format(message))
        else:
            meta_info = "{} upload operation passed: {}".format(
                file_type, message)
            logger.info(meta_info)
            status += OK(meta_info)

    return status