class oVirtEngineBackup(Plugin, RedHatPlugin):
    """Run engine-backup to capture a fresh oVirt engine DB backup."""

    short_desc = 'oVirt Engine database backup'
    packages = ("ovirt-engine-tools-backup",)
    plugin_name = "ovirt_engine_backup"
    option_list = [
        PluginOpt('backupdir', default='/var/lib/ovirt-engine-backup',
                  desc='Directory where backups are generated'),
        PluginOpt('tmpdir', default='/tmp',
                  desc='temp dir to use for engine-backup')
    ]
    profiles = ("virt",)

    def setup(self):
        # Timestamp the artifacts so successive runs do not clobber
        # each other
        stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        backup_dir = self.get_option("backupdir")
        tarball = self.path_join(
            backup_dir, "engine-db-backup-%s.tar.gz" % (stamp))
        logfile = self.path_join(
            backup_dir, "engine-db-backup-%s.log" % (stamp))
        cmd = ("engine-backup --mode=backup --scope=db"
               " --file=%s --log=%s --tmpdir=%s") % (
            tarball, logfile, self.get_option("tmpdir"))
        res = self.collect_cmd_output(cmd, suggest_filename="engine-backup")
        # Only copy the artifacts when the backup actually succeeded
        if res['status'] == 0:
            self.add_copy_spec([tarball, logfile])
class OpenStackDatabase(Plugin):
    """Collect OpenStack database cluster status and optional SQL dumps."""

    short_desc = 'Openstack Database Information'
    plugin_name = 'openstack_database'
    profiles = ('openstack', 'openstack_controller')

    option_list = [
        PluginOpt('dump', default=False, desc='Dump select databases'),
        PluginOpt('dumpall', default=False, desc='Dump ALL databases')
    ]

    # Regex patterns of database names considered OpenStack-owned;
    # used to filter 'show databases' output when 'dump' is set
    databases = [
        'cinder',
        'glance',
        'heat',
        'ironic',
        'keystone',
        'mistral',
        '(.*)?neutron',
        'nova.*'
    ]

    def setup(self):
        # determine if we're running databases on the host or in a container
        _db_containers = [
            'galera-bundle-.*',  # overcloud
            'mysql'  # undercloud
        ]

        cname = None
        for container in _db_containers:
            cname = self.get_container_by_name(container)
            if cname:
                break

        fname = "clustercheck_%s" % cname if cname else None
        self.add_cmd_output('clustercheck', container=cname, timeout=15,
                            suggest_filename=fname)

        if self.get_option('dump') or self.get_option('dumpall'):
            db_dump = self.get_mysql_db_string(container=cname)
            db_cmd = "mysqldump --opt %s" % db_dump

            self.add_cmd_output(db_cmd, suggest_filename='mysql_dump.sql',
                                sizelimit=0, container=cname)

    def get_mysql_db_string(self, container=None):
        """Build the database-selection argument string for mysqldump.

        Returns '--all-databases' when 'dumpall' is set, otherwise
        '-B <db> ...' listing only databases matching self.databases.
        """
        if self.get_option('dumpall'):
            return '--all-databases'

        collect = []
        dbs = self.exec_cmd('mysql -e "show databases;"',
                            container=container)

        for db in dbs['output'].splitlines():
            # BUG FIX: the regex pattern must be the first argument to
            # re.match(); previously the database name was passed as the
            # pattern and the pattern as the string, so the intended
            # filters did not apply as written.
            if any(re.match(reg, db) for reg in self.databases):
                collect.append(db)

        return '-B ' + ' '.join(collect)
class MockPlugin(Plugin):
    """Minimal plugin stub exposing two options for option-handling tests."""

    option_list = [
        PluginOpt("opt", default=None, desc='an option', val_type=str),
        PluginOpt("opt2", default=False, desc='another option')
    ]

    def setup(self):
        # Deliberately a no-op: this mock only carries option metadata.
        pass
class MockPlugin(Plugin):
    """Plugin stub declaring three differently-typed default options."""

    option_list = [
        PluginOpt('baz', default=False),
        PluginOpt('empty', default=None),
        PluginOpt('test_option', default='foobar')
    ]

    def __init__(self, commons):
        # Forward the shared commons dict straight to the Plugin base.
        super(MockPlugin, self).__init__(commons=commons)
class Rpm(Plugin, RedHatPlugin):
    """Collect RPM package data, optional verification and the rpmdb."""

    short_desc = 'RPM Package Manager'
    plugin_name = 'rpm'
    profiles = ('system', 'packagemanager')
    option_list = [
        PluginOpt('rpmq', default=True,
                  desc='query package information with rpm -q'),
        PluginOpt('rpmva', default=False, desc='verify all packages'),
        PluginOpt('rpmdb', default=False, desc='collect /var/lib/rpm')
    ]
    verify_packages = ('rpm', )

    def setup(self):
        self.add_copy_spec("/var/log/rpmpkgs")
        if self.get_option("rpmq"):
            # %s is filled per-query below with a --qf format string
            rpmq = "rpm --nodigest -qa --qf=%s"
            # basic installed-rpms
            nvra = '"%-59{NVRA} %{INSTALLTIME:date}\n"'
            # chained %-substitution: the first builds the shell wrapper
            # around the rpm command, the second injects the query format
            irpms = "sh -c '%s | sort -V'" % rpmq % nvra
            self.add_cmd_output(irpms, root_symlink='installed-rpms',
                                tags='installed_rpms')
            # extended package data
            extpd = ('"%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\\t'
                     '%{INSTALLTIME:date}\\t%{INSTALLTIME}\\t'
                     '%{VENDOR}\\t%{BUILDHOST}\\t'
                     '%{SIGPGP}\\t%{SIGPGP:pgpsig}\\n"')
            self.add_cmd_output(rpmq % extpd, suggest_filename='package-data',
                                tags=['installed_rpms', 'package_data'])
        if self.get_option("rpmva"):
            # verifying every package is slow: raise the plugin timeout
            # and give the command its own generous timeout
            self.plugin_timeout = 1000
            self.add_cmd_output(
                "rpm -Va", root_symlink="rpm-Va", timeout=900, priority=100,
                tags=['rpm_va', 'rpm_V', 'rpm_v', 'insights_rpm_V_packages'])
        if self.get_option("rpmdb"):
            # record open handles on the rpmdb before copying it
            self.add_cmd_output("lsof +D /var/lib/rpm",
                                suggest_filename='lsof_D_var_lib_rpm')
            self.add_copy_spec("/var/lib/rpm")
        self.add_cmd_output("rpm --showrc")
class Skydive(Plugin, RedHatPlugin):
    """Collect skydive configuration, logs and client status output."""

    short_desc = 'Skydive network topology and protocol analyzer'
    plugin_name = "skydive"
    profiles = ('network', )
    files = ('/usr/bin/skydive', '/etc/skydive/skydive.yml')

    password_warn_text = " (password visible in process listings)"

    option_list = [
        PluginOpt('username', default='', val_type=str,
                  desc='skydive username'),
        PluginOpt('password', default='', val_type=str,
                  desc='skydive password' + password_warn_text),
        PluginOpt('analyzer', default='', val_type=str,
                  desc='skydive analyzer address')
    ]

    def setup(self):
        self.add_copy_spec("/etc/skydive/skydive.yml")
        self.add_copy_spec("/var/log/skydive.log")

        # Plugin options win; otherwise fall back to the skydive and
        # then the OpenStack environment variables.
        username = (self.get_option("username") or
                    os.getenv("SKYDIVE_USERNAME", "") or
                    os.getenv("OS_USERNAME", ""))
        password = (self.get_option("password") or
                    os.getenv("SKYDIVE_PASSWORD", "") or
                    os.getenv("OS_PASSWORD", ""))
        analyzer = (self.get_option("analyzer") or
                    os.getenv("SKYDIVE_ANALYZER", "localhost:8082"))

        if not all([username, password, analyzer]):
            self.soslog.warning("Some or all of the skydive params are not "
                                "set properly. Skydive status command may "
                                " not work as expected.")

        # Export the resolved values so the skydive client picks them up.
        os.environ["SKYDIVE_USERNAME"] = username
        os.environ["SKYDIVE_PASSWORD"] = password
        os.environ["SKYDIVE_ANALYZER"] = analyzer

        self.add_cmd_output("skydive client status")
class ContainersCommon(Plugin, RedHatPlugin, UbuntuPlugin):
    """Collect shared container configuration and rootless-user state."""

    short_desc = 'Common container configs under {/etc,/usr/share}/containers'
    plugin_name = 'containers_common'
    profiles = ('container', )
    packages = ('containers-common', )
    option_list = [
        PluginOpt('rootlessusers', default='', val_type=str,
                  desc='colon-delimited list of users to collect for')
    ]

    def setup(self):
        self.add_copy_spec([
            '/etc/containers/*',
            '/usr/share/containers/*',
            '/etc/subuid',
            '/etc/subgid',
        ])

        # Parse the colon-delimited user list, dropping empty entries.
        users_opt = self.get_option('rootlessusers')
        users_list = [u for u in users_opt.split(':') if u] if users_opt else []

        user_subcmds = [
            'podman info',
            'podman unshare cat /proc/self/uid_map',
            'podman unshare cat /proc/self/gid_map',
            'podman images',
            'podman images --digests',
            'podman pod ps',
            'podman port --all',
            'podman ps',
            'podman ps -a',
            'podman stats --no-stream --all',
            'podman version',
            'podman volume ls',
            'buildah info',
            'buildah unshare cat /proc/self/uid_map',
            'buildah unshare cat /proc/self/gid_map',
            'buildah containers',
            'buildah containers --all',
            'buildah images',
            'buildah images --all',
            'buildah version',
        ]
        for user in users_list:
            home = os.path.expanduser('~%s' % user)
            # collect user's containers' config
            self.add_copy_spec('%s/.config/containers/' % (home))
            # collect user-status
            self.add_cmd_output('loginctl user-status %s' % user)
            # collect the user's related commands
            self.add_cmd_output([
                'machinectl -q shell %s@ /usr/bin/%s' % (user, cmd)
                for cmd in user_subcmds
            ], foreground=True)
class Abrt(Plugin, RedHatPlugin):
    """Collect ABRT status, crash-report listings and configuration."""

    short_desc = 'Automatic Bug Reporting Tool'
    plugin_name = "abrt"
    profiles = ('system', 'debug')
    packages = ('abrt-cli', 'abrt-gui', 'abrt')
    files = ('/var/spool/abrt',)

    option_list = [
        PluginOpt("detailed", default=False,
                  desc="collect detailed information for every report")
    ]

    def setup(self):
        self.add_cmd_output("abrt-cli status",
                            tags=["abrt_status", "insights_abrt_status_bare"])
        listing = self.collect_cmd_output("abrt-cli list")

        # With 'detailed' set, fetch full info for every report directory
        # named in the listing output.
        if self.get_option("detailed") and listing['status'] == 0:
            for line in listing["output"].splitlines():
                if line.startswith("Directory"):
                    self.add_cmd_output("abrt-cli info -d '%s'"
                                        % line.split()[1])

        self.add_copy_spec([
            "/etc/abrt/abrt.conf",
            "/etc/abrt/abrt-action-save-package-data.conf",
            "/etc/abrt/plugins"
        ])
class Libraries(Plugin, IndependentPlugin):
    """Collect dynamic-loader configuration and library cache details."""

    short_desc = 'Dynamic shared libraries'
    plugin_name = 'libraries'
    profiles = ('system', )

    option_list = [
        PluginOpt('ldconfigv', default=False,
                  desc='collect verbose ldconfig output')
    ]

    def setup(self):
        self.add_copy_spec(["/etc/ld.so.conf", "/etc/ld.so.conf.d"])
        if self.get_option("ldconfigv"):
            self.add_cmd_output("ldconfig -v -N -X")

        self.add_env_var(['PATH', 'LD_LIBRARY_PATH', 'LD_PRELOAD'])

        ldconfig = self.collect_cmd_output("ldconfig -p -N -X")
        if ldconfig['status'] != 0:
            return

        # Collect library directories from ldconfig's cache: each entry
        # looks like "libfoo.so (flags) => /path/libfoo.so"; keep the
        # directory part of the right-hand side.
        lib_dirs = set()
        for entry in ldconfig['output'].splitlines():
            parts = entry.split(" => ", 2)
            if len(parts) != 2:
                continue
            lib_dirs.add(parts[1].rsplit('/', 1)[0])

        if lib_dirs:
            self.add_cmd_output("ls -lanH %s" % " ".join(lib_dirs),
                                suggest_filename="ld_so_cache")
class Veritas(Plugin, RedHatPlugin):
    """Run VRTSexplorer and collect the tarball it reports generating."""

    short_desc = 'Veritas software'
    plugin_name = 'veritas'
    profiles = ('cluster', 'storage')

    # Information about VRTSexplorer obtained from
    # http://seer.entsupport.symantec.com/docs/243150.htm
    option_list = [
        PluginOpt('script', default='/opt/VRTSspt/VRTSexplorer',
                  desc='Path to VRTSexploer script')
    ]

    def check_enabled(self):
        # Only enable when the configured VRTSexplorer script exists
        return self.path_isfile(self.get_option("script"))

    def setup(self):
        """ interface with vrtsexplorer to capture veritas related data """
        r = self.exec_cmd(self.get_option("script"))
        if r['status'] == 0:
            # BUG FIX: iterate over output *lines*. Iterating the output
            # string directly walked it character-by-character, so the
            # 'ftp <file>.tar.gz' reference could never be matched.
            for line in r['output'].splitlines():
                line = line.strip()
                tarfile = self.do_regex_find_all(r"ftp (.*tar.gz)", line)
                if len(tarfile) == 1:
                    self.add_copy_spec(tarfile[0])
class Dlm(Plugin, IndependentPlugin):
    """Collect DLM state and optional per-lockspace lock dumps."""

    short_desc = 'DLM (Distributed lock manager)'
    plugin_name = "dlm"
    profiles = ("cluster", )
    packages = ("cman", "dlm", "pacemaker")
    option_list = [
        PluginOpt('lockdump', default=False, desc='capture lock dumps for DLM')
    ]

    def setup(self):
        self.add_copy_spec(["/etc/sysconfig/dlm"])
        self.add_cmd_output([
            "dlm_tool log_plock",
            "dlm_tool dump",
            "dlm_tool ls -n"
        ])
        if self.get_option("lockdump"):
            self.do_lockdump()

    def do_lockdump(self):
        """Dump lock state for every lockspace listed by 'dlm_tool ls'."""
        listing = self.collect_cmd_output("dlm_tool ls")
        if listing["status"] != 0:
            return

        # Each lockspace appears on its own 'name <lockspace>' line.
        for lockspace in re.findall(r'^name\s+([^\s]+)$',
                                    listing["output"], re.MULTILINE):
            self.add_cmd_output("dlm_tool lockdebug -svw '%s'" % lockspace,
                                suggest_filename="dlm_locks_%s" % lockspace)
class AtomicHost(Plugin, RedHatPlugin):
    """Collect Atomic Host status and optional per-image information."""

    short_desc = 'Atomic Host'
    plugin_name = "atomichost"
    profiles = ('container', )
    option_list = [
        PluginOpt("info", default=False,
                  desc="gather atomic info for each image")
    ]

    def check_enabled(self):
        return self.policy.in_container()

    def setup(self):
        self.add_cmd_output("atomic host status")

        if self.get_option('info'):
            # 'docker images -q' may repeat the same IMAGE ID; de-duplicate
            # with a set so 'atomic info' runs only once per identifier.
            images = self.exec_cmd("docker images -q")
            for image in set(images['output'].splitlines()):
                self.add_cmd_output("atomic info {0}".format(image))
class Npm(Plugin, IndependentPlugin):
    """Collect global and (optionally) per-project npm module listings."""

    short_desc = 'Information from available npm modules'
    plugin_name = 'npm'
    profiles = ('system', )
    option_list = [
        PluginOpt('project_path', default='', val_type=str,
                  desc='Collect npm modules of project at this path')
    ]

    # in Fedora, Debian, Ubuntu and Suse the package is called npm
    packages = ('npm', )

    def _get_npm_output(self, cmd, filename, working_directory=None):
        # stderr output is already part of the json, key "problems"
        self.add_cmd_output(cmd, suggest_filename=filename, stderr=False,
                            runat=working_directory)

    def setup(self):
        project = self.get_option("project_path")
        if project:
            # Normalise '~user' and relative paths before running there.
            project_dir = os.path.abspath(os.path.expanduser(project))
            self._get_npm_output("npm ls --json", "npm_ls_project",
                                 working_directory=project_dir)
            self._get_npm_output("npm config list -l",
                                 "npm_config_list_project",
                                 working_directory=project_dir)

        self._get_npm_output("npm ls -g --json", "npm_ls_global")
        self._get_npm_output("npm config list -l", "npm_config_list_global")
class Dmraid(Plugin, IndependentPlugin):
    """Collect dmraid device/set listings and optional raw metadata."""

    short_desc = 'dmraid software RAID'
    plugin_name = 'dmraid'
    profiles = ('hardware', 'storage')
    packages = ('dmraid',)

    option_list = [
        PluginOpt('metadata', default=False, desc='collect dmraid metadata')
    ]

    # V - {-V/--version}
    # b - {-b|--block_devices}
    # r - {-r|--raid_devices}
    # s - {-s|--sets}
    # t - [-t|--test]
    # a - {-a|--activate} {y|n|yes|no}
    # D - [-D|--dump_metadata]
    dmraid_options = ['V', 'b', 'r', 's', 'tay']

    def setup(self):
        for flag in self.dmraid_options:
            self.add_cmd_output("dmraid -%s" % (flag,))
        if self.get_option("metadata"):
            # Dump raw metadata into its own output directory.
            metadata_path = self.get_cmd_output_path("metadata")
            self.add_cmd_output("dmraid -rD", runat=metadata_path,
                                chroot=self.tmp_in_sysroot())
class Libreswan(Plugin, IndependentPlugin):
    """Collect Libreswan/Openswan IPsec configuration and runtime state,
    scrubbing private key material from the results."""

    short_desc = 'Libreswan IPsec'
    plugin_name = 'libreswan'
    profiles = ('network', 'security', 'openshift')
    option_list = [
        PluginOpt('ipsec-barf', default=False,
                  desc='collect ipsec barf output')
    ]

    files = ('/etc/ipsec.conf',)
    packages = ('libreswan', 'openswan')

    def setup(self):
        self.add_copy_spec([
            "/etc/ipsec.conf",
            "/etc/ipsec.d",
            "/proc/net/xfrm_stat"
        ])

        # although this is 'verification' it's normally a very quick
        # operation so is not conditional on --verify
        self.add_cmd_output([
            'ipsec verify',
            'ipsec whack --status',
            'ipsec whack --listall',
            'certutil -L -d sql:/etc/ipsec.d'
        ])

        # may load xfrm kmods
        xfrm_pred = SoSPredicate(self, kmods=['xfrm_user', 'xfrm_algo'],
                                 required={'kmods': 'all'})
        self.add_cmd_output(['ip xfrm policy', 'ip xfrm state'],
                            pred=xfrm_pred)

        if self.get_option("ipsec-barf"):
            self.add_cmd_output("ipsec barf")

        # Never capture secret/key stores.
        self.add_forbidden_path([
            '/etc/ipsec.secrets',
            '/etc/ipsec.secrets.d',
            '/etc/ipsec.d/*.db',
            '/etc/ipsec.d/*.secrets'
        ])

    def postproc(self):
        # Remove any sensitive data.
        # "ip xfrm state" output contains encryption or authentication private
        # keys:
        key_scrub_re = r'(aead|auth|auth-trunc|enc)' \
                       r'(\s.*\s)(0x[0-9a-f]+)'
        self.do_cmd_output_sub("state", key_scrub_re, r"\1\2********")
        if self.get_option("ipsec-barf"):
            self.do_cmd_output_sub("barf", key_scrub_re, r"\1\2********")
class Pxe(Plugin):
    """Base collector for PXE boot service information; distro plugins
    build on the declared options."""

    short_desc = 'PXE service'
    plugin_name = "pxe"
    profiles = ('sysmgmt', 'network')
    option_list = [
        PluginOpt('tftpboot', default=False,
                  desc='collect content from tftpboot path')
    ]
class Psacct(Plugin):
    """Base collector for process accounting data; distro plugins build
    on the declared option."""

    short_desc = 'Process accounting information'
    plugin_name = "psacct"
    profiles = ('system', )
    option_list = [
        PluginOpt('all', default=False,
                  desc='collect all accounting files')
    ]
    packages = ("psacct", )
class RedHatPython(Python, RedHatPlugin):
    """Red Hat Python collection; optionally records a SHA256 digest of
    every installed .py file under the common library prefixes."""

    packages = ('python', 'python36', 'python2', 'python3', 'platform-python')
    option_list = [
        PluginOpt('hashes', default=False,
                  desc='collect hashes for all python files')
    ]

    def setup(self):
        self.add_cmd_output(['python2 -V', 'python3 -V'])

        # On RHEL 8+ the system interpreter is shipped as platform-python.
        if isinstance(self.policy, RHELPolicy) and \
                self.policy.dist_version() > 7:
            self.python_version = "/usr/libexec/platform-python -V"
        super(RedHatPython, self).setup()

        if self.get_option('hashes'):
            digests = {'digests': []}
            py_paths = [
                '/usr/lib',
                '/usr/lib64',
                '/usr/local/lib',
                '/usr/local/lib64'
            ]

            for py_path in py_paths:
                for root, _, files in os.walk(py_path):
                    for name in files:
                        filepath = os.path.join(root, name)
                        if not filepath.endswith('.py'):
                            continue
                        try:
                            hasher = hashlib.sha256()
                            with open(filepath, 'rb') as pyfile:
                                # hash in 1 KiB chunks to bound memory use
                                for data in iter(
                                        lambda: pyfile.read(1024), b''):
                                    hasher.update(data)
                            digests['digests'].append({
                                'filepath': filepath,
                                'sha256': hasher.hexdigest()
                            })
                        except IOError:
                            self._log_error(
                                "Unable to read python file at %s"
                                % filepath)

            self.add_string_as_file(json.dumps(digests), 'digests.json',
                                    plug_dir=True)
class SMcli(Plugin, IndependentPlugin):
    """Collect SANtricity array status via SMcli, plus optional support
    debug data."""

    short_desc = 'SANtricity storage device'
    plugin_name = 'smclient'
    plugin_timeout = 900
    profiles = ('system', 'storage', 'hardware',)
    packages = ('SMclient', )

    option_list = [
        PluginOpt('debug', default=False, desc='capture support debug data')
    ]

    def setup(self):
        subcmds = [
            "show storagearray;",
            "show storagearray connections;",
            "show storagearray healthstatus;",
        ]

        # Get list of storage arrays
        ssnames = []
        listing = self.collect_cmd_output('SMcli -d -S')
        if listing['status'] == 0:
            for line in listing['output'].splitlines():
                if 'localhost' in line:
                    ssnames.append(line.split()[0])

        # Collect status of each storage array
        for ssname in ssnames:
            self.add_cmd_output([
                "SMcli localhost -n %s -c '%s'" % (ssname, subcmd)
                for subcmd in subcmds
            ])

        if self.get_option("debug"):
            self.do_debug(ssnames)

    def do_debug(self, ssnames):
        """Save support data for each named array into the plugin dir."""
        logpath = self.get_cmd_output_path(make=False)
        cmd = 'SMcli localhost -n'
        subcmd = 'save storageArray supportData file='
        for ssname in ssnames:
            self.add_cmd_output(
                "%s %s -c '%s\"support-%s\";'" % (cmd, ssname, subcmd, ssname),
                runat=logpath, timeout=450)
class Navicli(Plugin, RedHatPlugin):
    """Collect EMC Navisphere agent configuration and CLARiiON SP data."""

    short_desc = 'EMC Navicli'
    plugin_name = 'navicli'
    profiles = ('storage', 'hardware')
    option_list = [
        PluginOpt('ipaddrs', default='', val_type=str,
                  desc='space-delimited list of CLARiiON IP addresses')
    ]

    def check_enabled(self):
        return is_executable("navicli")

    def get_navicli_config(self):
        """ EMC Navisphere Host Agent NAVICLI specific information - files
        """
        self.add_copy_spec([
            "/etc/Navisphere/agent.config",
            "/etc/Navisphere/Navimon.cfg",
            "/etc/Navisphere/Quietmode.cfg",
            "/etc/Navisphere/messages/[a-z]*",
            "/etc/Navisphere/log/[a-z]*"
        ])

    def get_navicli_SP_info(self, SP_address):
        """ EMC Navisphere Host Agent NAVICLI specific information -
        CLARiiON - commands
        """
        # One 'navicli -h <SP>' invocation per query below.
        queries = [
            "getall",
            "getsptime -spa",
            "getsptime -spb",
            "getlog",
            "getdisk",
            "getcache",
            "getlun",
            "getlun -rg -type -default -owner -crus -capacity",
            "lunmapinfo",
            "getcrus",
            "port -list -all",
            "storagegroup -list",
            "spportspeed -get",
        ]
        self.add_cmd_output([
            "navicli -h %s %s" % (SP_address, query) for query in queries
        ])

    def setup(self):
        self.get_navicli_config()
        # Probe each SP first; only collect from responsive addresses.
        for ip in set(self.get_option("ipaddrs").split()):
            if self.exec_cmd("navicli -h %s getsptime" % (ip))['status'] == 0:
                self.get_navicli_SP_info(ip)
class PercCLI(Plugin, IndependentPlugin):
    """Collect PERC RAID controller state via perccli64."""

    short_desc = 'PowerEdge RAID Controller management'
    plugin_name = 'perccli'
    profiles = ('system', 'storage', 'hardware',)
    packages = ('perccli', )

    option_list = [
        PluginOpt('json', default=False, desc='collect data in JSON format')
    ]

    def setup(self):
        cli = '/opt/MegaRAID/perccli/perccli64'
        subcmds = [
            'show ctrlcount',
            '/call show AliLog',
            '/call show all',
            '/call show termlog',
            '/call/bbu show all',
            '/call/cv show all',
            '/call/dall show',
            '/call/eall show all',
            '/call/eall/sall show all',
            '/call/sall show all',
            '/call/vall show all',
        ]

        # The ' J' suffix requests JSON output from perccli.
        json_suffix = ' J' if self.get_option('json') else ''

        logpath = self.get_cmd_output_path()

        for subcmd in subcmds:
            self.add_cmd_output(
                "%s %s%s" % (cli, subcmd, json_suffix),
                suggest_filename="perccli64_%s%s" % (subcmd, json_suffix),
                runat=logpath)

        # /call show events need 'file=' option to get adapter info like below
        # "Adapter:
        # - Number of Events: xxx".
        subcmd = '/call show events'
        self.add_cmd_output(
            "%s %s file=/dev/stdout%s" % (cli, subcmd, json_suffix),
            suggest_filename="perccli64_%s%s" % (subcmd, json_suffix),
            runat=logpath)
class SELinux(Plugin, RedHatPlugin):
    """Collect SELinux status, the policy store and semanage listings."""

    short_desc = 'SELinux access control'
    plugin_name = 'selinux'
    profiles = ('container', 'system', 'security', 'openshift')

    option_list = [
        PluginOpt('fixfiles', default=False,
                  desc='collect incorrect file context labels')
    ]
    packages = ('libselinux', )

    def setup(self):
        self.add_copy_spec(['/etc/sestatus.conf', '/etc/selinux'])
        # capture this with a higher log limit since #2035 may limit this
        # collection
        self.add_copy_spec('/var/lib/selinux', sizelimit=50)
        self.add_cmd_output('sestatus')

        # The runtime commands are only useful when SELinux is enabled.
        state = self.exec_cmd('getenforce')['output']
        if state != 'Disabled':
            self.add_cmd_output([
                'ps auxZww',
                'sestatus -v',
                'sestatus -b',
                'selinuxdefcon root',
                'selinuxconlist root',
                'selinuxexeccon /bin/passwd',
                'semanage -o'  # deprecated, may disappear at some point
            ])

            for semanage_class in ['fcontext', 'user', 'port', 'login',
                                   'node', 'interface', 'module']:
                self.add_cmd_output("semanage %s -l" % semanage_class)

            if self.get_option('fixfiles'):
                self.add_cmd_output("restorecon -Rvn /", stderr=False,
                                    priority=100)
class Services(Plugin):
    """Collect SysV init configuration and current runlevel state."""

    short_desc = 'System services'
    plugin_name = "services"
    profiles = ('system', 'boot')

    option_list = [
        PluginOpt('servicestatus', default=False,
                  desc='collect status of all running services')
    ]

    def setup(self):
        self.add_copy_spec([
            "/etc/inittab",
            "/etc/rc.d",
            "/etc/rc.local"
        ])
        # 'service --status-all' can be slow, so it is opt-in.
        if self.get_option('servicestatus'):
            self.add_cmd_output("service --status-all")
        self.add_cmd_output([
            "/sbin/runlevel",
            "ls /var/lock/subsys"
        ])
class Fibrechannel(Plugin, RedHatPlugin):
    """Collect udev attributes and vendor debug data for FC devices."""

    short_desc = 'Collect information on fibrechannel devices'
    plugin_name = 'fibrechannel'
    profiles = ('hardware', 'storage', 'system')
    files = ('/sys/class/fc_host', '/sys/class/fc_remote_ports')
    option_list = [
        PluginOpt('debug', default=True, desc='collect debugging logs')
    ]

    # vendor specific debug paths
    debug_paths = ['/sys/kernel/debug/qla2*/']

    def setup(self):
        # Run udevadm once per detected fibre channel block device.
        self.add_blockdev_cmd("udevadm info -a %(dev)s", devices='fibre')
        if self.get_option('debug'):
            self.add_copy_spec(self.debug_paths)
class Gfs2(Plugin, IndependentPlugin):
    """Collect GFS2 filesystem state and optional debugfs lock dumps."""

    short_desc = 'GFS2 (Global Filesystem 2)'
    plugin_name = "gfs2"
    profiles = ("cluster", )
    packages = ("gfs2-utils", )

    option_list = [
        PluginOpt('lockdump', default=False,
                  desc='collect lock dumps for all GFS2 filesystems')
    ]

    def setup(self):
        self.add_copy_spec(["/sys/fs/gfs2/*/withdraw"])
        self.add_cmd_output([
            "gfs_control ls -n",
            "gfs_control dump"
        ])

        # BUG FIX: the option is declared above as 'lockdump', but this
        # check previously queried the nonexistent 'gfs2lockdump' option,
        # so the debugfs lock dumps could never be collected.
        if self.get_option("lockdump"):
            self.add_copy_spec("/sys/kernel/debug/gfs2/*")
class Sar(Plugin):
    """Collect sysstat data files and render any missing sar reports."""

    short_desc = 'System Activity Reporter'
    plugin_name = 'sar'
    profiles = ('system', 'performance')
    packages = ('sysstat', )

    sa_path = '/var/log/sa'
    option_list = [
        PluginOpt('all_sar', default=False,
                  desc="gather all system activity records")
    ]

    def setup(self):
        # 'all_sar' lifts the size limit so every record is captured.
        self.add_copy_spec(
            self.path_join(self.sa_path, '*'),
            sizelimit=0 if self.get_option("all_sar") else None,
            tailit=False)

        try:
            dir_list = self.listdir(self.sa_path)
        except OSError:
            self._log_warn("sar: could not list %s" % self.sa_path)
            return

        sa_regex = re.compile(r"sa[\d]+")
        # find all the sa files that don't have an existing sar file
        # there are two possible formats for sar files
        # saDD, the default one where DD is the day of the month
        # saYYYYMMDD, which is the format when specifying -D
        # as option for sadc
        for fname in dir_list:
            if not sa_regex.match(fname):
                continue
            sa_data_path = self.path_join(self.sa_path, fname)
            sar_filename = 'sar' + fname[2:]
            if sar_filename not in dir_list:
                sar_cmd = 'sh -c "sar -A -f %s"' % sa_data_path
                self.add_cmd_output(sar_cmd, sar_filename)
            sadf_cmd = "sadf -x -- -A %s" % sa_data_path
            self.add_cmd_output(sadf_cmd, "%s.xml" % fname)
class Conntrack(Plugin, IndependentPlugin):
    """Collect netfilter connection-tracking state from conntrackd and
    conntrack, on the host and in each network namespace."""

    short_desc = 'conntrack - netfilter connection tracking'
    plugin_name = 'conntrack'
    profiles = ('network', 'cluster')
    packages = ('conntrack-tools', 'conntrack', 'conntrackd')

    option_list = [
        PluginOpt("namespaces", default=None, val_type=int,
                  desc="Number of namespaces to collect, 0 for unlimited"),
    ]

    def setup(self):
        # Collect info from conntrackd
        self.add_copy_spec("/etc/conntrackd/conntrackd.conf")
        stats = ["network", "cache", "runtime", "link",
                 "rsqueue", "queue", "ct", "expect"]
        self.add_cmd_output(["conntrackd -s %s" % stat for stat in stats])

        # Collect info from conntrack
        self.add_cmd_output([
            "conntrack -L -o extended",
            "conntrack -S",
        ])

        # Capture additional data from namespaces; each command is run
        # per-namespace
        nsps = self.get_option('namespaces')
        for namespace in self.get_network_namespaces(ns_max=nsps):
            ns_cmd_prefix = "ip netns exec " + namespace + " "
            self.add_cmd_output(ns_cmd_prefix + "conntrack -L -o extended")
            self.add_cmd_output(ns_cmd_prefix + "conntrack -S")
class DellRAC(Plugin, IndependentPlugin):
    """Collect iDRAC configuration and optional SupportAssist data."""

    short_desc = 'Dell Remote Access Controller Administration'
    plugin_name = 'dellrac'
    profiles = ('system', 'storage', 'hardware',)
    packages = ('srvadmin-idracadm7', )

    option_list = [
        PluginOpt('debug', default=False, desc='capture support assist data')
    ]

    racadm = '/opt/dell/srvadmin/bin/idracadm7'
    prefix = 'idracadm7'

    def setup(self):
        for subcmd in ['getniccfg', 'getsysinfo']:
            self.add_cmd_output(
                '%s %s' % (self.racadm, subcmd),
                suggest_filename='%s_%s' % (self.prefix, subcmd))
        if self.get_option("debug"):
            self.do_debug()

    def do_debug(self):
        """Trigger a SupportAssist collection into the plugin output dir."""
        # ensure the sos_commands/dellrac directory does exist in either case
        # as we will need to run the command at that dir, and also ensure
        # logpath is properly populated in either case as well
        try:
            logpath = self.get_cmd_output_path()
        except FileExistsError:
            logpath = self.get_cmd_output_path(make=False)
        subcmd = 'supportassist collect -f'
        self.add_cmd_output(
            '%s %s support.zip' % (self.racadm, subcmd),
            runat=logpath,
            suggest_filename='%s_%s' % (self.prefix, subcmd))
class Hpssm(Plugin, IndependentPlugin):
    """Collect HP Smart Storage controller details and diagnostics."""

    short_desc = 'HP Smart Storage Management'
    plugin_name = 'hpssm'
    profiles = ('system', 'storage', 'hardware',)
    packages = ('ilorest', 'ssacli', 'ssaducli',)

    option_list = [
        PluginOpt('debug', default=False, desc='capture debug data')
    ]

    def setup(self):
        self.add_cmd_output([
            'ssacli ctrl slot=0 array all show detail',
            'ssacli ctrl slot=0 ld all show detail',
            'ssacli ctrl slot=0 pd all show detail',
            'ssacli ctrl slot=0 show detail',
        ])

        # ssaducli writes a zip of diagnostic data into the output dir.
        logpath = self.get_cmd_output_path()
        self.add_cmd_output('ssaducli -v -adu -f %s/adu-log.zip' % logpath,
                            suggest_filename='ssaducli_-v_-adu.log')

        if self.get_option("debug"):
            self.do_debug(logpath)

    def do_debug(self, logpath):
        """Collect the iLO Active Health System log via ilorest."""
        self.add_cmd_output('ilorest serverlogs --selectlog=AHS',
                            runat=logpath,
                            suggest_filename='ilorest.log')
class Boot(Plugin, IndependentPlugin):
    """Collect bootloader configuration, initrd listings and EFI state."""

    short_desc = 'Bootloader information'
    plugin_name = 'boot'
    profiles = ('system', 'boot')
    packages = ('grub', 'grub2', 'grub-common', 'grub2-common', 'zipl')

    option_list = [
        PluginOpt("all-images", default=False,
                  desc="collect lsinitrd for all images")
    ]

    def setup(self):
        self.add_copy_spec([
            # legacy / special purpose bootloader configs
            "/etc/milo.conf",
            "/etc/silo.conf",
            "/boot/efi/efi/redhat/elilo.conf",
            "/etc/yaboot.conf",
            "/boot/yaboot.conf"
        ])

        self.add_cmd_output("ls -lanR /boot", tags="insights_ls_boot")
        self.add_cmd_output([
            "lsinitrd",
            "ls -lanR /sys/firmware",
        ])
        self.add_cmd_output(["efibootmgr -v", "mokutil --sb-state"])

        if self.get_option("all-images"):
            for image in glob('/boot/initr*.img'):
                # kdump initrds are intentionally excluded
                if image.endswith("kdump.img"):
                    continue
                self.add_cmd_output("lsinitrd %s" % image, priority=100)