def setup(self):
    if self.get_option('jbosstrace') and self.is_installed('ovirt-engine'):
        engine_pattern = r"^ovirt-engine\ -server.*jboss-modules.jar"
        pgrep = "pgrep -f '%s'" % engine_pattern
        lines = self.call_ext_prog(pgrep)['output'].splitlines()
        engine_pids = [int(x) for x in lines]
        if not engine_pids:
            self.soslog.error('Unable to get ovirt-engine pid')
            self.add_alert('Unable to get ovirt-engine pid')
        for pid in engine_pids:
            try:
                # backtrace written to '/var/log/ovirt-engine/console.log'
                os.kill(pid, signal.SIGQUIT)
            except OSError as e:
                self.soslog.error('Unable to send signal to %d: %s'
                                  % (pid, e))

    self.add_forbidden_path(['/etc/ovirt-engine/.pgpass',
                             '/etc/rhevm/.pgpass'])

    if not self.get_option('heapdump'):
        self.add_forbidden_path('/var/log/ovirt-engine/dump')
        self.add_cmd_output('ls -l /var/log/ovirt-engine/dump/')

    # Copy all engine tunables and domain information
    self.add_cmd_output("engine-config --all")

    # 3.x line uses engine-manage-domains, 4.x uses ovirt-aaa-jdbc-tool
    manage_domains = 'engine-manage-domains'
    extensions_tool = 'ovirt-engine-extensions-tool'
    jdbc_tool = 'ovirt-aaa-jdbc-tool'
    if is_executable(manage_domains):
        self.add_cmd_output('%s list' % manage_domains)
    if is_executable(extensions_tool):
        self.add_cmd_output('%s info list-extensions' % extensions_tool)
    if is_executable(jdbc_tool):
        subcmds = [
            'query --what=user',
            'query --what=group',
            'settings show'
        ]
        self.add_cmd_output(['%s %s' % (jdbc_tool, sc) for sc in subcmds])

    # Copy engine config files.
    self.add_copy_spec([
        "/etc/ovirt-engine",
        "/etc/rhevm/",
        "/etc/ovirt-engine-dwh",
        "/etc/ovirt-engine-reports",
        "/var/log/ovirt-engine",
        "/var/log/ovirt-engine-dwh",
        "/var/log/ovirt-engine-reports",
        "/var/log/ovirt-scheduler-proxy",
        "/var/log/rhevm",
        "/etc/sysconfig/ovirt-engine",
        "/usr/share/ovirt-engine/conf",
        "/var/log/ovirt-guest-agent",
        "/var/lib/ovirt-engine/setup-history.txt",
        "/var/lib/ovirt-engine/setup/answers",
        "/var/lib/ovirt-engine/external_truststore",
        "/var/tmp/ovirt-engine/config",
        "/var/lib/ovirt-engine/jboss_runtime/config",
        "/var/lib/ovirt-engine-reports/jboss_runtime/config"
    ])
def _compress(self):
    methods = []
    # Make sure that valid compression commands exist.
    for method in ['xz', 'bzip2', 'gzip']:
        if is_executable(method):
            methods.append(method)
        else:
            self.log_error("\"%s\" command not found." % method)
    if self.method in methods:
        methods = [self.method]

    exp_msg = "No compression utilities found."
    last_error = Exception(exp_msg)
    for cmd in methods:
        suffix = "." + cmd.replace('ip', '')
        # use fast compression if using xz or bz2
        if cmd != "gzip":
            cmd = "%s -1" % cmd
        try:
            r = sos_get_command_output("%s %s" % (cmd, self.name()),
                                       timeout=0)
            if r['status']:
                self.log_info(r['output'])
            self._suffix += suffix
            return self.name()
        except Exception as e:
            last_error = e
    raise last_error
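# A quick standalone illustration of the suffix derivation used in
# _compress() above: stripping "ip" from the tool name happens to yield the
# conventional archive extension for all three supported tools.
for tool in ("xz", "bzip2", "gzip"):
    print(tool, "->", "." + tool.replace("ip", ""))  # .xz, .bz2, .gz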
def _compress(self):
    methods = []
    # Make sure that valid compression commands exist.
    for method in ['xz', 'bzip2', 'gzip']:
        if is_executable(method):
            methods.append(method)
        else:
            self.log_info("\"%s\" compression method unavailable" % method)
    if self.method in methods:
        methods = [self.method]

    exp_msg = "No compression utilities found."
    last_error = Exception(exp_msg)
    for cmd in methods:
        suffix = "." + cmd.replace('ip', '')
        cmd = self._policy.get_cmd_for_compress_method(cmd, self._threads)
        try:
            exec_cmd = "%s %s" % (cmd, self.name())
            r = sos_get_command_output(exec_cmd, stderr=True, timeout=0)
            if r['status']:
                self.log_error(r['output'])
                raise Exception("%s exited with %s"
                                % (exec_cmd, r['status']))
            self._suffix += suffix
            return self.name()
        except Exception as e:
            last_error = e
    raise last_error
def _compress(self):
    methods = []
    # Make sure that valid compression commands exist.
    for method in ['xz', 'bzip2', 'gzip']:
        if is_executable(method):
            methods.append(method)
        else:
            self.log_error("\"%s\" command not found." % method)
    if self.method in methods:
        methods = [self.method]

    exp_msg = "No compression utilities found."
    last_error = Exception(exp_msg)
    for cmd in methods:
        suffix = "." + cmd.replace('ip', '')
        # use fast compression if using xz or bz2
        if cmd != "gzip":
            cmd = "%s -1" % cmd
        try:
            r = sos_get_command_output("%s %s" % (cmd, self.name()))
            if r['status']:
                self.log_info(r['output'])
            self._suffix += suffix
            return self.name()
        except Exception as e:
            last_error = e
    raise last_error
def check_enabled(self):
    """This method will be used to verify that a plugin should execute
    given the condition of the underlying environment.

    The default implementation will return True if none of class.files,
    class.packages, nor class.commands is specified. If any of these is
    specified the plugin will check for the existence of any of the
    corresponding paths, packages or commands and return True if any
    are present.

    For plugins with more complex enablement checks this method may be
    overridden.
    """
    # some files, packages or commands have been specified for this plugin
    if any([self.files, self.packages, self.commands]):
        if isinstance(self.files, six.string_types):
            self.files = [self.files]

        if isinstance(self.packages, six.string_types):
            self.packages = [self.packages]

        if isinstance(self.commands, six.string_types):
            self.commands = [self.commands]

        return (any(os.path.exists(fname) for fname in self.files) or
                any(self.is_installed(pkg) for pkg in self.packages) or
                any(is_executable(cmd) for cmd in self.commands))

    return True
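# Hedged illustration (not taken from the sos source): a hypothetical plugin
# relying on the default check_enabled() triggers documented above. The
# attribute names (files, packages, commands) follow the docstring; the
# plugin name, the Plugin stub, and the trigger values are invented for
# this example.
class Plugin:                      # stand-in for sos's Plugin base class
    files = ()
    packages = ()
    commands = ()

class ExampleChronyPlugin(Plugin):
    """Enabled when any one of the declared triggers is present."""
    files = ('/etc/chrony.conf',)  # a path that must exist, or...
    packages = ('chrony',)         # ...a package that must be installed, or...
    commands = ('chronyc',)        # ...a command that must be executable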
def check_is_active(self):
    # the daemon must be running
    if (is_executable('docker') and
            self.policy.init_system.is_running('docker')):
        self.active = True
        return True
    return False
def _compress(self):
    methods = []
    # Make sure that valid compression commands exist.
    for method in ['xz', 'gzip']:
        if is_executable(method):
            methods.append(method)
        else:
            self.log_info("\"%s\" compression method unavailable" % method)
    if self.method in methods:
        methods = [self.method]

    exp_msg = "No compression utilities found."
    last_error = Exception(exp_msg)
    for cmd in methods:
        suffix = "." + cmd.replace('ip', '')
        cmd = self._policy.get_cmd_for_compress_method(cmd, self._threads)
        try:
            exec_cmd = "%s %s" % (cmd, self.name())
            r = sos_get_command_output(exec_cmd, stderr=True, timeout=0)
            if r['status']:
                self.log_error(r['output'])
                raise Exception("%s exited with %s"
                                % (exec_cmd, r['status']))
            self._suffix += suffix
            return self.name()
        except Exception as e:
            last_error = e
    raise last_error
def check_is_active(self, sysroot=None):
    # the daemon must be running
    if (is_executable('docker', sysroot) and
            (self.policy.init_system.is_running('docker') or
             self.policy.init_system.is_running('snap.docker.dockerd'))):
        self.active = True
        return True
    return False
def _files_pkgs_or_cmds_present(self, files, packages, commands):
    kernel_mods = self.policy.lsmod()

    def have_kmod(kmod):
        return kmod in kernel_mods

    return (any(os.path.exists(fname) for fname in files) or
            any(self.is_installed(pkg) for pkg in packages) or
            any(is_executable(cmd) for cmd in commands) or
            any(have_kmod(kmod) for kmod in self.kernel_mods))
def check_is_active(self): """Check to see if the container runtime is both present AND active. Active in this sense means that the runtime can be used to glean information about the runtime itself and containers that are running. """ if is_executable(self.binary): self.active = True return True return False
def set_transport_type(self):
    if is_executable('oc') or self.opts.transport == 'oc':
        return 'oc'
    self.log_info("Local installation of 'oc' not found or is not "
                  "correctly configured. Will use ControlPersist.")
    self.ui_log.warn(
        "Preferred transport 'oc' not available, will fall back to SSH.")
    if not self.opts.batch:
        input("Press ENTER to continue connecting with SSH, or Ctrl+C to "
              "abort.")
    return 'control_persist'
def _check_plugin_triggers(self, files, packages, commands, services):
    kernel_mods = self.policy.lsmod()

    def have_kmod(kmod):
        return kmod in kernel_mods

    return (any(os.path.exists(fname) for fname in files) or
            any(self.is_installed(pkg) for pkg in packages) or
            any(is_executable(cmd) for cmd in commands) or
            any(have_kmod(kmod) for kmod in self.kernel_mods) or
            any(self.is_service(svc) for svc in services))
def setup(self):
    self.add_file_tags({
        '/usr/share/foreman/.ssh/ssh_config': 'ssh_foreman_config',
    })
    # if we are on RHEL7 with scl, wrap some Puma commands by
    # scl enable tfm 'command'
    if self.policy.dist_version() == 7 and is_executable('scl'):
        self.pumactl = "scl enable tfm '%s'" % self.pumactl

    super(RedHatForeman, self).setup()
def setup(self):
    # which lstopo binary is present depends on the installed package;
    # both packages require hwloc-libs:
    #   hwloc-gui provides the lstopo command
    #   hwloc provides the lstopo-no-graphics command
    if is_executable("lstopo"):
        cmd = "lstopo"
    else:
        cmd = "lstopo-no-graphics"
    self.add_cmd_output("%s --whole-io --of console" % cmd,
                        suggest_filename="lstopo.txt")
    self.add_cmd_output("%s --whole-io --of xml" % cmd,
                        suggest_filename="lstopo.xml")
def check_is_active(self): """Check to see if the container runtime is both present AND active. Active in this sense means that the runtime can be used to glean information about the runtime itself and containers that are running. :returns: ``True`` if the runtime is active, else ``False`` :rtype: ``bool`` """ if is_executable(self.binary, self.policy.sysroot): self.active = True return True return False
def _connect(self, password):
    # the oc binary must be _locally_ available for this to work
    if not is_executable('oc'):
        return False

    # deploy the debug container we'll exec into
    podconf = self.get_node_pod_config()
    self.pod_name = podconf['metadata']['name']
    fd, self.pod_tmp_conf = tempfile.mkstemp(dir=self.tmpdir)
    with open(fd, 'w') as cfile:
        json.dump(podconf, cfile)
    self.log_debug("Starting sos collector container '%s'" % self.pod_name)
    # this specifically does not need to run with a project definition
    out = sos_get_command_output(
        "oc create -f %s" % self.pod_tmp_conf
    )
    if (out['status'] != 0 or
            "pod/%s created" % self.pod_name not in out['output']):
        self.log_error("Unable to deploy sos collect pod")
        self.log_debug("Debug pod deployment failed: %s" % out['output'])
        return False
    self.log_debug("Pod '%s' successfully deployed, waiting for pod to "
                   "enter ready state" % self.pod_name)
    # wait for the pod to report as running
    try:
        up = self.run_oc("wait --for=condition=Ready pod/%s --timeout=30s"
                         % self.pod_name,
                         # timeout is for local safety, not oc
                         timeout=40)
        if not up['status'] == 0:
            self.log_error("Pod not available after 30 seconds")
            return False
    except SoSTimeoutError:
        self.log_error("Timeout while polling for pod readiness")
        return False
    except Exception as err:
        self.log_error("Error while waiting for pod to be ready: %s" % err)
        return False
    return True
def setup(self): self.add_cmd_output([ "ubuntu-security-status --thirdparty --unavailable", "hwe-support-status --verbose", ]) if self.is_installed('ubuntu-advantage-tools'): if is_executable('ua'): ua_tools_status = 'ua status' else: ua_tools_status = 'ubuntu-advantage status' self.add_cmd_output(ua_tools_status) if not self.get_option("all_logs"): self.add_copy_spec([ "/var/log/ubuntu-advantage.log", "/var/log/ubuntu-advantage.log.1", "/var/log/ubuntu-advantage.log.2*", ]) else: self.add_copy_spec("/var/log/ubuntu-advantage.log*")
def setup(self):
    zvm_pred = SoSPredicate(self, kmods=['vmcp', 'cpint'])
    self.set_cmd_predicate(zvm_pred)

    self.vm_cmd = None
    for cmd in self.commands:
        if is_executable(cmd):
            self.vm_cmd = cmd
            break

    # vm commands from dbginfo.sh
    vm_cmds = [
        "q userid", "q users", "q privclass", "q cplevel", "q cpservice",
        "q cpprot user", "q specex", "q ssi", "q cpus", "q srm", "q vtod",
        "q time full", "q timezone", "q loaddev", "q v osa", "q v dasd",
        "q v crypto", "q v fcp", "q v pav", "q v sw", "q v st", "q v nic",
        "q st", "q xstore", "q xstore user system", "q sxspages", "q vmlan",
        "q vswitch", "q vswitch details", "q vswitch access",
        "q vswitch active", "q vswitch accesslist", "q vswitch promiscuous",
        "q vswitch controller", "q port group all active details", "q set",
        "q comm", "q controller all", "q fcp", "q frames", "q lan",
        "q lan all details", "q lan all access", "q memassist", "q nic",
        "q pav", "q proc", "q proc topology", "q mt", "q qioass", "q spaces",
        "q swch all", "q trace", "q mdcache", "q alloc page", "q alloc spool",
        "q dump", "q dumpdev", "q pcifunction", "q vmrelocate", "ind load",
        "ind sp", "ind user"
    ]

    vm_id_out = self.collect_cmd_output("%s q userid" % self.vm_cmd)
    if vm_id_out['status'] == 0:
        vm_id = vm_id_out['output'].split()[0]
        vm_cmds.extend(["q reorder %s" % vm_id,
                        "q quickdsp %s" % vm_id])

    self.add_cmd_output(
        ["%s %s" % (self.vm_cmd, vcmd) for vcmd in vm_cmds])
def test_nonexe_file(self):
    path = os.path.join(TEST_DIR, 'utility_tests.py')
    self.assertFalse(is_executable(path))
def test_exe_file_abs_path(self):
    self.assertTrue(is_executable("/usr/bin/timeout"))
def test_exe_file(self):
    path = os.path.join(TEST_DIR, 'test_exe.py')
    self.assertTrue(is_executable(path))
def check_enabled(self):
    arch = self.policy.get_arch()
    return "ppc64" in arch and is_executable("iprconfig")
def test_exe_file(self):
    self.assertTrue(is_executable('true'))
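# Hedged sketch of what is_executable() plausibly does, inferred from the
# tests above: it accepts either a bare command name (resolved against PATH,
# as with 'true') or a concrete path, plus an optional sysroot prefix as seen
# in the container-runtime checks. This is an illustration, not the actual
# sos implementation.
import os

def is_executable(command, sysroot=None):
    """Return True if command resolves to an executable file."""
    paths = os.environ.get("PATH", "").split(os.pathsep)
    candidates = [command] + [os.path.join(p, command) for p in paths]
    if sysroot:
        candidates += [os.path.join(sysroot, c.lstrip('/'))
                       for c in candidates]
    return any(os.path.isfile(c) and os.access(c, os.X_OK)
               for c in candidates)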
def check_enabled(self): return is_executable("navicli")
def _files_pkgs_or_cmds_present(self, files, packages, commands):
    return (any(os.path.exists(fname) for fname in files) or
            any(self.is_installed(pkg) for pkg in packages) or
            any(is_executable(cmd) for cmd in commands))
def upload_sftp(self, user=None, password=None):
    """Attempts to upload the archive to an SFTP location.

    Due to the lack of well maintained, secure, and generally widespread
    python libraries for SFTP, sos will shell-out to the system's local
    ssh installation in order to handle these uploads.

    Do not override this method with one that uses python-paramiko, as
    the upstream sos team will reject any PR that includes that
    dependency.
    """
    # if we somehow don't have sftp available locally, fail early
    if not is_executable('sftp'):
        raise Exception('SFTP is not locally supported')

    # soft dependency on python3-pexpect, which we need to use to control
    # sftp login since as of this writing we don't have a viable solution
    # via ssh python bindings commonly available among downstreams
    try:
        import pexpect
    except ImportError:
        raise Exception('SFTP upload requires python3-pexpect, which is '
                        'not currently installed')

    sftp_connected = False

    if not user:
        user = self.get_upload_user()
    if not password:
        password = self.get_upload_password()

    # need to strip the protocol prefix here
    sftp_url = self.get_upload_url().replace('sftp://', '')
    sftp_cmd = "sftp -oStrictHostKeyChecking=no %s@%s" % (user, sftp_url)
    ret = pexpect.spawn(sftp_cmd, encoding='utf-8')

    sftp_expects = [
        u'sftp>',
        u'password:',
        u'Connection refused',
        pexpect.TIMEOUT,
        pexpect.EOF
    ]

    idx = ret.expect(sftp_expects, timeout=15)

    if idx == 0:
        sftp_connected = True
    elif idx == 1:
        ret.sendline(password)
        pass_expects = [
            u'sftp>',
            u'Permission denied',
            pexpect.TIMEOUT,
            pexpect.EOF
        ]
        sftp_connected = ret.expect(pass_expects, timeout=10) == 0
        if not sftp_connected:
            ret.close()
            raise Exception("Incorrect username or password for %s"
                            % self.get_upload_url_string())
    elif idx == 2:
        raise Exception("Connection refused by %s. Incorrect port?"
                        % self.get_upload_url_string())
    elif idx == 3:
        raise Exception("Timeout hit trying to connect to %s"
                        % self.get_upload_url_string())
    elif idx == 4:
        raise Exception("Unexpected error trying to connect to sftp: %s"
                        % ret.before)

    if not sftp_connected:
        ret.close()
        raise Exception("Unable to connect via SFTP to %s"
                        % self.get_upload_url_string())

    put_cmd = 'put %s %s' % (self.upload_archive_name,
                             self._get_sftp_upload_name())
    ret.sendline(put_cmd)

    put_expects = [
        u'100%',
        pexpect.TIMEOUT,
        pexpect.EOF
    ]

    put_success = ret.expect(put_expects, timeout=180)

    if put_success == 0:
        ret.sendline('bye')
        return True
    elif put_success == 1:
        raise Exception("Timeout expired while uploading")
    elif put_success == 2:
        raise Exception("Unknown error during upload: %s" % ret.before)
    else:
        raise Exception("Unexpected response from server: %s" % ret.before)
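# Minimal sketch of the pexpect expect-list pattern that upload_sftp() uses:
# expect() returns the index of whichever pattern matched first, and the
# caller branches on that index. The host, prompts, and password below are
# invented for illustration only.
import pexpect

child = pexpect.spawn("sftp -oStrictHostKeyChecking=no user@sftp.example.com",
                      encoding="utf-8")
idx = child.expect([u"sftp>", u"password:", pexpect.TIMEOUT], timeout=15)
if idx == 1:               # server prompted for a password
    child.sendline("hunter2")
    child.expect(u"sftp>", timeout=10)
elif idx == 2:             # no prompt within 15 seconds
    raise Exception("timed out connecting")
child.sendline("bye")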
def check_enabled(self): return is_executable("/usr/symcli/bin/symcli")
def setup(self):
    if self.get_option('jbosstrace') and self.is_installed('ovirt-engine'):
        engine_pattern = r"^ovirt-engine\ -server.*jboss-modules.jar"
        pgrep = "pgrep -f '%s'" % engine_pattern
        r = self.exec_cmd(pgrep)
        engine_pids = [int(x) for x in r['output'].splitlines()]
        if not engine_pids:
            self.soslog.error('Unable to get ovirt-engine pid')
            self.add_alert('Unable to get ovirt-engine pid')
        for pid in engine_pids:
            try:
                # backtrace written to '/var/log/ovirt-engine/console.log'
                os.kill(pid, signal.SIGQUIT)
            except OSError as e:
                self.soslog.error('Unable to send signal to %d: %s'
                                  % (pid, e))

    self.add_forbidden_path(['/etc/ovirt-engine/.pgpass',
                             '/etc/rhevm/.pgpass'])

    if not self.get_option('heapdump'):
        self.add_forbidden_path('/var/log/ovirt-engine/dump')
        self.add_cmd_output('ls -l /var/log/ovirt-engine/dump/')

    certificates = [
        '/etc/pki/ovirt-engine/ca.pem',
        '/etc/pki/ovirt-engine/apache-ca.pem',
        '/etc/pki/ovirt-engine/certs/engine.cer',
        '/etc/pki/ovirt-engine/certs/apache.cer',
        '/etc/pki/ovirt-engine/certs/websocket-proxy.cer',
        '/etc/pki/ovirt-engine/certs/jboss.cer',
        '/etc/pki/ovirt-engine/certs/imageio-proxy.cer',
        '/etc/pki/ovirt-engine/certs/ovirt-provider-ovn.cer',
    ]

    keystores = [
        ('mypass', '/etc/pki/ovirt-engine/.truststore'),
        ('changeit', '/var/lib/ovirt-engine/external_truststore'),
    ]

    self.add_cmd_output([
        # Copy all engine tunables and domain information
        "engine-config --all",
        # clearer diff from factory defaults (only on ovirt>=4.2.8)
        "engine-config -d",
    ])

    # process certificate files
    self.add_cmd_output([
        "openssl x509 -in %s -text -noout" % c for c in certificates
    ])

    # process TrustStore certificates
    self.add_cmd_output([
        "keytool -list -storepass %s -rfc -keystore %s" % (p, c)
        for (p, c) in keystores
    ])

    # 3.x line uses engine-manage-domains, 4.x uses ovirt-aaa-jdbc-tool
    manage_domains = 'engine-manage-domains'
    extensions_tool = 'ovirt-engine-extensions-tool'
    jdbc_tool = 'ovirt-aaa-jdbc-tool'
    if is_executable(manage_domains):
        self.add_cmd_output('%s list' % manage_domains)
    if is_executable(extensions_tool):
        self.add_cmd_output('%s info list-extensions' % extensions_tool)
    if is_executable(jdbc_tool):
        subcmds = [
            'query --what=user',
            'query --what=group',
            'settings show'
        ]
        self.add_cmd_output(['%s %s' % (jdbc_tool, sc) for sc in subcmds])

    # Copy engine config files.
    self.add_copy_spec([
        "/etc/ovirt-engine",
        "/etc/rhevm/",
        "/etc/ovirt-engine-dwh",
        "/etc/ovirt-engine-reports",
        "/etc/ovirt-engine-metrics",
        "/etc/ovirt-engine-setup",
        "/etc/ovirt-vmconsole",
        "/var/log/ovirt-engine",
        "/var/log/ovirt-engine-dwh",
        "/var/log/ovirt-engine-reports",
        "/var/log/ovirt-scheduler-proxy",
        "/var/log/rhevm",
        "/etc/sysconfig/ovirt-engine",
        "/usr/share/ovirt-engine/conf",
        "/var/log/ovirt-guest-agent",
        "/var/lib/ovirt-engine/setup-history.txt",
        "/var/lib/ovirt-engine/setup/answers",
        "/var/lib/ovirt-engine/external_truststore",
        "/var/tmp/ovirt-engine/config",
        "/var/lib/ovirt-engine/jboss_runtime/config",
        "/var/lib/ovirt-engine-reports/jboss_runtime/config"
    ])

    # Copying host certs; extra copy the hidden .truststore file
    self.add_forbidden_path(["/etc/pki/ovirt-engine/keys",
                             "/etc/pki/ovirt-engine/private"])
    self.add_copy_spec(["/etc/pki/ovirt-engine/",
                        "/etc/pki/ovirt-engine/.truststore"])
def setup(self):
    self.add_file_tags({
        '/etc/systemd/journald.conf.*': 'insights_etc_journald_conf',
        '/usr/lib/systemd/journald.conf.*': 'insights_usr_journald_conf_d',
        '/etc/systemd/system.conf': 'insights_systemd_system_conf',
        '/etc/systemd/logind.conf': 'insights_systemd_logind_conf'
    })

    self.add_cmd_output([
        "systemctl status --all",
        "systemctl show --all",
        "systemctl show *service --all",
        # It is possible to do systemctl show with target, slice,
        # device, socket, scope, and mount too but service and
        # status --all mostly seems to cover the others.
        "systemctl list-units",
        "systemctl list-units --failed",
        "systemctl list-units --all",
        "systemctl list-unit-files",
        "systemctl list-jobs",
        "systemctl list-dependencies",
        "systemctl list-timers --all",
        "systemctl list-machines",
        "systemctl show-environment",
        "systemd-delta",
        "systemd-analyze",
        "systemd-analyze blame",
        "systemd-analyze dump",
        "systemd-inhibit --list",
        "journalctl --list-boots",
        "ls -lR /lib/systemd"
    ])
    self.add_cmd_output('timedatectl', root_symlink='date')

    # resolvectl command starts systemd-resolved service if that
    # is not running, so gate the commands by this predicate
    if is_executable('resolvectl'):
        resolvectl_status = 'resolvectl status'
        resolvectl_statistics = 'resolvectl statistics'
    else:
        resolvectl_status = 'systemd-resolve --status'
        resolvectl_statistics = 'systemd-resolve --statistics'
    self.add_cmd_output([
        resolvectl_status,
        resolvectl_statistics,
    ], pred=SoSPredicate(self, services=["systemd-resolved"]))

    self.add_cmd_output("systemd-analyze plot",
                        suggest_filename="systemd-analyze_plot.svg")

    if self.get_option("verify"):
        self.add_cmd_output("journalctl --verify")

    self.add_copy_spec([
        "/etc/systemd",
        "/lib/systemd/system",
        "/lib/systemd/user",
        "/etc/vconsole.conf",
        "/run/systemd/generator*",
        "/run/systemd/seats",
        "/run/systemd/sessions",
        "/run/systemd/system",
        "/run/systemd/users",
        "/etc/modules-load.d/*.conf",
        "/etc/yum/protected.d/systemd.conf",
        "/etc/tmpfiles.d/*.conf",
        "/run/tmpfiles.d/*.conf",
        "/usr/lib/tmpfiles.d/*.conf",
    ])

    self.add_forbidden_path('/dev/null')