def stop(service_name, fail_pattern='FAILED'):
    """Stop a service via an init script.

    'service_name' is the base of the keys used in the core.config and
    core.state dictionaries.  If we started the service, the init script is
    run via "service <init-script> stop".  The regex 'fail_pattern' is
    matched against stdout; a match means the shutdown failed.  The sentinel
    file, if one was recorded, must no longer exist afterwards.

    Globals used:
    core.config[service_name.init-script]: name of the init script
        (falls back to service_name).
    core.config[service_name.sentinel-file]: path of the sentinel file.
    core.state[service_name.started-service]: whether we started the
        service; reset to False after shutdown.
    """
    init_script = _init_script_name(service_name)
    if not core.state.get(service_name + '.started-service'):
        core.skip('did not start service ' + service_name)
        return
    stdout, _, fail = core.check_system(('service', init_script, 'stop'),
                                        'Stop ' + service_name + ' service')
    # Any fail_pattern match in the script's output counts as a failure.
    assert not re.search(fail_pattern, stdout), fail
    sentinel_file = core.config.get(service_name + '.sentinel-file')
    if sentinel_file:
        assert not os.path.exists(sentinel_file), \
            "%(service_name)s sentinel file still exists at %(sentinel_file)s" % locals()
    core.state[service_name + '.started-service'] = False
def stop(service_name):
    """Stop a service via init script or systemd.

    'service_name' is the base of the keys used in the core.config and
    core.state dictionaries.  If we started the service, it is stopped with
    "service <name> stop" (pre-EL7) or "systemctl stop <name>" (EL7+).

    Globals used:
    core.state[service_name.started-service]: whether we started the
        service; reset to False after shutdown.
    """
    if not core.state.get(service_name + '.started-service'):
        core.skip('did not start service ' + service_name)
        return
    # EL7 and later use systemd; older releases use SysV init scripts.
    use_systemd = core.el_release() >= 7
    command = (('systemctl', 'stop', service_name) if use_systemd
               else ('service', service_name, 'stop'))
    core.check_system(command, 'Stop ' + service_name + ' service')
    core.state[service_name + '.started-service'] = False
def start(service_name):
    """Start a service via init script or systemd.

    'service_name' is the base of the keys used in the core.config and
    core.state dictionaries.  The service is started with
    "service <name> start" (pre-EL7) or "systemctl start <name>" (EL7+).
    Nothing is done when core.state[service_name.started-service] is True.

    Globals set:
    core.state[service_name.started-service]: set to True after startup.
    """
    if core.state.get(service_name + '.started-service'):
        core.skip('service ' + service_name + ' already running (flagged as started)')
        return
    # EL7 and later use systemd; older releases use SysV init scripts.
    use_systemd = core.el_release() >= 7
    command = (('systemctl', 'start', service_name) if use_systemd
               else ('service', service_name, 'start'))
    core.check_system(command, 'Start ' + service_name + ' service')
    core.state[service_name + '.started-service'] = True
def test_02_user(self):
    """Verify that the configured test user account exists.

    Fix: 'except KeyError, e' is Python-2-only syntax and bound a variable
    that was never used; use the bare 'except KeyError:' form, matching the
    other user-lookup tests in this suite.
    """
    # Flag read by later mapfile tests; reset before any early exit.
    core.state['system.wrote_mapfile'] = False
    if core.options.skiptests:
        core.skip('no user needed')
        return
    try:
        pwd.getpwnam(core.options.username)
    except KeyError:
        self.fail("User '%s' should exist but does not" % core.options.username)
def test_02_start_seg(self):
    """Start the Globus Scheduler Event Generator unless already running."""
    core.state['globus.started-seg'] = False
    core.config['globus.seg-lockfile'] = '/var/lock/subsys/globus-scheduler-event-generator'
    if not core.rpm_is_installed('globus-scheduler-event-generator-progs'):
        return
    lockfile = core.config['globus.seg-lockfile']
    # An existing lock file means the SEG was started outside this run.
    if os.path.exists(lockfile):
        core.skip('SEG apparently running')
        return
    stdout, _, fail = core.check_system(
        ('service', 'globus-scheduler-event-generator', 'start'),
        'Start Globus SEG')
    self.assertTrue('FAILED' not in stdout, fail)
    self.assertTrue(os.path.exists(lockfile), 'Globus SEG run lock file missing')
    core.state['globus.started-seg'] = True
def test_03_remove_test_user(self):
    """Delete the test user account and clean up its leftover files."""
    if not core.state['general.user_added']:
        core.skip('did not add user')
        return
    username = core.options.username
    # Look up the home directory before the account is deleted.
    entry = pwd.getpwnam(username)
    globus_dir = os.path.join(entry.pw_dir, '.globus')
    core.check_system(('userdel', username), "Remove user '%s'" % (username))
    for leftover in ('usercert.pem', 'userkey.pem'):
        files.remove(os.path.join(globus_dir, leftover))
    files.remove(os.path.join('/var/spool/mail', username))
    shutil.rmtree(entry.pw_dir)
def test_09_start_voms(self):
    """Start the VOMS server unless it is already running."""
    core.state['voms.started-server'] = False
    if not core.rpm_is_installed('voms-server'):
        core.skip('not installed')
        return
    lockfile = core.config['voms.lock-file']
    if os.path.exists(lockfile):
        core.skip('apparently running')
        return
    stdout, _, fail = core.check_system(('service', 'voms', 'start'),
                                        'Start VOMS service')
    self.assertEqual(stdout.find('FAILED'), -1, fail)
    self.assertTrue(os.path.exists(lockfile), 'VOMS server PID file is missing')
    core.state['voms.started-server'] = True
def test_01_stop_xrootd(self):
    """Stop the CVMFS service and restore its configuration files.

    Fix: replaced the '== False' comparison with a truthiness test,
    matching the state checks elsewhere in the suite.
    NOTE(review): the method name says 'xrootd' but this test handles
    CVMFS; renaming would change unittest execution order, so it is only
    flagged here — confirm intent before renaming.
    """
    if not core.rpm_is_installed('cvmfs'):
        core.skip('not installed')
        return
    if not core.state['cvmfs.started-server']:
        core.skip('did not start server')
        return
    command = ('service', 'cvmfs', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Cvmfs server')
    self.assert_(stdout.find('FAILED') == -1, fail)
    # Put back the configuration files modified during setup.
    files.restore("/etc/fuse.conf", "root")
    files.restore("/etc/auto.master", "root")
    files.restore("/etc/cvmfs/default.local", "root")
    files.restore("/etc/cvmfs/domain.d/cern.ch.local", "root")
def test_01_start_gridftp(self):
    """Start the GridFTP server unless it is already running."""
    pid_file = '/var/run/globus-gridftp-server.pid'
    core.config['gridftp.pid-file'] = pid_file
    core.state['gridftp.started-server'] = False
    if not core.rpm_is_installed('globus-gridftp-server-progs'):
        core.skip('not installed')
        return
    if os.path.exists(pid_file):
        core.skip('apparently running')
        return
    stdout, _, fail = core.check_system(
        ('service', 'globus-gridftp-server', 'start'),
        'Start GridFTP server')
    self.assertTrue('FAILED' not in stdout, fail)
    self.assertTrue(os.path.exists(pid_file), 'GridFTP server PID file missing')
    core.state['gridftp.started-server'] = True
def test_01_stop_xrootd(self):
    """Stop the Xrootd server and restore its configuration files.

    Fix: replaced the '== False' comparison with a truthiness test,
    matching the state checks elsewhere in the suite.
    """
    if not core.rpm_is_installed('xrootd-server'):
        core.skip('not installed')
        return
    if not core.state['xrootd.started-server']:
        core.skip('did not start server')
        return
    command = ('service', 'xrootd', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Xrootd server')
    self.assert_(stdout.find('FAILED') == -1, fail)
    self.assert_(not os.path.exists(core.config['xrootd.pid-file']),
                 'Xrootd server PID file still present')
    # GSI setup wrote these files; only restore them when GSI was enabled.
    if core.config['xrootd.gsi'] == "ON":
        files.restore('/etc/xrootd/xrootd-clustered.cfg', "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
def test_03_start_pbs_sched(self):
    """Start the PBS scheduler daemon unless it is already running."""
    lockfile = '/var/lock/subsys/pbs_sched'
    core.config['torque.sched-lockfile'] = lockfile
    core.state['torque.pbs-sched-running'] = False
    if core.missing_rpm(*self.required_rpms):
        return
    if os.path.exists(lockfile):
        core.skip('pbs scheduler apparently running')
        return
    stdout, _, fail = core.check_system(('service', 'pbs_sched', 'start'),
                                        'Start pbs scheduler daemon')
    self.assertTrue('error' not in stdout, fail)
    self.assertTrue(os.path.exists(lockfile), 'pbs sched run lock file missing')
    core.state['torque.pbs-sched-running'] = True
def test_01_start_xrootd(self):
    """Configure GSI security for Xrootd, then start the server.

    Fixes: rejoined the check_system() description string that had been
    broken across source lines ('Start Xrootd server'); modernized the
    Python-2-only octal literals to 0o644/0o400; normalized spacing.
    """
    core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
    core.config['certs.hostcert'] = '/etc/grid-security/hostcert.pem'
    core.config['certs.hostkey'] = '/etc/grid-security/hostkey.pem'
    core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
    core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
    core.config['xrootd.gsi'] = "ON"
    core.state['xrootd.started-server'] = False
    if not core.rpm_is_installed('xrootd-server'):
        core.skip('not installed')
        return
    user = pwd.getpwnam("xrootd")
    if core.config['xrootd.gsi'] == "ON":
        # Copy the host cert/key pair for the xrootd daemon's use.
        self.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
        self.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)
        # Append a GSI-enabled configuration to the clustered config.
        cfgfile = '/etc/xrootd/xrootd-clustered.cfg'
        cfgtext = 'cms.space min 2g 5g\n'
        cfgtext = cfgtext + 'xrootd.seclib /usr/lib64/libXrdSec.so\n'
        cfgtext = cfgtext + 'sec.protocol /usr/lib64 gsi -certdir:/etc/grid-security/certificates -cert:/etc/grid-security/xrd/xrdcert.pem -key:/etc/grid-security/xrd/xrdkey.pem -crl:3 -gridmap:/etc/grid-security/xrd/xrdmapfile --gmapopt:10 --gmapto:0\n'
        cfgtext = cfgtext + 'acc.authdb /etc/xrootd/auth_file\n'
        cfgtext = cfgtext + 'ofs.authorize\n'
        files.append(cfgfile, cfgtext, owner='xrootd', backup=True)
        # Authorization database and grid-map file, owned by xrootd.
        authfile = '/etc/xrootd/auth_file'
        files.write(authfile, 'u * /tmp lr\nu = /tmp/@=/ a\nu xrootd /tmp a\n', owner="xrootd")
        os.chown(authfile, user.pw_uid, user.pw_gid)
        files.write("/etc/grid-security/xrd/xrdmapfile",
                    "\"/O=Grid/OU=GlobusTest/OU=VDT/CN=VDT Test\" vdttest",
                    owner="xrootd")
        os.chown("/etc/grid-security/xrd/xrdmapfile", user.pw_uid, user.pw_gid)
    command = ('service', 'xrootd', 'start')
    if core.el_release() != 6:
        stdout, stderr, fail = core.check_system(command, 'Start Xrootd server')
        self.assert_(stdout.find('FAILED') == -1, fail)
        self.assert_(os.path.exists(core.config['xrootd.pid-file']),
                     'xrootd server PID file missing')
        core.state['xrootd.started-server'] = True
    else:
        # On EL6 the init script is expected to exit nonzero here (exit=1),
        # so assert that it did NOT report success.
        stdout, stderr, fail = core.check_system(command, 'Start Xrootd server', exit=1)
        self.assert_(stdout.find('OK') == -1, fail)
def start(service_name, fail_pattern='FAILED', init_script=None, sentinel_file=None):
    """Start a service via an init script.

    'service_name' is the base of the keys used in the core.config and
    core.state dictionaries and the default value of 'init_script'.  The
    init script is run via "service <init-script> start".  The regex
    'fail_pattern' is matched against stdout; a match means startup failed.
    'sentinel_file' is a pid/lock (or similar) file expected to exist iff
    the service is running.  Startup is skipped when the sentinel file
    exists or core.state[service_name.started-service] is True.

    Globals set:
    core.config[service_name.init-script]: init_script (or service_name).
    core.state[service_name.started-service]: True on successful startup,
        False otherwise.
    core.config[service_name.sentinel-file]: sentinel_file, if specified.
    """
    init_script = _init_script_name(service_name, init_script)
    if sentinel_file and os.path.exists(sentinel_file):
        core.skip('service ' + service_name + ' already running (sentinel file found)')
        return
    if core.state.get(service_name + '.started-service'):
        core.skip('service ' + service_name + ' already running (flagged as started)')
        return
    stdout, _, fail = core.check_system(('service', init_script, 'start'),
                                        'Start ' + service_name + ' service')
    # Any fail_pattern match in the script's output counts as a failure.
    assert not re.search(fail_pattern, stdout), fail
    if sentinel_file:
        assert os.path.exists(sentinel_file), \
            "%(service_name)s sentinel file not found at %(sentinel_file)s" % locals()
        core.config[service_name + '.sentinel-file'] = sentinel_file
    core.state[service_name + '.started-service'] = True
def test_03_install_mapfile(self):
    """Append the test user's certificate DN to the system grid mapfile."""
    core.state['system.wrote_mapfile'] = False
    try:
        entry = pwd.getpwnam(core.options.username)
    except KeyError:
        core.skip('no user')
        return
    if entry.pw_dir == '/':
        core.skip('no user home dir')
        return
    cert_path = os.path.join(entry.pw_dir, '.globus', 'usercert.pem')
    user_dn, _ = core.certificate_info(cert_path)
    mapfile = core.config['system.mapfile']
    # Only flag the mapfile as ours when this test created it.
    existed_prior = os.path.exists(mapfile)
    files.append(mapfile, '"%s" %s\n' % (user_dn, entry.pw_name), owner='user')
    if not existed_prior:
        core.state['system.wrote_mapfile'] = True
    os.chmod(mapfile, 0o644)
def test_01_start_condor(self):
    """Start Condor unless its master lock file shows it already running."""
    lockfile = '/var/lock/subsys/condor_master'
    core.config['condor.lockfile'] = lockfile
    core.state['condor.started-service'] = False
    core.state['condor.running-service'] = False
    if core.missing_rpm('condor'):
        return
    if os.path.exists(lockfile):
        # Running, but not started by us — record that distinction.
        core.state['condor.running-service'] = True
        core.skip('apparently running')
        return
    stdout, _, fail = core.check_system(('service', 'condor', 'start'),
                                        'Start Condor')
    self.assertTrue('error' not in stdout, fail)
    self.assertTrue(os.path.exists(lockfile), 'Condor run lock file missing')
    core.state['condor.started-service'] = True
    core.state['condor.running-service'] = True
def test_03_pbs_job(self):
    """Run a trivial echo job through Globus GRAM against PBS."""
    if core.missing_rpm('globus-gram-job-manager-pbs',
                        'globus-gram-client-tools', 'globus-proxy-utils'):
        return
    # All four pieces must be up and configured or the job cannot run.
    if (not core.state['torque.pbs-configured']
            or not core.state['torque.pbs-mom-running']
            or not core.state['torque.pbs-server-running']
            or not core.state['globus.pbs_configured']):
        core.skip('pbs not running or configured')
        return
    command = ('globus-job-run', self.contact_string('pbs'), '/bin/echo', 'hello')
    stdout = core.check_system(command, 'globus-job-run on PBS job', user=True)[0]
    self.assertEqual(stdout, 'hello\n',
                     'Incorrect output from globus-job-run on PBS job')
def test_01_add_user(self):
    """Create the test user with a home dir, .globus dir, and cert pair."""
    core.state['general.user_added'] = False
    # Bail out if this step is not needed.
    if not core.options.adduser:
        core.skip('not requested')
        return
    try:
        pwd.getpwnam(core.options.username)
    except KeyError:
        pass  # expected: the user should not exist yet
    else:
        core.skip('user exists')
        return
    # Create the account.
    base_dir = core.config['user.home']
    if not os.path.isdir(base_dir):
        os.mkdir(base_dir)
    command = ('useradd', '--base-dir', base_dir, '-n', '--shell', '/bin/sh',
               core.options.username)
    core.check_system(command, 'Add user %s' % (core.options.username))
    core.state['general.user_added'] = True
    # Home and .globus directories, owned by the new user.
    user = pwd.getpwnam(core.options.username)
    os.chown(user.pw_dir, user.pw_uid, user.pw_gid)
    os.chmod(user.pw_dir, 0o755)
    globus_dir = os.path.join(user.pw_dir, '.globus')
    if not os.path.isdir(globus_dir):
        os.mkdir(globus_dir)
    os.chown(globus_dir, user.pw_uid, user.pw_gid)
    os.chmod(globus_dir, 0o755)
    # Install the test certificate pair with conventional permissions.
    for name, mode in (('usercert.pem', 0o644), ('userkey.pem', 0o400)):
        dest = os.path.join(globus_dir, name)
        shutil.copy2(os.path.join('/usr/share/osg-test', name), dest)
        os.chmod(dest, mode)
        os.chown(dest, user.pw_uid, user.pw_gid)
def test_01_add_user(self):
    """Create the test user (with a password, for SSH) and a user cert."""
    core.state['general.user_added'] = False
    core.state['general.user_cert_created'] = False
    # Bail out if this step is not needed.
    if not core.options.adduser:
        core.skip('not requested')
        return
    try:
        pwd.getpwnam(core.options.username)
    except KeyError:
        pass  # expected: the user should not exist yet
    else:
        core.skip('user exists')
        return
    # Create the account.
    base_dir = core.config['user.home']
    if not os.path.isdir(base_dir):
        os.mkdir(base_dir)
    # SSH requires that the user have a password - even if password
    # auth is disabled - so set a random one.
    pw_hash = encrypted_password(random_string(16))
    command = ('useradd', '--base-dir', base_dir, '--password', pw_hash,
               '--shell', '/bin/sh', core.options.username)
    core.check_system(command, 'Add user %s' % core.options.username)
    core.state['general.user_added'] = True
    # Home directory owned by the new user.
    entry = pwd.getpwnam(core.options.username)
    os.chown(entry.pw_dir, entry.pw_uid, entry.pw_gid)
    os.chmod(entry.pw_dir, 0o755)
    # Generate a user certificate from the test CA if one is not present.
    globus_dir = os.path.join(entry.pw_dir, '.globus')
    user_cert = os.path.join(globus_dir, 'usercert.pem')
    test_ca = CA.load(core.config['certs.test-ca'])
    if not os.path.exists(user_cert):
        test_ca.usercert(core.options.username, core.options.password)
        core.state['general.user_cert_created'] = True
def test_02_user(self):
    """Verify the test user exists with a real home dir; map its cert DN."""
    core.state['system.wrote_mapfile'] = False
    if core.options.skiptests:
        core.skip('no user needed')
        return
    try:
        entry = pwd.getpwnam(core.options.username)
    except KeyError:
        self.fail("User '%s' should exist but does not" % core.options.username)
    self.assertTrue(entry.pw_dir != '/',
                    "User '%s' has home directory at '/'" % (core.options.username))
    self.assertTrue(os.path.isdir(entry.pw_dir),
                    "User '%s' missing a home directory at '%s'" %
                    (core.options.username, entry.pw_dir))
    cert_path = os.path.join(entry.pw_dir, '.globus', 'usercert.pem')
    subject, issuer = certificate_info(cert_path)
    core.config['user.cert_subject'] = subject
    core.config['user.cert_issuer'] = issuer
    # Add user to mapfile
    files.append(core.config['system.mapfile'],
                 '"%s" %s\n' % (subject, entry.pw_name), owner='user')
    core.state['system.wrote_mapfile'] = True
    os.chmod(core.config['system.mapfile'], 0o644)
def stop(service_name, fail_pattern='FAILED'):
    """Stop a service via an init script.

    'service_name' is the base of the keys used in the core.config and
    core.state dictionaries.  If we started the service, the init script is
    run via "service <init-script> stop".  The regex 'fail_pattern' is
    matched against stdout; a match means the shutdown failed.  The sentinel
    file, if one was recorded, must no longer exist afterwards.

    Globals used:
    core.config[service_name.init-script]: name of the init script
        (falls back to service_name).
    core.config[service_name.sentinel-file]: path of the sentinel file.
    core.state[service_name.started-service]: whether we started the
        service; reset to False after shutdown.
    """
    init_script = core.config.get(service_name + '.init-script', service_name)
    if not core.state.get(service_name + '.started-service'):
        core.skip('did not start service ' + service_name)
        return
    stdout, _, fail = core.check_system(('service', init_script, 'stop'),
                                        'Stop ' + service_name + ' service')
    # Any fail_pattern match in the script's output counts as a failure.
    assert not re.search(fail_pattern, stdout), fail
    sentinel_file = core.config.get(service_name + '.sentinel-file')
    if sentinel_file:
        assert not os.path.exists(sentinel_file), \
            "%(service_name)s sentinel file still exists at %(sentinel_file)s" % locals()
    core.state[service_name + '.started-service'] = False
def test_03_lfc_multilib(self):
    """Check that 32-bit LFC python RPMs are available in the 64-bit repo.

    Fix: the lfc-python26 failure messages incorrectly said 'lfc-python';
    they now name the package actually being queried.
    """
    if core.missing_rpm('yum-utils'):
        return
    # We can't test this on 32-bit
    uname_out, _, _ = core.check_system(['uname', '-i'], 'getting arch')
    if re.search(r'i\d86', uname_out):
        core.skip('running on 32-bit')
        return
    cmdbase = ['repoquery', '--plugins']
    for repo in core.options.extrarepos:
        cmdbase.append('--enablerepo=%s' % repo)
    # Find the 32-bit lfc-python rpm
    stdout, _, _ = core.check_system(cmdbase + ['lfc-python.i386'],
                                     'lfc-python multilib (32bit)')
    if stdout.strip() == '':
        self.fail('32-bit lfc-python not found in 64-bit repo')
    # Sanity check: find the 64-bit lfc-python rpm
    stdout, _, _ = core.check_system(cmdbase + ['lfc-python.x86_64'],
                                     'lfc-python multilib (64bit)')
    if stdout.strip() == '':
        self.fail('64-bit lfc-python not found in 64-bit repo')
    # Find the 32-bit lfc-python26 rpm (on el5 only)
    if core.el_release() == 5:
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.i386'],
                                         'lfc-python26 multilib (32bit)')
        if stdout.strip() == '':
            self.fail('32-bit lfc-python26 not found in 64-bit repo')
        # Sanity check: find the 64-bit lfc-python26 rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.x86_64'],
                                         'lfc-python26 multilib (64bit)')
        if stdout.strip() == '':
            self.fail('64-bit lfc-python26 not found in 64-bit repo')
def test_01_remove_packages(self):
    """Erase the RPMs installed during this test run, preserving the ones
    that were present before the run started."""
    if (('install.preinstalled' not in core.state) or (len(core.state['install.preinstalled']) == 0)):
        core.skip('no original list')
        return
    if 'install.installed' not in core.state:
        core.skip('no packages installed')
        return
    # New packages = everything installed now minus the pre-run snapshot.
    current_rpms = core.installed_rpms()
    new_rpms = current_rpms - core.state['install.preinstalled']
    if len(new_rpms) == 0:
        core.skip('no new RPMs')
        return
    # For the "rpm -e" command, RPMs should be listed in the same order as
    # installed.  Why?  The erase command processes files in reverse order
    # as listed on the command line, mostly; it seems to do a bit of
    # reordering (search -vv output for "tsort"), but it is not clear what
    # the algorithm is.  So, rpm will cheerfully erase a package, the
    # contents of which are needed by the pre- or post-uninstall scriptlets
    # of a package that will be erased later in sequence.  By listing them
    # in yum install order, we presumably get a valid ordering and increase
    # the chances of a clean erase.
    rpm_erase_candidates = []
    for package in core.state['install.installed']:
        if package in new_rpms:
            rpm_erase_candidates.append(package)
    # Anything new that yum did not report gets appended at the end.
    remaining_new_rpms = new_rpms - set(rpm_erase_candidates)
    count = len(remaining_new_rpms)
    if count > 0:
        core.log_message('%d RPMs installed but not in yum output' % count)
        rpm_erase_candidates += remaining_new_rpms
    # Creating the list of RPMs to erase is more complicated than just using
    # the list of new RPMs, because there may be RPMs with both 32- and
    # 64-bit versions installed.  In that case, rpm will fail if given just
    # the base package name; instead, the architecture must be specified,
    # and an easy way to get that information is from 'rpm -q'.  So we use
    # the bare name when possible, and the fully versioned one when
    # necessary.
    rpm_erase_list = []
    for package in rpm_erase_candidates:
        command = ('rpm', '--query', package, '--queryformat', r'%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\n')
        status, stdout, stderr = core.system(command, log_output=False)
        versioned_rpms = re.split('\n', stdout.strip())
        if len(versioned_rpms) > 1:
            # Multiple arches installed: erase by fully versioned names.
            rpm_erase_list += versioned_rpms
        else:
            rpm_erase_list.append(package)
    package_count = len(rpm_erase_list)
    command = ['rpm', '--quiet', '--erase'] + rpm_erase_list
    core.check_system(command, 'Remove %d packages' % (package_count))
def test_04_glexec_switch_id(self):
    """Check that glexec switches identity from root to the test user."""
    # If the utils are not present, it won't work anyhow, so might as well
    # skip the test.
    if not core.rpm_is_installed('globus-proxy-utils'):
        core.skip('globus-proxy-utils not installed')
        return
    if not core.rpm_is_installed('glexec'):
        core.skip('not installed')
        return
    command = ('grid-proxy-info', '-f', self.__user_proxy_path)
    status, stdout, stderr = core.system(command, True)
    if int(status) != 0:
        # No proxy found even after previous checks; have to skip.
        core.skip('suitable proxy not found')
        return
    command = ('/usr/sbin/glexec', '/usr/bin/id', '-u')
    status, stdout, stderr = core.system(command)
    switched_id = stdout.rstrip()
    self.assertTrue(self.__uid == switched_id,
                    'Glexec identity switch from root to user ' + core.options.username + ' failed')
def test_01_stop_mysqld(self):
    """Stop the MySQL daemon if the server package is installed."""
    if core.rpm_is_installed('mysql-server'):
        service.stop('mysqld')
    else:
        core.skip('not installed')
def test_01_start_mysqld(self):
    """Start the MySQL daemon if the server package is installed."""
    if core.rpm_is_installed('mysql-server'):
        service.start('mysqld', sentinel_file='/var/run/mysqld/mysqld.pid')
    else:
        core.skip('not installed')
def test_01_restore_lcmaps_after_glexec(self):
    """Restore the pre-test lcmaps.db when glexec was installed."""
    if core.rpm_is_installed('glexec'):
        files.restore('/etc/lcmaps.db', 'lcmaps')
    else:
        core.skip("glexec not installed, don't need lcmaps for it")
def test_01_create_lcmaps_for_glexec(self):
    """Write an lcmaps.db suitable for testing the CE and glexec (not GUMS)."""
    if not core.rpm_is_installed('glexec'):
        core.skip("glexec not installed, don't need lcmaps for it")
        return
    path = '/etc/lcmaps.db'
    # NOTE(review): the line breaks inside this configuration text were
    # reconstructed from a whitespace-mangled source; verify the exact
    # layout (especially continuation-line indentation) against the
    # original lcmaps.db before relying on it.
    contents = """
##############################################################################
#
# lcmaps.db
#
# This is a configuration for lcmaps for testing the ce and glexec. It CAN'T
# be used as-is to test gums.
#
##############################################################################

glexectracking = "lcmaps_glexec_tracking.mod"
                 "-exec /usr/sbin/glexec_monitor"
# Uncomment if your procd is located in a non-standard directory
#                "-procddir /usr"
# Uncomment to write tracking info to glexec_monitor.log in the given dir
# otherwise the default is to use syslog
#                "-logdir /var/log/glexec"
# Uncomment to change the default logging level for the glexec_monitor
# Level 0: none, 1: errors, 2: warnings, 3: notices, 4: info, 5: debug
# The notices level is used for usage tracking; info is commonly useful.
# Default is lcmaps_debug_level from glexec.conf.
#                "-log-level 4"
# Uncomment to change the syslog facility. Default is LOG_DAEMON
#                "-log-facility LOG_DAEMON"
# Uncomment to use local time in the file log (doesn't apply to syslog)
#                "-datetime-local"
# Uncomment to change the minimum tracking group id
#                "-min-gid 65000"
# Uncomment to change the maximum tracking group id
#                "-max-gid 65049"
# Uncomment to not kill processes still running after the main process finishes
#                "-dont-kill-leftovers"

posix_enf = "lcmaps_posix_enf.mod"
            "-maxuid 1 -maxpgid 1 -maxsgid 32"

gridmapfile = "lcmaps_localaccount.mod"
              "-gridmap /etc/grid-security/grid-mapfile"

verifyproxy = "lcmaps_verify_proxy.mod"
              "--allow-limited-proxy"
              " -certdir /etc/grid-security/certificates"

# Mapping policies
#
# Mapping policy: osg_default
# Purpose: Used for the Globus gatekeeper and the gridftp server
#
osg_default:
gridmapfile -> posix_enf
#
# Mapping policy: glexec
# Purpose: Used for glexec on the worker nodes.
#
glexec:
verifyproxy -> gridmapfile
gridmapfile -> glexectracking
"""
    files.write(path, contents, owner='lcmaps')
def test_03_configure_globus_pbs(self):
    """Restore the Globus PBS configuration file altered during setup.

    Fix: the 'not configured' branch called core.skip() but fell through
    and restored the file anyway; every other skip in this suite is
    followed by a return, and restoring an unaltered file would be wrong,
    so a return was added.
    """
    if not core.state['globus.pbs_configured']:
        core.skip('Globus pbs configuration not altered')
        return
    if not core.rpm_is_installed('globus-gram-job-manager-pbs'):
        return
    files.restore(core.config['globus.pbs-config'], 'pbs')
def test_01_stop_tomcat(self):
    """Stop Tomcat if its (version-appropriate) package is installed."""
    if core.rpm_is_installed(tomcat.pkgname()):
        service.stop('tomcat')
    else:
        core.skip('not installed')