def test_01_stop_server(self):
    """Stop the pbs_server service and restore its backed-up files."""
    core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
    self.skip_ok_unless(core.state['pbs_server.started-service'], 'did not start pbs server')
    service.check_stop('pbs_server')
    # Put the serverdb and nodes file back the way setup found them.
    for backup_key in ('torque.pbs-serverdb', 'torque.pbs-nodes-file'):
        files.restore(core.config[backup_key], 'pbs')
def test_01_cvmfs_probe(self):
    """Run 'cvmfs_config probe' against a widened repository set."""
    default_local = '/etc/cvmfs/default.local'
    # Test depends on oasis-config to access the oasis.opensciencegrid.org
    # repo. This is an external service, so the requirement should be
    # removed as part of SOFTWARE-1108.
    core.skip_ok_unless_installed('cvmfs')
    core.skip_ok_unless_installed('cvmfs-keys', 'oasis-config', by_dependency=True)
    status, stdout, stderr = core.system(('cat', default_local), False)
    # Dave Dykstra suggested running cvmfs probe against a different
    # set of repositories than are currently set up, so we modify them
    # just for this test. (See SOFTWARE-1097)
    # In the future, this test might be removed since we do not want
    # to depend on external services, and it's redundant to probe the
    # repos that we have already mounted.
    probe_repos = ",".join(['atlas.cern.ch', 'cms.cern.ch', 'oasis.opensciencegrid.org'])
    files.replace(default_local,
                  'CVMFS_REPOSITORIES=cms.cern.ch',
                  'CVMFS_REPOSITORIES=' + probe_repos,
                  owner='cvmfsprobe')
    try:
        probe_command = ('cvmfs_config', 'probe')
        status, stdout, stderr = core.system(probe_command, False)
        failure_text = core.diagnose('cvmfs probe', probe_command, status, stdout, stderr)
        self.assertEqual(status, 0, failure_text)
    finally:
        files.restore(default_local, 'cvmfsprobe')
def test_01_cvmfs_probe(self):
    """Probe atlas/cms/oasis repos via 'cvmfs_config probe'."""
    default_local = '/etc/cvmfs/default.local'
    repos_to_probe = ",".join(['atlas.cern.ch', 'cms.cern.ch', 'oasis.opensciencegrid.org'])
    # Test depends on oasis-config to access the oasis.opensciencegrid.org
    # repo. This is an external service, so the requirement should be
    # removed as part of SOFTWARE-1108.
    core.skip_ok_unless_installed('cvmfs')
    core.skip_ok_unless_installed('cvmfs-keys', 'oasis-config', by_dependency=True)
    status, stdout, stderr = core.system(('cat', default_local), False)
    # Dave Dykstra suggested running cvmfs probe against a different
    # set of repositories than are currently set up, so we modify them
    # just for this test. (See SOFTWARE-1097)
    # In the future, this test might be removed since we do not want
    # to depend on external services, and it's redundant to probe the
    # repos that we have already mounted.
    files.replace(default_local,
                  'CVMFS_REPOSITORIES=cms.cern.ch',
                  'CVMFS_REPOSITORIES=' + repos_to_probe,
                  owner='cvmfsprobe')
    try:
        command = ('cvmfs_config', 'probe')
        status, stdout, stderr = core.system(command, False)
        self.assertEqual(status, 0,
                         core.diagnose('cvmfs probe', command, status, stdout, stderr))
    finally:
        # Always put the original repository list back.
        files.restore(default_local, 'cvmfsprobe')
def test_02_restore_vomses(self):
    """Remove generated LSC files and restore the original /etc/vomses.

    Uses skip_ok_unless_installed instead of the older missing_rpm()/return
    pattern, matching the idiom used by the other tests in this suite.
    """
    core.skip_ok_unless_installed('voms-admin-server')
    if os.path.exists(core.config['voms.lsc-dir']):
        shutil.rmtree(core.config['voms.lsc-dir'])
    files.restore('/etc/vomses', 'voms')
def test_04_stop_mom(self):
    """Stop pbs_mom and restore its config and layout files."""
    core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
    self.skip_ok_unless(core.state['pbs_mom.started-service'], 'did not start pbs mom server')
    service.stop('pbs_mom')
    # Restore each backed-up MOM file (config first, then layout).
    files.restore(core.config['torque.mom-config'], 'pbs')
    files.restore(core.config['torque.mom-layout'], 'pbs')
def test_01_stop_condor(self):
    """Stop the personal Condor and restore its configuration."""
    core.skip_ok_unless_installed('condor')
    self.skip_ok_unless(core.state['condor.started-service'], 'did not start server')
    service.check_stop('condor')
    personal_config = core.config['condor.personal_condor']
    files.restore(personal_config, 'condor')
    # Record that the service is down for later tests.
    core.state['condor.running-service'] = False
def test_03_clean_edg_mkgridmap(self):
    """Delete edg-mkgridmap work files, scrub its env vars, restore its conf."""
    core.skip_ok_unless_installed('edg-mkgridmap', 'voms-server')
    env_names = ('VO_LIST_FILE', 'UNDEFINED_ACCTS_FILE', 'EDG_MKGRIDMAP_LOG',
                 'USER_VO_MAP', 'GRIDMAP')
    for name in env_names:
        # Each env var points at a file created during the test run.
        files.remove(os.environ[name])
        del os.environ[name]
    files.restore(core.config['edg.conf'], 'edg')
def test_03_unconfigure(self):
    """Restore every namespace config file that setup backed up."""
    config_keys = ("cache_config_path", "cache_authfile_path",
                   "origin_config_path", "caches_json_path")
    for config_key in config_keys:
        files.restore(_getcfg(config_key), owner=_NAMESPACE)
def test_04_stop_mom(self):
    """Stop the pbs_mom service and restore the backed-up MOM files."""
    core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
    self.skip_ok_unless(core.state['pbs_mom.started-service'], 'did not start pbs mom server')
    service.stop('pbs_mom')
    for name in ('config', 'layout'):
        files.restore(core.config['torque.mom-%s' % name], 'pbs')
def test_03_clean_edg_mkgridmap(self):
    """Delete edg-mkgridmap work files, scrub its env vars, restore its conf.

    Uses skip_ok_unless_installed instead of the older missing_rpm()/return
    pattern, matching the idiom used by the sibling version of this test.
    """
    core.skip_ok_unless_installed('edg-mkgridmap', 'voms-server')
    for envvar in ('VO_LIST_FILE', 'UNDEFINED_ACCTS_FILE', 'EDG_MKGRIDMAP_LOG',
                   'USER_VO_MAP', 'GRIDMAP'):
        # Each env var points at a file created during the test run.
        files.remove(os.environ[envvar])
        del os.environ[envvar]
    files.restore(core.config['edg.conf'], 'edg')
def test_03_clean_edg_mkgridmap(self):
    """Clean up edg-mkgridmap state (VOMS-admin variant)."""
    core.skip_ok_unless_installed('edg-mkgridmap', 'voms-admin-server')
    self.skip_bad_unless(core.state['voms-admin.read-members'], 'Cannot read VO member list')
    self.skip_bad_unless(core.state['tomcat.started'], 'Tomcat not started')
    for var_name in ('VO_LIST_FILE', 'UNDEFINED_ACCTS_FILE', 'EDG_MKGRIDMAP_LOG',
                     'USER_VO_MAP', 'GRIDMAP'):
        # Remove the file the variable points at, then the variable itself.
        files.remove(os.environ[var_name])
        del os.environ[var_name]
    files.restore(core.config['edg.conf'], 'edg')
def test_02_stop_slurmdbd(self):
    """Stop slurmdbd, restore its config, and drop the test DB and user."""
    self.slurm_reqs()
    core.skip_ok_unless_installed('slurm-slurmdbd')
    self.skip_ok_unless(core.state['slurmdbd.started-service'], 'did not start slurmdbd')
    # The service requires its config, so stop it before restoring; use
    # stop() since slurmdbd fails to remove its pid file.
    service.stop('slurmdbd')
    files.restore(core.config['slurmdbd.config'], 'slurm')
    cleanup_sql = ("drop database %s; " % core.config['slurmdbd.name']
                   + "drop user %s;" % core.config['slurmdbd.user'])
    mysql.check_execute(cleanup_sql, 'drop mysql slurmdb')
def test_04_stop_munge(self):
    """Stop the munge daemon, verify shutdown, and restore its key file.

    Fixes: 'X == False' comparisons replaced with 'not X', and the
    deprecated TestCase.assert_ alias replaced with assertTrue/assertFalse.
    """
    core.skip_ok_unless_installed(*self.required_rpms)
    self.skip_ok_if(not core.state['munge.running'], 'munge not running')
    command = ('service', 'munge', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop munge daemon')
    self.assertTrue(stdout.find('error') == -1, fail)
    self.assertFalse(os.path.exists(core.config['munge.lockfile']),
                     'munge lock file still present')
    core.state['munge.running'] = False
    files.restore(core.config['munge.keyfile'], 'pbs')
def test_01_stop_xrootd(self):
    """Stop both xrootd TPC HTTP servers and restore their configs."""
    if core.state['xrootd.tpc.backups-exist']:
        files.restore(core.config['xrootd.tpc.config-1'], "xrootd")
        files.restore(core.config['xrootd.tpc.config-2'], "xrootd")
    core.skip_ok_unless_installed('xrootd', 'xrootd-scitokens', by_dependency=True)
    # Skip only when neither server came up (De Morgan of the original test).
    started_any = (core.state['xrootd.started-http-server-1']
                   or core.state['xrootd.started-http-server-2'])
    self.skip_ok_if(not started_any, 'did not start any of the http servers')
    service.check_stop(core.config['xrootd_tpc_service_1'])
    service.check_stop(core.config['xrootd_tpc_service_2'])
def test_02_stop_slurmdbd(self):
    """Tear down slurmdbd: stop it, restore config, drop the MySQL state."""
    self.slurm_reqs()
    core.skip_ok_unless_installed('slurm-slurmdbd')
    self.skip_ok_unless(core.state['slurmdbd.started-service'], 'did not start slurmdbd')
    # service requires config so we stop it first; use stop() since
    # slurmdbd fails to remove pid file
    service.stop('slurmdbd')
    files.restore(core.config['slurmdbd.config'], 'slurm')
    drop_statements = "drop database %s; " % core.config['slurmdbd.name'] + \
        "drop user %s;" % core.config['slurmdbd.user']
    mysql.check_execute(drop_statements, 'drop mysql slurmdb')
def test_01_stop_mom(self):
    """Stop pbs_mom, verify it shut down, and restore MOM config files.

    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue/assertFalse.
    """
    core.skip_ok_unless_installed(*self.required_rpms)
    self.skip_ok_if(not core.state['torque.pbs-mom-running'], 'did not start pbs mom server')
    command = ('service', 'pbs_mom', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop pbs mom')
    self.assertTrue(stdout.find('error') == -1, fail)
    self.assertFalse(os.path.exists(core.config['torque.mom-lockfile']),
                     'PBS mom run lock file still present')
    for mom_file in ['config', 'layout']:
        files.restore(core.config['torque.mom-%s' % mom_file], 'pbs')
    core.state['torque.pbs-mom-running'] = False
def test_06_cleanup_condor(self):
    """Restore the Gratia condor ProbeConfig, tolerating a missing file.

    Fixes the Python-2-only 'except OSError, e' syntax, which is a
    SyntaxError on Python 3; 'except OSError as e' works on 2.6+ and 3.
    """
    core.skip_ok_unless_installed('gratia-probe-condor', 'gratia-service')
    try:
        probeconfig = core.config['gratia.config.dir'] + "/condor/ProbeConfig"
        owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, owner)
    except OSError as e:
        if e.errno == 2:
            # suppress "No such file or directory" error
            pass
        else:
            # reraise the exception, as it's an unexpected error
            raise
def test_06_cleanup_condor(self):
    """Put the Gratia condor ProbeConfig back; a missing file is fine."""
    core.skip_ok_unless_installed('gratia-probe-condor', 'gratia-service')
    try:
        probeconfig = core.config['gratia.config.dir'] + "/condor/ProbeConfig"
        config_owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, config_owner)
    except OSError as err:
        # errno 2 is "No such file or directory" — nothing to restore.
        if err.errno != 2:
            raise
def test_11_cleanup_sge(self):
    """Remove the SGE accounting log and restore the Gratia sge ProbeConfig.

    Fixes the Python-2-only 'except OSError, e' syntax (SyntaxError on
    Python 3) by using 'except OSError as e'.
    """
    core.skip_ok_unless_installed('gratia-probe-sge', 'gratia-service')
    try:
        files.remove("/var/log/accounting", True)
        probeconfig = core.config['gratia.config.dir'] + "/sge/ProbeConfig"
        owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, owner)
    except OSError as e:
        if e.errno == 2:
            # suppress "No such file or directory" error
            pass
        else:
            # reraise the exception, as it's an unexpected error
            raise
def test_08_cleanup_sge(self):
    """Drop the SGE accounting log and restore the sge ProbeConfig."""
    core.skip_ok_unless_installed('gratia-probe-sge', 'gratia-service')
    try:
        files.remove("/var/log/accounting", True)
        probeconfig = core.config['gratia.config.dir'] + "/sge/ProbeConfig"
        config_owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, config_owner)
    except OSError as err:
        # errno 2 is "No such file or directory" — nothing to clean up.
        if err.errno != 2:
            raise
def test_01_stop_cvmfs(self):
    """Stop CVMFS, unmount debug mounts, and restore its config files.

    BUG FIX: the original compared the list literal ['cvmfs.started-server']
    to False, which is always false, so the skip never triggered; it now
    consults core.state (matching the sibling version of this test).  Also
    replaces the deprecated assert_ alias with assertTrue.
    """
    core.skip_ok_unless_installed('cvmfs')
    self.skip_ok_unless(core.state['cvmfs.started-server'], 'did not start server')
    try:
        for temp_dir in core.config['cvmfs.debug-dirs']:
            command = ('umount', temp_dir)
            core.check_system(command, 'Manual cvmfs unmount failed')
            files.remove(temp_dir, force=True)
    except KeyError:
        pass  # tempdir was never created
    if core.state['cvmfs.version'] < ('2', '1'):
        command = ('service', 'cvmfs', 'stop')
    else:
        command = ('cvmfs_config', 'umount')
    stdout, _, fail = core.check_system(command, 'Stop Cvmfs server')
    self.assertTrue(stdout.find('FAILED') == -1, fail)
    # Restart autofs to bring network filesystems back (specifically
    # homedirs on el5 fermicloud vms)
    if core.state['cvmfs.version'] >= ('2', '1'):
        stdout, _, fail = core.check_system(('service', 'autofs', 'restart'), 'Restart autofs')
        self.assertTrue(stdout.find('FAILED') == -1, fail)
    files.restore("/etc/fuse.conf", "cvmfs")
    files.restore("/etc/auto.master", "cvmfs")
    files.restore("/etc/cvmfs/default.local", "cvmfs")
    files.restore("/etc/cvmfs/domain.d/cern.ch.local", "cvmfs")
def test_04_stop_munge(self):
    """Stop the munge daemon, verify shutdown, and restore its key file.

    Modernized to the skip_ok_* idiom used by the sibling version of this
    test (replacing missing_rpm()/return and core.skip), and to
    assertTrue/assertFalse instead of the deprecated assert_ alias.
    """
    core.skip_ok_unless_installed(*self.required_rpms)
    self.skip_ok_if(not core.state['munge.running'], 'munge not running')
    command = ('service', 'munge', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop munge daemon')
    self.assertTrue(stdout.find('error') == -1, fail)
    self.assertFalse(os.path.exists(core.config['munge.lockfile']),
                     'munge lock file still present')
    core.state['munge.running'] = False
    files.restore(core.config['munge.keyfile'], 'pbs')
def test_01_stop_gatekeeper(self):
    """Stop the Globus gatekeeper and restore the jobmanager config.

    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue/assertFalse.
    """
    if not core.rpm_is_installed('globus-gatekeeper'):
        core.skip('not installed')
        return
    if not core.state['globus.started-gk']:
        core.skip('did not start server')
        return
    files.restore(core.config['jobmanager-config'], 'globus')
    command = ('service', 'globus-gatekeeper', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Globus gatekeeper')
    self.assertTrue(stdout.find('FAILED') == -1, fail)
    self.assertFalse(os.path.exists(core.config['globus.gk-lockfile']),
                     'Globus gatekeeper run lock file still present')
def test_03_stop_scheduler(self):
    """Stop the PBS scheduler, verify shutdown, and restore the nodes file.

    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue/assertFalse.
    """
    if core.missing_rpm(*self.required_rpms):
        return
    if not core.state['torque.pbs-sched-running']:
        core.skip('did not start pbs scheduler')
        return
    command = ('service', 'pbs_sched', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop pbs scheduler')
    self.assertTrue(stdout.find('error') == -1, fail)
    self.assertFalse(os.path.exists(core.config['torque.sched-lockfile']),
                     'PBS server run lock file still present')
    files.restore(core.config['torque.pbs-nodes-file'], 'pbs')
    core.state['torque.pbs-sched-running'] = False
def test_02_stop_server(self):
    """Stop pbs_server (and trqauthd), then restore its config files.

    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue/assertFalse.
    """
    core.skip_ok_unless_installed(*self.required_rpms)
    self.skip_ok_if(not core.state['torque.pbs-server-started'], 'did not start pbs server')
    command = ('service', 'pbs_server', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop pbs server')
    self.assertTrue(stdout.find('error') == -1, fail)
    self.assertFalse(os.path.exists(core.config['torque.pbs-lockfile']),
                     'PBS server run lock file still present')
    if core.state['trqauthd.started-service']:
        service.stop('trqauthd')
    files.restore(core.config['torque.pbs-servername-file'], 'pbs')
    files.restore(core.config['torque.pbs-nodes-file'], 'pbs')
    # NOTE(review): reads 'torque.pbs-server-started' but writes
    # 'torque.pbs-server-running' — looks intentional (two different flags)
    # but confirm against the setup test before unifying.
    core.state['torque.pbs-server-running'] = False
def test_03_cleanup_gridftp(self):
    """Remove GridFTP logs and restore the gridftp-transfer ProbeConfig.

    Fixes the Python-2-only 'except OSError, e' syntax (SyntaxError on
    Python 3) by using 'except OSError as e'.
    """
    core.skip_ok_unless_installed('gratia-probe-gridftp-transfer', 'gratia-service')
    try:
        files.remove("/var/log/gridftp.log")
        files.remove("/var/log/gridftp-auth.log")
        probeconfig = core.config['gratia.config.dir'] + "/gridftp-transfer/ProbeConfig"
        owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, owner)
    except OSError as e:
        if e.errno == 2:
            # suppress "No such file or directory" error
            pass
        else:
            # reraise the exception, as it's an unexpected error
            raise
def test_04_cleanup_glexec(self):
    """Remove glexec logs/state and restore the glexec ProbeConfig.

    Fixes the Python-2-only 'except OSError, e' syntax (SyntaxError on
    Python 3) by using 'except OSError as e'.
    """
    core.skip_ok_unless_installed('gratia-probe-glexec', 'gratia-service')
    try:
        files.remove("/var/log/glexec.log")
        files.remove("/var/lib/gratia/data/glexec_plugin.chk")
        probeconfig = core.config['gratia.config.dir'] + "/glexec/ProbeConfig"
        owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, owner)
    except OSError as e:
        if e.errno == 2:
            # suppress "No such file or directory" error
            pass
        else:
            # reraise the exception, as it's an unexpected error
            raise
def test_04_cleanup_glexec(self):
    """Clean glexec log/checkpoint files and restore its ProbeConfig."""
    core.skip_ok_unless_installed('gratia-probe-glexec', 'gratia-service')
    try:
        files.remove("/var/log/glexec.log")
        files.remove("/var/lib/gratia/data/glexec_plugin.chk")
        probeconfig = core.config['gratia.config.dir'] + "/glexec/ProbeConfig"
        config_owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, config_owner)
    except OSError as err:
        # errno 2 is "No such file or directory" — nothing to clean up.
        if err.errno != 2:
            raise
def test_02_stop_xrootd_tpc(self):
    """Stop the TPC HTTP servers and restore configs and log files."""
    if core.state['xrootd.tpc.backups-exist']:
        for config_key in ('xrootd.tpc.config-1', 'xrootd.tpc.config-2',
                           'xrootd.tpc.basic-config'):
            files.restore(core.config[config_key], "xrootd")
        files.restore(xrootd.logfile("third-party-copy-1"), "xrootd", ignore_missing=True)
        files.restore(xrootd.logfile("third-party-copy-2"), "xrootd", ignore_missing=True)
    # Skip only if neither server came up (De Morgan of the original test).
    started_any = (core.state['xrootd.started-http-server-1']
                   or core.state['xrootd.started-http-server-2'])
    self.skip_ok_if(not started_any, 'did not start any of the http servers')
    service.check_stop(core.config['xrootd_tpc_service_1'])
    service.check_stop(core.config['xrootd_tpc_service_2'])
def test_03_cleanup_gridftp(self):
    """Drop GridFTP logs and restore the gridftp-transfer ProbeConfig."""
    core.skip_ok_unless_installed('gratia-probe-gridftp-transfer', 'gratia-service')
    try:
        files.remove("/var/log/gridftp.log")
        files.remove("/var/log/gridftp-auth.log")
        probeconfig = core.config['gratia.config.dir'] + "/gridftp-transfer/ProbeConfig"
        config_owner = os.path.basename(os.path.dirname(probeconfig))
        files.restore(probeconfig, config_owner)
    except OSError as err:
        # errno 2 is "No such file or directory" — nothing to clean up.
        if err.errno != 2:
            raise
def test_02_stop_xrootd(self):
    """Undo the standalone xrootd setup and return the service to its prior state."""
    if core.state['xrootd.backups-exist']:
        for config_key in ('xrootd.config', 'xrootd.logging-config', 'xrootd.authfile'):
            files.restore(core.config[config_key], "xrootd")
        files.restore(xrootd.logfile("standalone"), "xrootd", ignore_missing=True)
        if "SCITOKENS" in core.config['xrootd.security']:
            files.restore('/etc/xrootd/scitokens.conf', "xrootd")
        files.remove("/etc/xrootd/config.d/99-osgtest-ztn.cfg", force=True)
    if os.path.exists(xrootd.ROOTDIR):
        shutil.rmtree(xrootd.ROOTDIR)
    # Get xrootd service back to its original state
    self.skip_ok_unless(core.state['xrootd.is-configured'], "xrootd is not configured")
    service_name = core.config['xrootd_service']
    if service.is_running(service_name):
        service.check_stop(service_name, force=True)
    if core.state.get('xrootd.service-was-running', False):
        service.check_start(service_name, force=True)
def test_01_stop_xrootd(self):
    """Restore xrootd configs, stop the service, and remove its temp dir.

    BUG FIX: the original called skip_ok_if(started, 'did not start server'),
    skipping exactly when the server *had* started — the opposite of its
    message (the older variant of this test used 'started == False').  Use
    skip_ok_unless so the test skips when the server did not start.
    """
    if core.state['xrootd.backups-exist']:
        if core.PackageVersion('xrootd') < '1:4.9.0':
            files.restore(core.config['xrootd.config'], "xrootd")
        else:
            files.restore(core.config['xrootd.config-extra'], "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        if not core.rpm_is_installed('xrootd-lcmaps'):
            files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
    core.skip_ok_unless_installed('xrootd', by_dependency=True)
    self.skip_ok_unless(core.state['xrootd.started-server'], 'did not start server')
    service.check_stop(core.config['xrootd_service'])
    files.remove(core.config['xrootd.tmp-dir'], force=True)
def test_01_stop_xrootd(self):
    """Stop the TPC HTTP servers and restore their configuration files."""
    if core.state['xrootd.tpc.backups-exist']:
        for config_key in ('xrootd.tpc.config-1', 'xrootd.tpc.config-2',
                           'xrootd.tpc.basic-config'):
            files.restore(core.config[config_key], "xrootd")
        files.restore('/etc/xrootd/config.d/40-osg-standalone.cfg', "xrootd")
    started_any = (core.state['xrootd.started-http-server-1']
                   or core.state['xrootd.started-http-server-2'])
    self.skip_ok_if(not started_any, 'did not start any of the http servers')
    service.check_stop(core.config['xrootd_tpc_service_1'])
    service.check_stop(core.config['xrootd_tpc_service_2'])
def test_02_restore_config(self):
    """Restore the HTCondor-CE configuration files backed up by setup."""
    core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
    for config_key in ('condor-ce.condor-cfg', 'condor-ce.condor-ce-cfg'):
        files.restore(core.config[config_key], 'condor-ce')
    if core.options.hostcert:
        # The mapfile is only written when running with a host certificate.
        files.restore(core.config['condor-ce.condorce_mapfile'], 'condor-ce')
def test_01_stop_xrootd(self):
    """Restore xrootd configs, stop the service, and remove its temp dir.

    BUG FIX: skip_ok_if(started, 'did not start server') skipped exactly
    when the server *had* started, contradicting its message; use
    skip_ok_unless so the skip fires when the server did not start.
    """
    if core.state['xrootd.backups-exist']:
        files.restore(core.config['xrootd.config'], "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        if not core.rpm_is_installed('xrootd-lcmaps'):
            files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
        if core.el_release() < 7:
            files.restore(core.config['xrootd.service-defaults'], "xrootd")
    core.skip_ok_unless_installed('xrootd', 'globus-proxy-utils', by_dependency=True)
    self.skip_ok_unless(core.state['xrootd.started-server'], 'did not start server')
    service.check_stop(core.config['xrootd_service'])
    files.remove(core.config['xrootd.tmp-dir'], force=True)
def test_02_restore_config(self):
    """Put back the HTCondor-CE config files (and mapfile, if written)."""
    core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
    for config_key in ('condor-ce.condor-cfg', 'condor-ce.condor-ce-cfg'):
        files.restore(core.config[config_key], 'condor-ce')
    if core.state['condor-ce.wrote-mapfile']:
        files.restore(core.config['condor-ce.mapfile'], 'condor-ce')
def test_01_stop_xrootd(self):
    """Stop the CVMFS service and restore its configuration files.

    NOTE(review): the method name says xrootd but the body manages cvmfs —
    presumably copied from an xrootd cleanup test; confirm before renaming.
    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue.
    """
    if not core.rpm_is_installed('cvmfs'):
        core.skip('not installed')
        return
    if not core.state['cvmfs.started-server']:
        core.skip('did not start server')
        return
    command = ('service', 'cvmfs', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Cvmfs server')
    self.assertTrue(stdout.find('FAILED') == -1, fail)
    files.restore("/etc/fuse.conf", "root")
    files.restore("/etc/auto.master", "root")
    files.restore("/etc/cvmfs/default.local", "root")
    files.restore("/etc/cvmfs/domain.d/cern.ch.local", "root")
def test_01_stop_slurm(self):
    """Stop the SLURM service(s) and restore the backed-up configs."""
    self.slurm_reqs()
    slurm_service = core.config['slurm.service-name']
    self.skip_ok_unless(core.state['%s.started-service' % slurm_service], 'did not start slurm')
    # The services require the config files, so stop them before restoring.
    service.check_stop(slurm_service)
    if core.el_release() == 7:
        service.check_stop(core.config['slurm.ctld-service-name'])
    for backup_key in ('slurm.config', 'cgroup.config',
                       'cgroup_allowed_devices_file.conf'):
        files.restore(core.config[backup_key], 'slurm')
def test_01_stop_xrootd(self):
    """Restore GSI xrootd configs and stop the xrootd service.

    BUG FIX: skip_ok_if(started, 'did not start server') skipped exactly
    when the server *had* started, contradicting its message; use
    skip_ok_unless so the skip fires when the server did not start.
    """
    if core.config['xrootd.gsi'] == "ON" and core.state['xrootd.backups-exist']:
        files.restore(core.config['xrootd.config'], "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        if not core.rpm_is_installed('xrootd-lcmaps'):
            files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
    core.skip_ok_unless_installed('xrootd', by_dependency=True)
    self.skip_ok_unless(core.state['xrootd.started-server'], 'did not start server')
    service.check_stop(core.config['xrootd_service'])
def test_01_stop_cvmfs(self):
    """Unmount CVMFS repositories and restore its configuration files.

    BUG FIX: the original compared the list literal ['cvmfs.started-server']
    to False, which is always false, so the skip never triggered; it now
    consults core.state (matching the sibling version of this test).  Also
    replaces the deprecated assert_ alias with assertTrue.
    """
    core.skip_ok_unless_installed('cvmfs')
    self.skip_ok_unless(core.state['cvmfs.started-server'], 'did not start server')
    try:
        for temp_dir in core.config['cvmfs.debug-dirs']:
            command = ('umount', temp_dir)
            core.check_system(command, 'Manual cvmfs unmount failed')
            files.remove(temp_dir, force=True)
    except KeyError:
        pass  # tempdir was never created
    stdout, _, fail = core.check_system(('cvmfs_config', 'umount'), 'Stop Cvmfs server')
    self.assertTrue(stdout.find('FAILED') == -1, fail)
    files.restore("/etc/fuse.conf", "cvmfs")
    files.restore("/etc/auto.master", "cvmfs")
    files.restore("/etc/cvmfs/default.local", "cvmfs")
    files.restore("/etc/cvmfs/domain.d/cern.ch.local", "cvmfs")
def test_01_stop_xrootd(self):
    """Restore GSI xrootd configs and stop the xrootd service.

    BUG FIX: skip_ok_if(started, 'did not start server') skipped exactly
    when the server *had* started, contradicting its message; use
    skip_ok_unless so the skip fires when the server did not start.
    """
    if core.config['xrootd.gsi'] == "ON" and core.state['xrootd.backups-exist']:
        files.restore(core.config['xrootd.config'], "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        if not core.rpm_is_installed('xrootd-lcmaps'):
            files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
    core.skip_ok_unless_installed('xrootd', by_dependency=True)
    self.skip_ok_unless(core.state['xrootd.started-server'], 'did not start server')
    service.check_stop(core.config['xrootd_service'])
def test_01_stop_slurm(self):
    """Stop slurm and slurmctld, then restore the backed-up configs."""
    self.slurm_reqs()
    slurm_service = core.config['slurm.service-name']
    self.skip_ok_unless(core.state['%s.started-service' % slurm_service], 'did not start slurm')
    # Stop the services before restoring: they require the config files.
    service.check_stop(slurm_service)
    service.check_stop(core.config['slurm.ctld-service-name'])
    for backup_key in ('slurm.config', 'cgroup.config',
                       'cgroup_allowed_devices_file.conf'):
        files.restore(core.config[backup_key], 'slurm')
def test_01_stop_cvmfs(self):
    """Unmount CVMFS (debug mounts included) and restore its config files."""
    core.skip_ok_unless_installed('cvmfs')
    self.skip_ok_unless(core.state['cvmfs.started-server'], 'did not start server')
    try:
        for mount_point in core.config['cvmfs.debug-dirs']:
            core.check_system(('umount', mount_point), 'Manual cvmfs unmount failed')
            files.remove(mount_point, force=True)
    except KeyError:
        # the debug dirs were never created
        pass
    stdout, _, fail = core.check_system(('cvmfs_config', 'umount'), 'Stop Cvmfs server')
    self.assert_(stdout.find('FAILED') == -1, fail)
    for config_path in ("/etc/fuse.conf", "/etc/auto.master",
                        "/etc/cvmfs/default.local", "/etc/sysconfig/autofs"):
        files.restore(config_path, "cvmfs")
def test_01_stop_xrootd(self):
    """Stop the Xrootd server and restore its GSI configuration files.

    Fixes: 'X == False' replaced with 'not X'; deprecated assert_ alias
    replaced with assertTrue/assertFalse.
    """
    if not core.rpm_is_installed('xrootd-server'):
        core.skip('not installed')
        return
    if not core.state['xrootd.started-server']:
        core.skip('did not start server')
        return
    command = ('service', 'xrootd', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Xrootd server')
    self.assertTrue(stdout.find('FAILED') == -1, fail)
    self.assertFalse(os.path.exists(core.config['xrootd.pid-file']),
                     'Xrootd server PID file still present')
    if core.config['xrootd.gsi'] == "ON":
        files.restore('/etc/xrootd/xrootd-clustered.cfg', "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
def test_01_stop_xrootd(self):
    """Stop the clustered Xrootd server (SysV or systemd) and restore configs.

    Fixes: '== True' / '== False' comparisons replaced with direct truth
    tests; deprecated assert_ alias replaced with assertTrue/assertFalse.
    """
    if core.config['xrootd.gsi'] == "ON" and core.state['xrootd.backups-exist']:
        files.restore('/etc/xrootd/xrootd-clustered.cfg', "xrootd")
        files.restore('/etc/xrootd/auth_file', "xrootd")
        files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
    core.skip_ok_unless_installed('xrootd', by_dependency=True)
    self.skip_ok_unless(core.state['xrootd.started-server'], 'did not start server')
    if core.el_release() < 7:
        command = ('service', 'xrootd', 'stop')
        stdout, _, fail = core.check_system(command, 'Stop Xrootd server')
        self.assertTrue(stdout.find('FAILED') == -1, fail)
        self.assertFalse(os.path.exists(core.config['xrootd.pid-file']),
                         'Xrootd server PID file still present')
    else:
        core.check_system(('systemctl', 'stop', 'xrootd@clustered'), 'Stop Xrootd server')
        # 'systemctl status' exits 3 for a stopped unit.
        core.check_system(('systemctl', 'status', 'xrootd@clustered'),
                          'Verify Xrootd server stopped', exit=3)
def test_02_restore_configFile(self):
    """Restore the original myproxy-server configuration file."""
    core.skip_ok_unless_installed('myproxy-server')
    files.restore('/etc/myproxy-server.config', 'root')
def test_05_restore_mapfile(self):
    """Put back the system mapfile if the setup phase wrote one."""
    if not core.state['system.wrote_mapfile']:
        return
    files.restore(core.config['system.mapfile'], 'user')
def test_05_restore_mapfile(self):
    """Restore the system mapfile, but only when setup wrote one."""
    if core.state['system.wrote_mapfile']:
        mapfile_path = core.config['system.mapfile']
        files.restore(mapfile_path, 'user')
def test_05_restore_sge_configFile(self):
    """Restore the osg-info-services SGE config file.

    Fix: pass package names as separate arguments — skip_ok_unless_installed
    takes *packages, and every other call site in this suite passes the
    names individually rather than as a single list.
    """
    core.skip_ok_unless_installed('osg-info-services', 'osg-ce-sge')
    files.restore(core.config['osg-info-services.sge-file'], 'root')
def test_01_restore_lcmaps_after_glexec(self):
    """Restore /etc/lcmaps.db after the glexec tests have run."""
    core.skip_ok_unless_installed('glexec')
    files.restore('/etc/lcmaps.db', 'lcmaps')
def test_04_restore_lsf_configFile(self):
    """Restore the osg-info-services LSF config file.

    Fix: pass package names as separate arguments — skip_ok_unless_installed
    takes *packages, and every other call site in this suite passes the
    names individually rather than as a single list.
    """
    core.skip_ok_unless_installed('osg-info-services', 'osg-ce-lsf')
    files.restore(core.config['osg-info-services.lsf-file'], 'root')
def test_01_restore_job_env(self):
    """Restore the OSG job-environment files.

    Fix: pass package names to skip_ok_unless_one_installed as separate
    arguments — the sibling tests in this suite unpack their package lists
    (*self.possible_rpms) rather than passing a single list.
    """
    core.skip_ok_unless_installed('osg-configure')
    core.skip_ok_unless_one_installed('htcondor-ce', 'globus-gatekeeper', 'condor')
    files.restore(core.config['osg.job-environment'], owner='pbs')
    files.restore(core.config['osg.local-job-environment'], owner='pbs')
def test_06_restore_lcmaps(self):
    """Restore the lcmaps.db written for the glexec tests."""
    core.skip_ok_unless_installed('glexec', 'lcmaps-plugins-basic')
    self.skip_ok_unless(core.state['glexec.lcmaps_written'],
                        'did not write lcmaps.db for glexec tests')
    files.restore(core.config['lcmaps.db'], 'glexec')
def test_03_restore_pbs_configFile(self):
    """Restore the osg-info-services PBS config file.

    Fix: pass package names as separate arguments — skip_ok_unless_installed
    takes *packages, and every other call site in this suite passes the
    names individually rather than as a single list.
    """
    core.skip_ok_unless_installed('osg-info-services', 'osg-ce-pbs')
    files.restore(core.config['osg-info-services.pbs-file'], 'root')
def test_02_restore_configFile(self):
    """Put the original myproxy-server configuration back in place."""
    core.skip_ok_unless_installed('myproxy-server')
    config_path = '/etc/myproxy-server.config'
    files.restore(config_path, 'root')
def test_12_restore_user_vo_map_file(self):
    """Restore the Gratia user-vo-map, but only if a backup was taken."""
    core.skip_ok_unless_installed('gratia-service')
    map_path = core.config['gratia.user-vo-map']
    if files.filesBackedup(map_path, 'root'):
        files.restore(map_path, 'root')
def test_13_restore_tomcat_template(self):
    """Restore the modified Tomcat template (EL7 only)."""
    if core.el_release() != 7:
        # Nothing was changed on other releases.
        return
    core.skip_ok_unless_installed(tomcat.pkgname(), 'gratia-service')
    files.restore(core.config['gratia.broken_template'], 'gratia')
def test_01_restore_basic_configFile(self):
    """Restore every basic osg-info-services config file backed up by setup."""
    core.skip_ok_unless_installed('osg-info-services')
    core.skip_ok_unless_one_installed(*self.possible_rpms)
    # Restore in the same order the originals were listed.
    for section in ('storage', 'squid', 'misc', 'gip', 'siteinfo',
                    'gratia', 'gateway'):
        files.restore(core.config['osg-info-services.%s-file' % section], 'root')
def test_07_restore_user_vo_map_file(self):
    """Restore the osg-info-services user-vo-map if it was backed up."""
    core.skip_ok_unless_installed('osg-info-services')
    core.skip_ok_unless_one_installed(*self.possible_rpms)
    map_path = core.config['osg-info-services.user-vo-map']
    if files.filesBackedup(map_path, 'root'):
        files.restore(map_path, 'root')