Example #1
0
    def test_02_condor_ce_run_condor(self):
        """Submit a quick job through the CE with condor_ce_run.

        Requires a running condor-ce and condor, a prepared job
        environment, and either a valid proxy or a condor WRITE token.
        """
        core.skip_ok_unless_installed('htcondor-ce', 'htcondor-ce-client',
                                      'htcondor-ce-condor', 'condor')

        self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')
        self.skip_bad_unless(service.is_running('condor'),
                             'condor not running')
        self.skip_bad_unless(core.state['jobs.env-set'],
                             'job environment not set')
        have_creds = (core.state['proxy.valid']
                      or core.state['token.condor_write_created'])
        self.skip_bad_unless(have_creds, 'requires a scitoken or a proxy')

        command = ['condor_ce_run', '--debug', '-r',
                   '%s:9619' % core.get_hostname(), '/bin/env']

        if core.state['token.condor_write_created']:
            # FIXME: After HTCONDOR-636 is released (targeted for HTCondor-CE 5.1.2),
            # we can stop setting _condor_SCITOKENS_FILE
            token_path = core.config['token.condor_write']
            for env_name in ('_condor_SCITOKENS_FILE', 'BEARER_TOKEN_FILE'):
                os.environ[env_name] = token_path
        else:
            core.log_message(
                'condor WRITE token not found; skipping SCITOKENS auth')

        self.run_job_in_tmp_dir(command, 'condor_ce_run a Condor job')
 def test_06_slurm_trace(self):
     """Trace a blahp job through Slurm.

     Skips OK unless the Slurm packages are installed and the slurm
     service is running; fails-bad when munge or the CE schedd are
     not available.
     """
     self.general_requirements()
     # Unpack the package list: skip_ok_unless_installed takes package
     # names as *args (compare slurm_reqs, which uses *core.SLURM_PACKAGES).
     core.skip_ok_unless_installed(*core.SLURM_PACKAGES)
     self.skip_bad_unless(service.is_running('munge'), 'slurm requires munge')
     self.skip_bad_unless(core.state['condor-ce.schedd-ready'], 'CE schedd not ready to accept jobs')
     self.skip_ok_unless(service.is_running(core.config['slurm.service-name']), 'slurm service not running')
     self.run_blahp_trace('slurm')
Example #3
0
    def test_01_condor_run_pbs(self):
        """Submit a Condor job routed to PBS via the blahp.

        Environment-forwarding verification is enabled only when the
        installed blahp is at least blahp-1.18.11.bosco-4.osg, the first
        build that passes job environments through to PBS correctly.
        """
        core.skip_ok_unless_installed('condor', 'blahp')
        core.skip_ok_unless_installed('torque-mom',
                                      'torque-server',
                                      'torque-scheduler',
                                      by_dependency=True)
        self.skip_bad_unless(core.state['jobs.env-set'],
                             'job environment not set')
        self.skip_bad_unless(service.is_running('condor'),
                             'condor not running')
        self.skip_bad_unless(service.is_running('pbs_server'),
                             'pbs not running')

        # periodic_remove reaps held jobs (JobStatus 5) so a failure
        # cannot wedge the queue.
        command = ('condor_run', '-u', 'grid', '-a', 'grid_resource=pbs', '-a',
                   'periodic_remove=JobStatus==5', '/bin/env')

        # Figure out whether the installed BLAHP package is the same as or later
        # than "blahp-1.18.11.bosco-4.osg*" (in the RPM sense), because it's the
        # first build in which the job environments are correctly passed to PBS.
        # The release following "osg" does not matter and it is easier to ignore
        # the OS major version.  This code may be a useful starting point for a
        # more general library function.
        # labelCompare returns <0/0/>0 like cmp(); <= 0 means the reference
        # triple sorts at or below the installed one.  envra[1:4] is
        # presumably the (epoch-ish, version, release) slice returned by
        # core.get_package_envra — TODO confirm against that helper.
        blahp_envra = core.get_package_envra('blahp')
        blahp_pbs_has_env_vars = (rpm.labelCompare(
            ['blahp', '1.18.11.bosco', '4.osg'], blahp_envra[1:4]) <= 0)

        self.run_job_in_tmp_dir(command,
                                'condor_run a Condor job',
                                verify_environment=blahp_pbs_has_env_vars)
Example #4
0
    def test_05_start_pbs(self):
        """Start pbs_server, configure it, and wait for the node to come up."""
        core.state['pbs_server.started-service'] = False
        core.state['torque.nodes-up'] = False

        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_bad_unless(service.is_running('trqauthd'), 'pbs_server requires trqauthd')
        self.skip_ok_if(service.is_running('pbs_server'), 'pbs server already running')

        server_log = '/var/log/torque/server_logs/' + date.today().strftime('%Y%m%d')
        try:
            server_log_stat = os.stat(server_log)
        except OSError:
            # Log file does not exist yet; monitor_file watches from the start.
            server_log_stat = None

        service.check_start('pbs_server')

        # Wait until the server is up before writing the rest of the config
        core.monitor_file(server_log, server_log_stat, '.*Server Ready.*', 60.0)
        core.check_system("echo '%s' | qmgr %s" % (self.pbs_config, core.get_hostname()),
                          "Configuring pbs server",
                          shell=True)

        # wait up to 10 minutes for the server to recognize the node
        start_time = time.time()
        while (time.time() - start_time) < 600:
            command = ('/usr/bin/qnodes', '-s', core.get_hostname())
            stdout, _, fail = core.check_system(command, 'Get pbs node info')
            self.assert_(stdout.find('error') == -1, fail)
            # BUG FIX: str.find() returns -1 when absent (truthy!) and 0 when
            # the match is at index 0 (falsy), so the old truthiness test was
            # inverted in practice; use membership instead.
            if 'state = free' in stdout:
                core.state['torque.nodes-up'] = True
                break
        if not core.state['torque.nodes-up']:
            self.fail('PBS nodes not coming up')
Example #5
0
    def test_04_configure_pbs(self):
        """Create the initial pbs serverdb (if absent) and the nodes file."""
        core.config[
            'torque.pbs-nodes-file'] = '/var/lib/torque/server_priv/nodes'
        core.config[
            'torque.pbs-serverdb'] = '/var/lib/torque/server_priv/serverdb'
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_bad_unless(service.is_running('trqauthd'),
                             'pbs_server requires trqauthd')
        self.skip_ok_if(service.is_running('pbs_server'),
                        'pbs server already running')

        # Back up any existing serverdb before possibly creating a new one.
        files.preserve(core.config['torque.pbs-serverdb'], 'pbs')
        if not os.path.exists(core.config['torque.pbs-serverdb']):
            # 'pbs_server -t create' initializes serverdb; qterm then shuts
            # the temporary server down after a settling delay.
            command = (
                '/usr/sbin/pbs_server -d /var/lib/torque -t create -f && '
                'sleep 10 && /usr/bin/qterm')
            stdout, _, fail = core.check_system(
                command, 'create initial pbs serverdb config', shell=True)
            self.assert_(stdout.find('error') == -1, fail)

        # This gets wiped if we write it before the initial 'service pbs_server create'
        # However, this file needs to be in place before the service is started so we
        # restart the service after 'initial configuration'
        files.write(
            core.config[
                'torque.pbs-nodes-file'],  # add the local node as a compute node
            "%s np=1 num_node_boards=1\n" % core.get_hostname(),
            owner='pbs')
 def test_06_slurm_trace(self):
     """Trace a blahp job through Slurm."""
     self.general_requirements()
     # Unpack the package list; skip_ok_unless_installed expects package
     # names as separate arguments (as in slurm_reqs).
     core.skip_ok_unless_installed(*core.SLURM_PACKAGES)
     self.skip_bad_unless(service.is_running('munge'), 'slurm requires munge')
     self.skip_bad_unless(core.state['condor-ce.schedd-ready'], 'CE schedd not ready to accept jobs')
     self.skip_ok_unless(service.is_running(core.config['slurm.service-name']), 'slurm service not running')
     self.run_blahp_trace('slurm')
Example #7
0
    def test_02_condor_ce_run_condor(self):
        """Run a quick Condor job through the CE with condor_ce_run."""
        core.skip_ok_unless_installed('htcondor-ce', 'htcondor-ce-client',
                                      'htcondor-ce-condor', 'condor')

        for svc, msg in (('condor-ce', 'ce not running'),
                         ('condor', 'condor not running')):
            self.skip_bad_unless(service.is_running(svc), msg)
        self.skip_bad_unless(core.state['jobs.env-set'],
                             'job environment not set')

        schedd = '%s:9619' % core.get_hostname()
        command = ('condor_ce_run', '-r', schedd, '/bin/env')
        self.run_job_in_tmp_dir(command, 'condor_ce_run a Condor job')
 def test_05_pbs_trace(self):
     """Trace a blahp job through PBS/Torque."""
     torque_rpms = ('torque-mom', 'torque-server', 'torque-scheduler',
                    'torque-client', 'munge')
     core.skip_ok_unless_installed(*torque_rpms, by_dependency=True)
     self.skip_ok_unless(service.is_running('pbs_server'), 'pbs service not running')
     self.check_schedd_ready()
     self.check_write_creds()
     self.run_blahp_trace('pbs')
Example #9
0
    def test_03_start_slurm(self):
        """Start the Slurm services and mark the local node idle."""
        # EL7 uses separate slurmd/slurmctld units; EL6 has a single
        # 'slurm' init script.
        core.config['slurm.service-name'] = 'slurm'
        if core.el_release() == 7:
            core.config['slurm.service-name'] += 'd'
            core.config['slurm.ctld-service-name'] = 'slurmctld'
        core.state['%s.started-service' % core.config['slurm.service-name']] = False
        self.slurm_reqs()
        self.skip_ok_if(service.is_running(core.config['slurm.service-name']), 'slurm already running')

        # Snapshot the controller log before starting so we only match
        # new registration lines.
        stat = core.get_stat(CTLD_LOG)

        if core.el_release() == 7:
            # slurmctld is handled by /etc/init.d/slurm on EL6
            command = ['slurmctld']
            core.check_system(command, 'enable slurmctld')
            service.check_start(core.config['slurm.service-name'])
            service.check_start(core.config['slurm.ctld-service-name'])
        else:
            service.check_start(core.config['slurm.service-name'])

        # Wait for the controller to register the node, then for slurmd
        # itself to log that it started.
        core.monitor_file(CTLD_LOG,
                          stat,
                          'slurm_rpc_node_registration complete for %s' % SHORT_HOSTNAME,
                          60.0)
        log_stat = core.get_stat(SLURM_LOG)
        core.monitor_file(SLURM_LOG,
                          log_stat,
                          'slurmd started',
                          60.0)
        # Clear any DOWN/DRAIN state left over from previous runs.
        command = ['scontrol', 'update', 'nodename=%s' % SHORT_HOSTNAME, 'state=idle']
        core.check_system(command, 'enable slurm node')
    def test_04_start_condorce(self):
        """Start the HTCondor-CE service and wait for its schedd."""
        lockfile = ('/var/lock/condor-ce/htcondor-ceLock'
                    if core.el_release() >= 7
                    else '/var/lock/subsys/condor-ce')
        core.config['condor-ce.lockfile'] = lockfile
        core.state['condor-ce.started-service'] = False
        core.state['condor-ce.schedd-ready'] = False

        core.skip_ok_unless_installed('condor', 'htcondor-ce',
                                      'htcondor-ce-client')
        collector_log = condor.ce_config_val('COLLECTOR_LOG')
        core.config['condor-ce.collectorlog'] = collector_log

        if service.is_running('condor-ce'):
            core.state['condor-ce.schedd-ready'] = True
            self.skip_ok('already running')

        # Snapshot the log before starting so the Schedd line is caught.
        stat = core.get_stat(collector_log)

        service.check_start('condor-ce', timeout=20)

        if condor.wait_for_daemon(collector_log, stat, 'Schedd', 300.0):
            core.state['condor-ce.schedd-ready'] = True
Example #11
0
 def test_06_slurm_trace(self):
     """Trace a blahp job through Slurm."""
     # Unpack: skip_ok_unless_installed takes package names as *args
     # (compare slurm_reqs, which passes *core.SLURM_PACKAGES).
     core.skip_ok_unless_installed(*core.SLURM_PACKAGES)
     self.skip_bad_unless(core.state['condor-ce.schedd-ready'], 'CE schedd not ready to accept jobs')
     self.skip_ok_unless(service.is_running(core.config['slurm.service-name']), 'slurm service not running')
     self.check_schedd_ready()
     self.check_write_creds()
     self.run_blahp_trace('slurm')
 def test_05_pbs_trace(self):
     """Trace a blahp job through PBS/Torque."""
     self.general_requirements()
     self.skip_bad_unless(core.state['condor-ce.schedd-ready'], 'CE schedd not ready to accept jobs')
     pbs_rpms = ('torque-mom', 'torque-server', 'torque-scheduler',
                 'torque-client', 'munge')
     core.skip_ok_unless_installed(*pbs_rpms, by_dependency=True)
     self.skip_ok_unless(service.is_running('pbs_server'), 'pbs service not running')
     self.run_blahp_trace('pbs')
Example #13
0
    def test_03_start_slurm(self):
        """Start the Slurm services and mark the local node idle."""
        # EL7 uses separate slurmd/slurmctld units; EL6 has a single
        # 'slurm' init script.
        core.config['slurm.service-name'] = 'slurm'
        if core.el_release() == 7:
            core.config['slurm.service-name'] += 'd'
            core.config['slurm.ctld-service-name'] = 'slurmctld'
        core.state['%s.started-service' %
                   core.config['slurm.service-name']] = False
        self.slurm_reqs()
        self.skip_ok_if(service.is_running(core.config['slurm.service-name']),
                        'slurm already running')

        # Snapshot the controller log before starting so we only match
        # new registration lines.
        stat = core.get_stat(CTLD_LOG)

        if core.el_release() == 7:
            # slurmctld is handled by /etc/init.d/slurm on EL6
            command = ['slurmctld']
            core.check_system(command, 'enable slurmctld')
            service.check_start(core.config['slurm.service-name'])
            service.check_start(core.config['slurm.ctld-service-name'])
        else:
            service.check_start(core.config['slurm.service-name'])

        # Wait for node registration with the controller, then for slurmd
        # itself to log that it started.
        core.monitor_file(
            CTLD_LOG, stat,
            'slurm_rpc_node_registration complete for %s' % SHORT_HOSTNAME,
            60.0)
        log_stat = core.get_stat(SLURM_LOG)
        core.monitor_file(SLURM_LOG, log_stat, 'slurmd started', 60.0)
        # Clear any DOWN/DRAIN state left over from previous runs.
        command = [
            'scontrol', 'update',
            'nodename=%s' % SHORT_HOSTNAME, 'state=idle'
        ]
        core.check_system(command, 'enable slurm node')
 def test_05_pbs_trace(self):
     """Trace a blahp job through PBS/Torque (requires a ready CE schedd)."""
     self.general_requirements()
     self.skip_bad_unless(core.state['condor-ce.schedd-ready'], 'CE schedd not ready to accept jobs')
     required = ('torque-mom', 'torque-server', 'torque-scheduler',
                 'torque-client', 'munge')
     core.skip_ok_unless_installed(*required, by_dependency=True)
     pbs_up = service.is_running('pbs_server')
     self.skip_ok_unless(pbs_up, 'pbs service not running')
     self.run_blahp_trace('pbs')
def start_xrootd(instance):
    """Start xrootd@<instance>; on failure dump the log tail and re-raise."""
    unit = "xrootd@%s" % instance
    if service.is_running(unit):
        return
    try:
        service.check_start(unit, min_up_time=3)
    except Exception:
        # Surface recent log lines in the test output before propagating.
        core.system("tail -n 75 /var/log/xrootd/%s/xrootd.log" % instance,
                    shell=True)
        raise
    def test_01_start_gatekeeper(self):
        """Start the Globus gatekeeper with TRACE-level job-manager logging."""
        core.config['globus-gatekeeper.started-service'] = False
        core.state['globus-gatekeeper.running'] = False
        core.skip_ok_unless_installed('globus-gatekeeper')

        if not service.is_running('globus-gatekeeper'):
            # DEBUG: Set up gatekeeper debugging
            core.config['jobmanager-config'] = '/etc/globus/globus-gram-job-manager.conf'
            conf_path = core.config['jobmanager-config']
            files.append(conf_path, '-log-levels TRACE|DEBUG|FATAL|ERROR|WARN|INFO\n', owner='globus')
            files.append(conf_path, '-log-pattern /var/log/globus/gram_$(LOGNAME)_$(DATE).log\n', backup=False)

            if not os.path.exists('/var/log/globus'):
                os.mkdir('/var/log/globus')
                # BUG FIX: 0777 is Python-2-only octal syntax (SyntaxError on
                # Python 3); 0o777 is valid on both 2.6+ and 3.
                os.chmod('/var/log/globus', 0o777)

            service.start('globus-gatekeeper')
            core.state['globus-gatekeeper.running'] = service.is_running('globus-gatekeeper')
            self.assert_(core.state['globus-gatekeeper.running'], 'globus-gatekeeper failed to start')
Example #17
0
    def test_01_start_munge(self):
        """Create a munge key and start the munge service."""
        core.config['munge.keyfile'] = '/etc/munge/munge.key'
        core.state['munge.started-service'] = False
        core.skip_ok_unless_installed('munge')
        self.skip_ok_if(service.is_running('munge'), 'already running')

        # Keep a backup of any pre-existing key, then force-create a new one.
        files.preserve(core.config['munge.keyfile'], 'munge')
        stdout, _, fail = core.check_system(('/usr/sbin/create-munge-key', '-f'),
                                            'Create munge key')
        self.assert_(stdout.find('error') == -1, fail)
        service.check_start('munge')
Example #18
0
 def test_03_start_trqauthd(self):
     """Write the torque server_name file and start trqauthd."""
     core.state['trqauthd.started-service'] = False
     core.config['torque.pbs-servername-file'] = '/var/lib/torque/server_name'
     core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
     self.skip_ok_if(service.is_running('trqauthd'), 'trqauthd is already running')
     # set hostname as servername instead of localhost
     # config required before starting trqauthd
     servername_file = core.config['torque.pbs-servername-file']
     files.write(servername_file, "%s" % core.get_hostname(), owner='pbs')
     service.check_start('trqauthd')
Example #19
0
    def test_04_configure_pbs(self):
        """Create the initial pbs serverdb (if absent) and the nodes file."""
        core.config['torque.pbs-nodes-file'] = '/var/lib/torque/server_priv/nodes'
        core.config['torque.pbs-serverdb'] = '/var/lib/torque/server_priv/serverdb'
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_bad_unless(service.is_running('trqauthd'), 'pbs_server requires trqauthd')
        self.skip_ok_if(service.is_running('pbs_server'), 'pbs server already running')

        # Back up any existing serverdb before possibly creating a fresh one.
        files.preserve(core.config['torque.pbs-serverdb'], 'pbs')
        if not os.path.exists(core.config['torque.pbs-serverdb']):
            # 'pbs_server -t create' writes a new serverdb; qterm stops the
            # temporary server after a settling delay.
            command = ('/usr/sbin/pbs_server -d /var/lib/torque -t create -f && '
                       'sleep 10 && /usr/bin/qterm')
            stdout, _, fail = core.check_system(command, 'create initial pbs serverdb config', shell=True)
            self.assert_(stdout.find('error') == -1, fail)

        # This gets wiped if we write it before the initial 'service pbs_server create'
        # However, this file needs to be in place before the service is started so we
        # restart the service after 'initial configuration'
        files.write(core.config['torque.pbs-nodes-file'], # add the local node as a compute node
                    "%s np=1 num_node_boards=1\n" % core.get_hostname(),
                    owner='pbs')
    def test_01_start_gridftp(self):
        """Start the Globus GridFTP server, recording its state."""
        core.state['gridftp.started-server'] = False
        core.state['gridftp.running-server'] = False

        core.skip_ok_unless_installed('globus-gridftp-server-progs')
        if service.is_running('globus-gridftp-server'):
            # Already up: record it, but don't claim we started it.
            core.state['gridftp.running-server'] = True
            return

        service.check_start('globus-gridftp-server')
        core.state['gridftp.started-server'] = True
        core.state['gridftp.running-server'] = True
    def test_01_start_condor_cron(self):
        """Start condor-cron unless it is already running."""
        for key in ('condor-cron.started-service', 'condor-cron.running-service'):
            core.state[key] = False

        core.skip_ok_unless_installed('condor-cron')
        if service.is_running('condor-cron', timeout=1):
            core.state['condor-cron.running-service'] = True
            self.skip_ok('already running')

        service.check_start('condor-cron')

        core.state['condor-cron.running-service'] = True
        core.state['condor-cron.started-service'] = True
Example #22
0
 def test_03_start_trqauthd(self):
     """Configure the torque server name, then start trqauthd."""
     core.state['trqauthd.started-service'] = False
     servername_file = '/var/lib/torque/server_name'
     core.config['torque.pbs-servername-file'] = servername_file
     core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
     self.skip_ok_if(service.is_running('trqauthd'),
                     'trqauthd is already running')
     # set hostname as servername instead of localhost
     # config required before starting trqauthd
     files.write(servername_file, "%s" % core.get_hostname(), owner='pbs')
     service.check_start('trqauthd')
    def test_01_start_condor_cron(self):
        """Ensure the condor-cron service is up, starting it if needed."""
        core.state['condor-cron.started-service'] = False
        core.state['condor-cron.running-service'] = False

        core.skip_ok_unless_installed('condor-cron')
        already_up = service.is_running('condor-cron', timeout=1)
        if already_up:
            core.state['condor-cron.running-service'] = True
            self.skip_ok('already running')

        service.check_start('condor-cron')
        core.state['condor-cron.running-service'] = True
        core.state['condor-cron.started-service'] = True
Example #24
0
    def test_05_start_pbs(self):
        """Start pbs_server, configure it, and wait for the node to come up."""
        core.state['pbs_server.started-service'] = False
        core.state['torque.nodes-up'] = False

        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_bad_unless(service.is_running('trqauthd'),
                             'pbs_server requires trqauthd')
        self.skip_ok_if(service.is_running('pbs_server'),
                        'pbs server already running')

        server_log = '/var/log/torque/server_logs/' + date.today().strftime(
            '%Y%m%d')
        try:
            server_log_stat = os.stat(server_log)
        except OSError:
            # Log does not exist yet; monitor_file watches from the start.
            server_log_stat = None

        service.check_start('pbs_server')

        # Wait until the server is up before writing the rest of the config
        core.monitor_file(server_log, server_log_stat, '.*Server Ready.*',
                          60.0)
        core.check_system("echo '%s' | qmgr %s" %
                          (self.pbs_config, core.get_hostname()),
                          "Configuring pbs server",
                          shell=True)

        # wait up to 10 minutes for the server to recognize the node
        start_time = time.time()
        while (time.time() - start_time) < 600:
            command = ('/usr/bin/qnodes', '-s', core.get_hostname())
            stdout, _, fail = core.check_system(command, 'Get pbs node info')
            self.assert_(stdout.find('error') == -1, fail)
            # BUG FIX: str.find() returns -1 when absent (truthy!) and 0 when
            # the match is at index 0 (falsy); use membership instead.
            if 'state = free' in stdout:
                core.state['torque.nodes-up'] = True
                break
        if not core.state['torque.nodes-up']:
            self.fail('PBS nodes not coming up')
    def test_02_reconfigure_condor(self):
        """Reconfigure a running Condor and verify it stays up."""
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
        self.skip_bad_unless(core.state['condor.running-service'], 'Condor not running')

        # Ensure that the Condor master is available for reconfig
        master_ready = condor.wait_for_daemon(core.config['condor.collectorlog'],
                                              core.config['condor.collectorlog_stat'],
                                              'Master',
                                              300.0)
        self.failUnless(master_ready, 'Condor Master not available for reconfig')

        core.check_system(('condor_reconfig', '-debug'), 'Reconfigure Condor')
        self.assert_(service.is_running('condor', timeout=10), 'Condor not running after reconfig')
Example #26
0
    def test_01_start_mom(self):
        """Write pbs_mom config files and start the mom daemon."""
        core.state['pbs_mom.started-service'] = False
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_ok_if(service.is_running('pbs_mom'), 'PBS mom already running')

        # Point the mom at the local pbs_server.
        mom_config = '/var/lib/torque/mom_priv/config'
        core.config['torque.mom-config'] = mom_config
        files.write(mom_config, "$pbsserver %s\n" % core.get_hostname(), owner='pbs')
        # Single-board layout for the one-node test cluster.
        mom_layout = '/var/lib/torque/mom_priv/mom.layout'
        core.config['torque.mom-layout'] = mom_layout
        files.write(mom_layout, "nodes=0", owner='pbs')
        service.check_start('pbs_mom')
Example #27
0
    def test_01_start_munge(self):
        """Generate a fresh munge key and start munge."""
        core.config['munge.keyfile'] = '/etc/munge/munge.key'
        core.state['munge.started-service'] = False
        core.skip_ok_unless_installed('munge')
        self.skip_ok_if(service.is_running('munge'), 'already running')

        # Preserve any existing key before overwriting it with -f.
        files.preserve(core.config['munge.keyfile'], 'munge')
        keygen_cmd = ('/usr/sbin/create-munge-key', '-f')
        stdout, _, fail = core.check_system(keygen_cmd, 'Create munge key')
        self.assert_(stdout.find('error') == -1, fail)
        service.check_start('munge')
Example #28
0
    def test_01_start_mom(self):
        """Configure and launch the pbs_mom daemon."""
        core.state['pbs_mom.started-service'] = False
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_ok_if(service.is_running('pbs_mom'),
                        'PBS mom already running')

        # The mom must know which host runs pbs_server.
        core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
        files.write(core.config['torque.mom-config'],
                    "$pbsserver %s\n" % core.get_hostname(),
                    owner='pbs')
        # One-node layout for the test cluster.
        layout_path = '/var/lib/torque/mom_priv/mom.layout'
        core.config['torque.mom-layout'] = layout_path
        files.write(layout_path, "nodes=0", owner='pbs')
        service.check_start('pbs_mom')
Example #29
0
    def setUp(self):
        """Common preconditions: CE installed and running, auth configured."""
        # Enforce SciToken or GSI auth for testing
        os.environ['_condor_SEC_CLIENT_AUTHENTICATION_METHODS'] = 'SCITOKENS, GSI'
        core.skip_ok_unless_installed('condor', 'htcondor-ce')
        self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')

        self.command = []
        if not core.state['token.condor_write_created']:
            core.log_message('condor WRITE token not found; skipping SCITOKENS auth')
            return
        # FIXME: After HTCONDOR-636 is released (targeted for HTCondor-CE 5.1.2),
        # we can stop setting _condor_SCITOKENS_FILE
        token_path = core.config['token.condor_write']
        for var in ('_condor_SCITOKENS_FILE', 'BEARER_TOKEN_FILE'):
            os.environ[var] = token_path
Example #30
0
    def test_01_start_condor(self):
        """Start the condor service if it is not already running."""
        # Initialize both state keys so later tests can read them even when
        # this test returns early (the sibling variant does the same).
        core.state['condor.started-service'] = False
        core.state['condor.running-service'] = False

        core.skip_ok_unless_installed('condor')
        core.config['condor.collectorlog'] = condor.config_val('COLLECTOR_LOG')

        if service.is_running('condor'):
            core.state['condor.running-service'] = True
            return

        # Snapshot the collector log so later tests can wait on new entries.
        core.config['condor.collectorlog_stat'] = core.get_stat(
            core.config['condor.collectorlog'])

        service.check_start('condor')
        core.state['condor.started-service'] = True
        core.state['condor.running-service'] = True
    def test_02_stop_xrootd(self):
        """Restore xrootd configuration backups and reset service state."""
        if core.state['xrootd.backups-exist']:
            files.restore(core.config['xrootd.config'], "xrootd")
            files.restore(core.config['xrootd.logging-config'], "xrootd")
            files.restore(core.config['xrootd.authfile'], "xrootd")
            # The logfile may never have been created during the run.
            files.restore(xrootd.logfile("standalone"), "xrootd", ignore_missing=True)
            if "SCITOKENS" in core.config['xrootd.security']:
                files.restore('/etc/xrootd/scitokens.conf', "xrootd")
                files.remove("/etc/xrootd/config.d/99-osgtest-ztn.cfg", force=True)
            if os.path.exists(xrootd.ROOTDIR):
                shutil.rmtree(xrootd.ROOTDIR)

        # Get xrootd service back to its original state
        self.skip_ok_unless(core.state['xrootd.is-configured'], "xrootd is not configured")
        xrootd_service = core.config['xrootd_service']
        if service.is_running(xrootd_service):
            service.check_stop(xrootd_service, force=True)
        if core.state.get('xrootd.service-was-running', False):
            # It was running before the tests touched it; bring it back up.
            service.check_start(xrootd_service, force=True)
    def test_01_start_condor(self):
        """Write a personal-condor config fragment and start condor."""
        core.state['condor.started-service'] = False
        core.state['condor.running-service'] = False

        core.skip_ok_unless_installed('condor')
        core.config['condor.collectorlog'] = condor.config_val('COLLECTOR_LOG')

        if service.is_running('condor'):
            core.state['condor.running-service'] = True
            return

        # Drop our fragment into the last LOCAL_CONFIG_DIR entry
        # (presumably the highest-precedence one — confirm against the
        # HTCondor config ordering).
        config_dirs = re.split(r'[, ]+', condor.config_val('LOCAL_CONFIG_DIR'))
        core.config['condor.personal_condor'] = join(config_dirs[-1], '99-personal-condor.conf')
        files.write(core.config['condor.personal_condor'], personal_condor_config, owner='condor', chmod=0o644)

        # Snapshot the collector log so later tests can wait on new entries.
        core.config['condor.collectorlog_stat'] = core.get_stat(core.config['condor.collectorlog'])

        service.check_start('condor')
        core.state['condor.started-service'] = True
        core.state['condor.running-service'] = True
    def test_04_start_condorce(self):
        """Start the HTCondor-CE service and wait for its schedd."""
        if core.el_release() >= 7:
            core.config['condor-ce.lockfile'] = '/var/lock/condor-ce/htcondor-ceLock'
        else:
            core.config['condor-ce.lockfile'] = '/var/lock/subsys/condor-ce'
        core.state['condor-ce.started-service'] = False
        core.state['condor-ce.schedd-ready'] = False

        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
        core.config['condor-ce.collectorlog'] = condor.ce_config_val('COLLECTOR_LOG')

        if service.is_running('condor-ce'):
            core.state['condor-ce.schedd-ready'] = True
            self.skip_ok('already running')

        # Snapshot the collector log BEFORE starting the service so
        # wait_for_daemon cannot miss a Schedd line written during startup
        # (matches the other test_04_start_condorce variant in this suite).
        stat = core.get_stat(core.config['condor-ce.collectorlog'])

        service.check_start('condor-ce')

        if condor.wait_for_daemon(core.config['condor-ce.collectorlog'], stat, 'Schedd', 300.0):
            core.state['condor-ce.schedd-ready'] = True
Example #34
0
    def test_08_start_xrootd(self):
        """(Re)start the standalone xrootd instance with the test config."""
        self.skip_ok_unless(core.state['xrootd.is-configured'], "xrootd is not configured")
        # Multiuser setups need the privileged unit variant.
        if core.config['xrootd.multiuser']:
            core.config['xrootd_service'] = "xrootd-privileged@standalone"
        else:
            core.config['xrootd_service'] = "xrootd@standalone"

        core.state['xrootd.service-was-running'] = False
        # Stop the service so it gets our new config
        if service.is_running(core.config['xrootd_service']):
            core.state['xrootd.service-was-running'] = True
            service.stop(core.config['xrootd_service'], force=True)
            time.sleep(5)

        # clear the logfile so it only contains our run
        if core.options.manualrun:
            files.preserve_and_remove(xrootd.logfile("standalone"), "xrootd")
        try:
            service.check_start(core.config['xrootd_service'], min_up_time=5)
        except Exception:
            # Dump the tail of the log before propagating the failure.
            xrootd.dump_log(125, "standalone")
            raise
    def test_05_start_bestman(self):
        """Start the bestman2 server, dumping its logs on failure."""
        core.config['bestman.pid-file'] = '/var/run/bestman2.pid'
        core.state['bestman.started-server'] = False
        core.state['bestman.server-running'] = False

        core.skip_ok_unless_installed('bestman2-server', 'bestman2-client', 'gums-service')
        if service.is_running('bestman2'):
            core.state['bestman.server-running'] = True
            self.skip_ok('bestman2 already running')

        # Dump the bestman logs into the test logs for debugging
        def _dump_logfiles():
            for name in ('bestman2.log', 'event.srm.log'):
                core.system(('cat', os.path.join('/var/log/bestman2', name)))

        try:
            service.check_start('bestman2')
        except AssertionError:
            _dump_logfiles()
            raise
        core.state['bestman.started-server'] = True
        core.state['bestman.server-running'] = True
Example #36
0
def is_running():
    """Return True if the mysql service is running.

    BUG FIX: the original dropped the result of service.is_running(),
    so this function always returned None; return the value so callers
    can actually use the check.
    """
    return service.is_running('mysql', init_script=init_script())
Example #37
0
 def slurm_reqs(self):
     """Common Slurm prerequisites: packages installed and munge running."""
     packages = core.SLURM_PACKAGES
     core.skip_ok_unless_installed(*packages)
     self.skip_bad_unless(service.is_running('munge'), 'slurm requires munge')
Example #38
0
 def test_02_start_pbs_sched(self):
     """Start the PBS scheduler daemon if needed."""
     core.state['pbs_sched.started-service'] = False
     core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
     already_up = service.is_running('pbs_sched')
     self.skip_ok_if(already_up, 'PBS sched already running')
     service.check_start('pbs_sched')
 def test_03_start_cache(self):
     """Ensure the stashcache cache-server xrootd instance is running."""
     cache_svc = "xrootd@stashcache-cache-server"
     if service.is_running(cache_svc):
         return
     service.check_start(cache_svc)
 def general_requirements(self):
     """Skip unless the HTCondor-CE stack is installed and the CE is up."""
     ce_packages = ('condor', 'htcondor-ce', 'htcondor-ce-client')
     core.skip_ok_unless_installed(*ce_packages)
     self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')
 def skip_bad_unless_running(self, *services):
     """Mark the test bad unless every named service is running."""
     for name in services:
         running = service.is_running(name)
         self.skip_bad_unless(running, "%s not running" % name)
Example #42
0
def start_xrootd(instance):
    """Start the xrootd@<instance> service unless it is already up."""
    unit = "xrootd@%s" % instance
    if service.is_running(unit):
        return
    service.check_start(unit)
Example #43
0
 def test_02_start_pbs_sched(self):
     """Launch pbs_sched unless it is already running."""
     core.state['pbs_sched.started-service'] = False
     core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
     sched_running = service.is_running('pbs_sched')
     self.skip_ok_if(sched_running, 'PBS sched already running')
     service.check_start('pbs_sched')
Example #44
0
 def slurm_reqs(self):
     """Common Slurm preconditions: packages present, munge running."""
     core.skip_ok_unless_installed(*core.SLURM_PACKAGES)
     munge_up = service.is_running('munge')
     self.skip_bad_unless(munge_up, 'slurm requires munge')
 def general_requirements(self):
     """Require the HTCondor-CE stack installed and the CE service running."""
     core.skip_ok_unless_installed(*('condor', 'htcondor-ce', 'htcondor-ce-client'))
     ce_up = service.is_running('condor-ce')
     self.skip_bad_unless(ce_up, 'ce not running')
 def test_02_start_origin(self):
     """Ensure the stashcache origin xrootd instance is running."""
     origin_svc = "xrootd@stashcache-origin-server"
     if service.is_running(origin_svc):
         return
     service.check_start(origin_svc)