Code Example #1
    def test_01_start_munge(self):
        if core.el_release() == 5:
            core.config['munge.lockfile'] = '/var/lock/subsys/munge'
        elif core.el_release() == 6:
            core.config['munge.lockfile'] = '/var/lock/subsys/munged'
        core.config['munge.keyfile'] = '/etc/munge/munge.key'
        core.state['munge.running'] = False

        if core.missing_rpm(*self.required_rpms):
            return
        if os.path.exists(core.config['munge.lockfile']):
            core.skip('munge apparently running')
            return

        files.preserve(core.config['munge.keyfile'], 'pbs')
        command = (
            '/usr/sbin/create-munge-key',
            '-f',
        )
        stdout, _, fail = core.check_system(command, 'Create munge key')
        self.assert_(stdout.find('error') == -1, fail)
        command = ('service', 'munge', 'start')
        stdout, _, fail = core.check_system(command, 'Start munge daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['munge.lockfile']),
                     'munge lock file missing')
        core.state['munge.running'] = True
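Throughout these examples, core.check_system is unpacked as (stdout, stderr, fail), where fail is a pre-built diagnostic message handed to later assertions. A minimal sketch of that assumed contract (not osg-test's actual implementation):

import subprocess

def check_system_sketch(command, message, shell=False):
    # Run the command and capture both output streams as text.
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    # Pre-formatted diagnostic, returned even on success so callers can
    # reuse it in follow-up assertions (as the tests above do).
    fail = '%s failed (exit %d):\nSTDOUT:\n%s\nSTDERR:\n%s' % (
        message, proc.returncode, stdout, stderr)
    assert proc.returncode == 0, fail
    return stdout, stderr, fail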
Code Example #2
File: test_290_slurm.py Project: ddavila0/osg-test
    def test_03_start_slurm(self):
        core.config['slurm.service-name'] = 'slurm'
        if core.el_release() == 7:
            core.config['slurm.service-name'] += 'd'
            core.config['slurm.ctld-service-name'] = 'slurmctld'
        core.state['%s.started-service' %
                   core.config['slurm.service-name']] = False
        self.slurm_reqs()
        self.skip_ok_if(service.is_running(core.config['slurm.service-name']),
                        'slurm already running')

        stat = core.get_stat(CTLD_LOG)

        if core.el_release() == 7:
            # slurmctld is handled by /etc/init.d/slurm on EL6
            command = ['slurmctld']
            core.check_system(command, 'enable slurmctld')
            service.check_start(core.config['slurm.service-name'])
            service.check_start(core.config['slurm.ctld-service-name'])
        else:
            service.check_start(core.config['slurm.service-name'])

        core.monitor_file(
            CTLD_LOG, stat,
            'slurm_rpc_node_registration complete for %s' % SHORT_HOSTNAME,
            60.0)
        log_stat = core.get_stat(SLURM_LOG)
        core.monitor_file(SLURM_LOG, log_stat, 'slurmd started', 60.0)
        command = [
            'scontrol', 'update',
            'nodename=%s' % SHORT_HOSTNAME, 'state=idle'
        ]
        core.check_system(command, 'enable slurm node')
Code Example #3
    def test_02_start_mom(self):
        core.config['torque.mom-lockfile'] = '/var/lock/subsys/pbs_mom'
        core.state['torque.pbs-mom-running'] = False

        if core.missing_rpm(*self.required_rpms):
            return
        if os.path.exists(core.config['torque.mom-lockfile']):
            core.skip('pbs mom apparently running')
            return

        if core.el_release() == 5:
            core.config['torque.mom-config'] = '/var/torque/mom_priv/config'
        elif core.el_release() == 6:
            core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
        else:
            core.skip('Distribution version not supported')

        files.write(core.config['torque.mom-config'],
                    "$pbsserver %s\n" % core.get_hostname(),
                    owner='pbs')

        command = ('service', 'pbs_mom', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs mom daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.mom-lockfile']),
                     'PBS mom run lock file missing')
        core.state['torque.pbs-mom-running'] = True
Code Example #4
File: test_290_slurm.py Project: brianhlin/osg-test
    def test_03_start_slurm(self):
        core.config['slurm.service-name'] = 'slurm'
        if core.el_release() == 7:
            core.config['slurm.service-name'] += 'd'
            core.config['slurm.ctld-service-name'] = 'slurmctld'
        core.state['%s.started-service' % core.config['slurm.service-name']] = False
        self.slurm_reqs()
        self.skip_ok_if(service.is_running(core.config['slurm.service-name']), 'slurm already running')

        stat = core.get_stat(CTLD_LOG)

        if core.el_release() == 7:
            # slurmctld is handled by /etc/init.d/slurm on EL6
            command = ['slurmctld']
            core.check_system(command, 'enable slurmctld')
            service.check_start(core.config['slurm.service-name'])
            service.check_start(core.config['slurm.ctld-service-name'])
        else:
            service.check_start(core.config['slurm.service-name'])

        core.monitor_file(CTLD_LOG,
                          stat,
                          'slurm_rpc_node_registration complete for %s' % SHORT_HOSTNAME,
                          60.0)
        log_stat = core.get_stat(SLURM_LOG)
        core.monitor_file(SLURM_LOG,
                          log_stat,
                          'slurmd started',
                          60.0)
        command = ['scontrol', 'update', 'nodename=%s' % SHORT_HOSTNAME, 'state=idle']
        core.check_system(command, 'enable slurm node')
Code Example #5
File: tomcat.py Project: edquist/osg-test
def majorver():
    "Tomcat major version"
    if core.el_release() == 7:
        return 7
    elif core.el_release() == 6:
        return 6
    else:
        return 5
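Tests elsewhere on this page call tomcat.pkgname() (see Code Examples #22 and #23); a plausible sketch of how majorver() could feed into it, assuming EL5/EL6 ship versioned package names and EL7 an unversioned one:

def pkgname():
    "Name of the Tomcat package (a sketch; the exact mapping is an assumption)"
    ver = majorver()
    if ver >= 7:
        return 'tomcat'          # assumption: EL7's package is just 'tomcat'
    return 'tomcat%d' % ver      # 'tomcat6' on EL6, 'tomcat5' on EL5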
Code Example #6
    def test_04_start_pbs(self):
        core.config['torque.pbs-lockfile'] = '/var/lock/subsys/pbs_server'
        core.state['torque.pbs-server-running'] = False
        core.state['torque.pbs-configured'] = False
        core.state['torque.nodes-up'] = False
        if core.el_release() == 5:
            core.config['torque.pbs-nodes-file'] = '/var/torque/server_priv/nodes'
        elif core.el_release() == 6:
            core.config['torque.pbs-nodes-file'] = '/var/lib/torque/server_priv/nodes'
        else:
            core.skip('Distribution version not supported')

        if core.missing_rpm(*self.required_rpms):
            return
        if os.path.exists(core.config['torque.pbs-lockfile']):
            core.skip('pbs server apparently running')
            return

        # add the local node as a compute node
        files.write(core.config['torque.pbs-nodes-file'],
                    "%s np=1\n" % core.get_hostname(),
                    owner='pbs')
        command = ('service', 'pbs_server', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs server daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.pbs-lockfile']),
                     'pbs server run lock file missing')
        core.state['torque.pbs-server'] = True
        core.state['torque.pbs-server-running'] = True

        core.check_system("echo '%s' | qmgr %s" %
                          (self.pbs_config, core.get_hostname()),
                          "Configuring pbs server",
                          shell=True)
        core.state['torque.pbs-configured'] = True

        # wait up to 10 minutes for the server to come up and trigger a
        # failure if that doesn't happen
        start_time = time.time()
        while time.time() - start_time < 600:
            command = ('/usr/bin/qnodes', '-s', core.get_hostname())
            stdout, _, fail = core.check_system(command, 'Get pbs node info')
            self.assert_(stdout.find('error') == -1, fail)
            if stdout.find('state = free') != -1:
                core.state['torque.nodes-up'] = True
                break
        if not core.state['torque.nodes-up']:
            self.fail('PBS nodes not coming up')
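The qnodes loop above is a poll-until-ready pattern; note that it re-runs qnodes back-to-back with no delay between attempts. A self-contained sketch of the same pattern with a polling interval (names are illustrative, not part of osg-test):

import time

def wait_until(predicate, timeout=600.0, interval=5.0):
    """Poll predicate() until it returns True or `timeout` seconds pass.
    Returns True on success, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)   # avoid hammering the service being polled
    return False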
Code Example #7
File: special_install.py Project: brianhlin/osg-test
    def test_03_update_osg_release(self):
        core.state['install.release-updated'] = False
        if not core.options.updaterelease:
            return

        self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')

        command = ['rpm', '-e', 'osg-release']
        core.check_system(command, 'Erase osg-release')

        self.assert_(re.match(r'\d+\.\d+', core.options.updaterelease), "Unrecognized updaterelease format")
        rpm_url = 'https://repo.opensciencegrid.org/osg/' + core.options.updaterelease + '/osg-' + \
                  core.options.updaterelease + '-el' + str(core.el_release()) + '-release-latest.rpm'
        command = ['rpm', '-Uvh', rpm_url]
        core.check_system(command, 'Update osg-release')

        core.config['yum.clean_repos'] = ['osg'] + core.options.updaterepos
        yum.clean(*core.config['yum.clean_repos'])

        # If update repos weren't specified, just use osg-release
        if not core.options.updaterepos:
            core.options.updaterepos = ['osg']

        core.state['install.release-updated'] = True
        core.osg_release(update_state=True)
Code Example #8
    def test_04_start_condorce(self):
        if core.el_release() >= 7:
            core.config['condor-ce.lockfile'] = '/var/lock/condor-ce/htcondor-ceLock'
        else:
            core.config['condor-ce.lockfile'] = '/var/lock/subsys/condor-ce'
        core.state['condor-ce.started-service'] = False
        core.state['condor-ce.schedd-ready'] = False

        core.skip_ok_unless_installed('condor', 'htcondor-ce',
                                      'htcondor-ce-client')
        core.config['condor-ce.collectorlog'] = condor.ce_config_val(
            'COLLECTOR_LOG')

        if service.is_running('condor-ce'):
            core.state['condor-ce.schedd-ready'] = True
            self.skip_ok('already running')

        stat = core.get_stat(core.config['condor-ce.collectorlog'])

        service.check_start('condor-ce', timeout=20)

        if condor.wait_for_daemon(core.config['condor-ce.collectorlog'], stat,
                                  'Schedd', 300.0):
            core.state['condor-ce.schedd-ready'] = True
Code Example #9
    def test_02_xrdcp_server_to_local(self):
        if core.missing_rpm('xrootd-server', 'xrootd-client'):
            return

        hostname = socket.getfqdn()
        temp_source_dir = tempfile.mkdtemp()
        temp_target_dir = tempfile.mkdtemp()
        os.chmod(temp_source_dir, 0o777)
        os.chmod(temp_target_dir, 0o777)
        f = open(temp_source_dir + "/copied_file.txt", "w")
        f.write("This is some test data for an xrootd test.")
        f.close()
        xrootd_url = 'root://%s/%s/copied_file.txt' % (hostname, temp_source_dir)
        local_path = temp_target_dir + '/copied_file.txt'
        command = ('xrdcp', xrootd_url, local_path)

        status, stdout, stderr = core.system(command, True)
        
        fail = core.diagnose('Xrootd xrdcp copy, URL to local',
                             status, stdout, stderr)
        file_copied = os.path.exists(local_path)
        shutil.rmtree(temp_source_dir)
        shutil.rmtree(temp_target_dir)
        if core.el_release() != 6:
            self.assertEqual(status, 0, fail)
            self.assert_(file_copied, 'Copied file missing')
        else:
            self.assertEqual(status, 1, fail)
            self.assert_(not file_copied, 'Copied file exists')
Code Example #10
File: service.py Project: efajardo/osg-test
def start(service_name):
    """
    Start a service via init script or systemd.

    'service_name' is used as the base of the keys in the core.config and
    core.state dictionaries.

    The service is started by doing "service service_name start" or "systemctl
    start service_name".

    The service is not started up if core.state[service_name.started-service] is
    True.

    The following globals are set:
    core.state[service_name.started-service] is set to True once the service
    has been started here.

    """
    if core.state.get(service_name + '.started-service'):
        core.skip('service ' + service_name + ' already running (flagged as started)')
        return

    if core.el_release() >= 7:
        command = ('systemctl', 'start', service_name)
    else:
        command = ('service', service_name, 'start')
    core.check_system(command, 'Start ' + service_name + ' service')
    core.state[service_name + '.started-service'] = True
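A usage sketch under the docstring's contract (the service name is illustrative):

start('munge')   # runs the init script / systemctl and sets the state flag
start('munge')   # skipped: core.state['munge.started-service'] is already True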
Code Example #11
    def test_01_slurm_config(self):
        self.slurm_reqs()
        core.config['slurm.config-dir'] = '/etc/slurm'
        core.config['slurm.config'] = os.path.join(
            core.config['slurm.config-dir'], 'slurm.conf')
        files.write(core.config['slurm.config'],
                    SLURM_CONFIG.format(short_hostname=SHORT_HOSTNAME,
                                        cluster=CLUSTER_NAME,
                                        ctld_log=CTLD_LOG),
                    owner='slurm',
                    chmod=0o644)
        core.config['cgroup.config'] = os.path.join(
            core.config['slurm.config-dir'], 'cgroup.conf')
        config = SLURM_CGROUPS_CONFIG
        if core.el_release() == 6:
            config += "\nCgroupMountpoint=/cgroup"
        files.write(core.config['cgroup.config'],
                    config,
                    owner='slurm',
                    chmod=0o644)

        core.config['cgroup_allowed_devices_file.conf'] = os.path.join(
            core.config['slurm.config-dir'],
            'cgroup_allowed_devices_file.conf')
        files.write(core.config['cgroup_allowed_devices_file.conf'],
                    SLURM_CGROUPS_DEVICE_CONFIG,
                    owner='slurm',
                    chmod=0o644)
Code Example #12
File: test_17_pbs.py Project: edquist/osg-test
    def test_02_start_mom(self):
        if core.el_release() <= 6:
            core.config['torque.mom-lockfile'] = '/var/lock/subsys/pbs_mom'
        else:
            core.config['torque.mom-lockfile'] = '/var/lib/torque/mom_priv/mom.lock'
        core.state['torque.pbs-mom-running'] = False

        core.skip_ok_unless_installed(*self.required_rpms)
        self.skip_ok_if(os.path.exists(core.config['torque.mom-lockfile']), 'pbs mom apparently running')

        core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
        files.write(core.config['torque.mom-config'],
                    "$pbsserver %s\n" % core.get_hostname(),
                    owner='pbs')
        core.config['torque.mom-layout'] = '/var/lib/torque/mom_priv/mom.layout'
        files.write(core.config['torque.mom-layout'],
                    "nodes=0",
                    owner='pbs')

        command = ('service', 'pbs_mom', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs mom daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.mom-lockfile']),
                     'PBS mom run lock file missing')
        core.state['torque.pbs-mom-running'] = True
Code Example #13
    def test_03_update_osg_release(self):
        core.state['install.release-updated'] = False
        if not core.options.updaterelease:
            return

        self.skip_bad_unless(core.state['install.success'],
                             'Install did not succeed')

        command = ['rpm', '-e', 'osg-release']
        core.check_system(command, 'Erase osg-release')

        self.assert_(re.match(r'\d+\.\d+', core.options.updaterelease),
                     "Unrecognized updaterelease format")
        rpm_url = 'https://repo.opensciencegrid.org/osg/' + core.options.updaterelease + '/osg-' + \
                  core.options.updaterelease + '-el' + str(core.el_release()) + '-release-latest.rpm'
        command = ['rpm', '-Uvh', rpm_url]
        core.check_system(command, 'Update osg-release')

        core.config['yum.clean_repos'] = ['osg'] + core.options.updaterepos
        yum.clean(*core.config['yum.clean_repos'])

        # If update repos weren't specified, just use osg-release
        if not core.options.updaterepos:
            core.options.updaterepos = ['osg']

        core.state['install.release-updated'] = True
        core.osg_release(update_state=True)
Code Example #14
    def test_01_slurm_config(self):
        self.slurm_reqs()
        core.config['slurm.config'] = '/etc/slurm/slurm.conf'
        files.write(core.config['slurm.config'],
                    SLURM_CONFIG % {
                        'short_hostname': SHORT_HOSTNAME,
                        'cluster': CLUSTER_NAME,
                        'ctld_log': CTLD_LOG
                    },
                    owner='slurm',
                    chmod=0o644)
        core.config['cgroup.config'] = '/etc/slurm/cgroup.conf'
        config = SLURM_CGROUPS_CONFIG
        if core.el_release() == 6:
            config += "\nCgroupMountpoint=/cgroup"
        files.write(core.config['cgroup.config'],
                    config,
                    owner='slurm',
                    chmod=0o644)

        core.config['cgroup_allowed_devices_file.conf'] = '/etc/slurm/cgroup_allowed_devices_file.conf'
        files.write(core.config['cgroup_allowed_devices_file.conf'],
                    SLURM_CGROUPS_DEVICE_CONFIG,
                    owner='slurm',
                    chmod=0o644)
Code Example #15
def start(service_name):
    """
    Start a service via init script or systemd.

    'service_name' is used as the base of the keys in the core.config and
    core.state dictionaries.

    The service is started by doing "service service_name start" or "systemctl
    start service_name".

    The service is not started up if core.state[service_name.started-service] is
    True.

    The following globals are set:
    core.state[service_name.started-service] is set to True once the service
    has been started here.

    """
    if core.state.get(service_name + '.started-service'):
        core.skip('service ' + service_name +
                  ' already running (flagged as started)')
        return

    if core.el_release() >= 7:
        command = ('systemctl', 'start', service_name)
    else:
        command = ('service', service_name, 'start')
    core.check_system(command, 'Start ' + service_name + ' service')
    core.state[service_name + '.started-service'] = True
Code Example #16
def stop(service_name):
    """
    Stop a service via init script or systemd.

    'service_name' is used as the base of the keys in the core.config and
    core.state dictionaries.

    If we started the service, the service is stopped by doing "service
    service_name stop" or "systemctl stop service_name".

    Globals used:
    core.state[service_name.started-service] is used to determine if we started
    the service. After shutdown, this is set to False.

    """
    if not core.state.get(service_name + '.started-service'):
        core.skip('did not start service ' + service_name)
        return

    if core.el_release() >= 7:
        command = ('systemctl', 'stop', service_name)
    else:
        command = ('service', service_name, 'stop')
    core.check_system(command, 'Stop ' + service_name + ' service')
    core.state[service_name + '.started-service'] = False
Code Example #17
File: service.py Project: efajardo/osg-test
def stop(service_name):
    """
    Stop a service via init script or systemd.

    'service_name' is used as the base of the keys in the core.config and
    core.state dictionaries.

    If we started the service, the service is stopped by doing "service
    service_name stop" or "systemctl stop service_name".

    Globals used:
    core.state[service_name.started-service] is used to determine if we started
    the service. After shutdown, this is set to False.

    """
    if not core.state.get(service_name + '.started-service'):
        core.skip('did not start service ' + service_name)
        return

    if core.el_release() >= 7:
        command = ('systemctl', 'stop', service_name)
    else:
        command = ('service', service_name, 'stop')
    core.check_system(command, 'Stop ' + service_name + ' service')
    core.state[service_name + '.started-service'] = False
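A paired usage sketch with start() from the earlier examples (service name illustrative):

start('munge')   # records core.state['munge.started-service'] = True
stop('munge')    # stops the service and resets the flag to False
stop('munge')    # skipped: the flag shows we no longer own a running instance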
Code Example #18
    def test_01_xrdcp_local_to_server(self):
        if core.missing_rpm('xrootd-server', 'xrootd-client'):
            return

        hostname = socket.getfqdn()
        if core.config['xrootd.gsi'] == "ON":
            temp_dir="/tmp/vdttest"
            if not os.path.exists(temp_dir):
                os.mkdir(temp_dir)
        else:
            temp_dir = tempfile.mkdtemp()
        os.chmod(temp_dir, 0777)
        xrootd_url = 'root://%s/%s/copied_file.txt' % (hostname, temp_dir)
        command = ('xrdcp', TestXrootd.__data_path , xrootd_url)

        status, stdout, stderr = core.system(command, True)

        fail = core.diagnose('xrdcp copy, local to URL',
                             status, stdout, stderr)
        file_copied = os.path.exists(os.path.join(temp_dir, 'copied_file.txt'))
        shutil.rmtree(temp_dir)
        if core.el_release() != 6:
            self.assertEqual(status, 0, fail)
            self.assert_(file_copied, 'Copied file missing')
        else:
            self.assertEqual(status, 1, fail)
            self.assert_(not file_copied, 'Copied file existed somehow')
Code Example #19
File: test_290_slurm.py Project: ddavila0/osg-test
    def test_01_slurm_config(self):
        self.slurm_reqs()
        if core.PackageVersion('slurm') >= '19.05.2':
            core.config['slurm.config-dir'] = '/etc'
        else:
            core.config['slurm.config-dir'] = '/etc/slurm'
        core.config['slurm.config'] = os.path.join(
            core.config['slurm.config-dir'], 'slurm.conf')
        files.write(core.config['slurm.config'],
                    SLURM_CONFIG % {
                        'short_hostname': SHORT_HOSTNAME,
                        'cluster': CLUSTER_NAME,
                        'ctld_log': CTLD_LOG
                    },
                    owner='slurm',
                    chmod=0o644)
        core.config['cgroup.config'] = os.path.join(
            core.config['slurm.config-dir'], 'cgroup.conf')
        config = SLURM_CGROUPS_CONFIG
        if core.el_release() == 6:
            config += "\nCgroupMountpoint=/cgroup"
        files.write(core.config['cgroup.config'],
                    config,
                    owner='slurm',
                    chmod=0o644)

        core.config['cgroup_allowed_devices_file.conf'] = os.path.join(
            core.config['slurm.config-dir'],
            'cgroup_allowed_devices_file.conf')
        files.write(core.config['cgroup_allowed_devices_file.conf'],
                    SLURM_CGROUPS_DEVICE_CONFIG,
                    owner='slurm',
                    chmod=0o644)
Code Example #20
File: test_22_myproxy.py Project: edquist/osg-test
 def test_03_config_myproxy(self):
     core.skip_ok_unless_installed('myproxy-server')
     conFileContents = files.read('/usr/share/osg-test/test_myproxy_server.config')
     files.write('/etc/myproxy-server.config', conFileContents, owner='root', backup=True)
     if core.el_release() <= 6:
         core.config['myproxy.lock-file'] = '/var/lock/subsys/myproxy-server'
     else:
         core.config['myproxy.lock-file'] = '/var/run/myproxy-server/myproxy.pid'
Code Example #21
 def test_03_config_myproxy(self):
     core.skip_ok_unless_installed('myproxy-server')
     conFileContents = files.read('/usr/share/osg-test/test_myproxy_server.config')
     files.write('/etc/myproxy-server.config', conFileContents, owner='root', backup=True)
     if core.el_release() <= 6:
         core.config['myproxy.lock-file'] = '/var/lock/subsys/myproxy-server'
     else:
         core.config['myproxy.lock-file'] = '/var/run/myproxy-server/myproxy.pid'
Code Example #22
File: test_23_gratia.py Project: sthapa/osg-test
 def test_10_fix_tomcat_template(self):
     # Fix EL7 bug in Gratia template
     if core.el_release() == 7:
         core.skip_ok_unless_installed(tomcat.pkgname(), 'gratia-service')
         core.config['gratia.broken_template'] = '/usr/share/gratia/server.xml.template'
         bad_line = r'\s+sSLImplementation=.*'
         fixed_line = ' '*15 + 'sslImplementationName="org.glite.security.trustmanager.tomcat.TMSSLImplementation"'
         files.replace_regexpr(core.config['gratia.broken_template'], bad_line, fixed_line, owner='gratia')
Code Example #23
File: test_24_tomcat.py Project: edquist/osg-test
 def test_05_start_tomcat(self):
     core.skip_ok_unless_installed(tomcat.pkgname())
     
     if core.el_release() == 7:
         # tomcat on el7 doesn't seem to actually use its always-present pidfile...
         service.start('tomcat', init_script=tomcat.pkgname())
     else:
         service.start('tomcat', init_script=tomcat.pkgname(), sentinel_file=tomcat.pidfile())
Code Example #24
File: test_740_slurm.py Project: brianhlin/osg-test
 def test_01_stop_slurm(self):
     self.slurm_reqs()
     self.skip_ok_unless(core.state['%s.started-service' % core.config['slurm.service-name']], 'did not start slurm')
     service.check_stop(core.config['slurm.service-name']) # service requires config so we stop it first
     if core.el_release() == 7:
         service.check_stop(core.config['slurm.ctld-service-name'])
     files.restore(core.config['slurm.config'], 'slurm')
     files.restore(core.config['cgroup.config'], 'slurm')
     files.restore(core.config['cgroup_allowed_devices_file.conf'], 'slurm')
Code Example #25
File: test_15_xrootd.py Project: timtheisen/osg-test
    def test_03_start_xrootd(self):
        core.skip_ok_unless_installed('xrootd', by_dependency=True)
        if core.el_release() < 7:
            core.config['xrootd_service'] = "xrootd"
        else:
            core.config['xrootd_service'] = "xrootd@clustered"

        service.check_start(core.config['xrootd_service'])
        core.state['xrootd.started-server'] = True
Code Example #26
    def test_02_install_packages(self):
        core.state['install.success'] = False
        core.state['install.installed'] = []
        core.state['install.updated'] = []
        core.state['install.replace'] = []
        core.state['install.orphaned'] = []
        core.state['install.os_updates'] = []

        # Install packages
        core.state['install.transaction_ids'] = set()
        fail_msg = ''
        pkg_repo_dict = OrderedDict(
            (x, core.options.extrarepos) for x in core.options.packages)

        # HACK: Install x509-scitokens-issuer-client out of development (SOFTWARE-3649)
        x509_scitokens_issuer_packages = [
            'xrootd-scitokens', 'osg-tested-internal'
        ]
        for pkg in x509_scitokens_issuer_packages:
            if pkg in pkg_repo_dict:
                pkg_repo_dict["x509-scitokens-issuer-client"] = [
                    "osg-development"
                ]
                break

        # Special case: htcondor-ce-collector on EL8 needs mod_auth_openidc, only available in a module
        if "htcondor-ce-collector" in pkg_repo_dict:
            if core.el_release() > 7:
                core.check_system(
                    ["dnf", "-y", "module", "enable", "mod_auth_openidc"],
                    "Enable mod_auth_openidc module")

        for pkg, repos in pkg_repo_dict.items():
            # Do not try to re-install packages
            if core.rpm_is_installed(pkg):
                continue

            # Attempt installation
            command = ['yum', '-y']
            command += ['--enablerepo=%s' % x for x in repos]
            command += ['install', pkg]

            retry_fail, _, stdout, _ = yum.retry_command(command)
            if retry_fail == '':  # the command succeeded
                core.state['install.transaction_ids'].add(
                    yum.get_transaction_id())
                if not pkg.startswith("/"):
                    # ^^ rpm --verify doesn't work if you asked for a file instead of a package
                    command = ('rpm', '--verify', pkg)
                    core.check_system(command, 'Verify %s' % (pkg))
                yum.parse_output_for_packages(stdout)

            fail_msg += retry_fail

        if fail_msg:
            self.fail(fail_msg)
        core.state['install.success'] = True
Code Example #27
File: test_150_xrootd.py Project: brianhlin/osg-test
    def test_04_start_xrootd(self):
        core.skip_ok_unless_installed('xrootd', by_dependency=True)
        if core.el_release() < 7:
            core.config['xrootd_service'] = "xrootd"
        elif core.config['xrootd.multiuser']:
            core.config['xrootd_service'] = "xrootd-privileged@clustered"
        else:
            core.config['xrootd_service'] = "xrootd@clustered"

        service.check_start(core.config['xrootd_service'])
        core.state['xrootd.started-server'] = True
Code Example #28
def status(service_name):
    """
    Return exit code of the 'service_name' init script or systemd status check
    """
    if core.el_release() >= 7:
        command = ('systemctl', 'is-active', service_name)
    else:
        command = ('service', service_name, 'status')

    status_rc, _, _ = core.system(command)
    return status_rc
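Both `systemctl is-active` and SysV `service ... status` exit 0 when the service is running, so callers can test the return code uniformly. A sketch:

if status('condor-ce') == 0:
    print('condor-ce is active')
else:
    print('condor-ce is stopped or in a failed state')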
Code Example #29
File: service.py Project: efajardo/osg-test
def status(service_name):
    """
    Return exit code of the 'service_name' init script or systemd status check
    """
    if core.el_release() >= 7:
        command = ('systemctl', 'is-active', service_name)
    else:
        command = ('service', service_name, 'status')

    status_rc, _, _ = core.system(command)
    return status_rc
Code Example #30
File: test_150_xrootd.py Project: ddavila0/osg-test
    def test_04_start_xrootd(self):
        core.skip_ok_unless_installed('xrootd', 'globus-proxy-utils', by_dependency=True)
        if core.el_release() < 7:
            core.config['xrootd_service'] = "xrootd"
        elif core.config['xrootd.multiuser']:
            core.config['xrootd_service'] = "xrootd-privileged@standalone"
        else:
            core.config['xrootd_service'] = "xrootd@standalone"

        service.check_start(core.config['xrootd_service'])
        core.state['xrootd.started-server'] = True
Code Example #31
    def test_01_set_config(self):
        port = core.config['gsisshd.port'] = '2222'
        core.state['gsisshd.can-run'] = (
            not (core.el_release() >= 7 and core.state['selinux.mode']
                 and not core.rpm_is_installed('policycoreutils-python')))
        self.skip_ok_unless(
            core.state['gsisshd.can-run'],
            "Can't run with SELinux on EL >= 7 without policycoreutils-python")

        files.write(SSHD_CONFIG,
                    SSHD_CONFIG_TEXT % {'port': port},
                    owner='gsissh',
                    chmod=0o600)
Code Example #32
    def test_01_set_config(self):
        port = core.config['gsisshd.port'] = '2222'
        core.state['gsisshd.can-run'] = (
            not (core.el_release() >= 7 and core.state['selinux.mode']
                 and not core.dependency_is_installed("/usr/sbin/semanage")))
        self.skip_ok_unless(
            core.state['gsisshd.can-run'],
            "Can't run with SELinux on EL >= 7 without semanage")

        files.write(SSHD_CONFIG,
                    SSHD_CONFIG_TEXT % {'port': port},
                    owner='gsissh',
                    chmod=0o600)
Code Example #33
 def test_01_stop_slurm(self):
     self.slurm_reqs()
     self.skip_ok_unless(
         core.state['%s.started-service' %
                    core.config['slurm.service-name']],
         'did not start slurm')
     service.check_stop(core.config['slurm.service-name'])  # service requires config so we stop it first
     if core.el_release() == 7:
         service.check_stop(core.config['slurm.ctld-service-name'])
     files.restore(core.config['slurm.config'], 'slurm')
     files.restore(core.config['cgroup.config'], 'slurm')
     files.restore(core.config['cgroup_allowed_devices_file.conf'], 'slurm')
Code Example #34
File: special_cleanup.py Project: edquist/osg-test
    def test_01_downgrade_osg_release(self):
        if not core.options.updaterelease:
            return

        self.skip_bad_unless(core.state['install.release-updated'], 'release not updated')

        command = ['rpm', '-e', 'osg-release']
        core.check_system(command, 'Erase osg-release')

        rpm_url = 'http://repo.grid.iu.edu/osg/' + core.config['install.original-release-ver'] + '/osg-' + \
            core.config['install.original-release-ver'] + '-el' + str(core.el_release()) + '-release-latest.rpm'
        command = ['rpm', '-Uvh', rpm_url]
        core.check_system(command, 'Downgrade osg-release')
Code Example #35
    def test_09_start_voms(self):
        core.state['voms.started-server'] = False

        voms.skip_ok_unless_installed()
        self.skip_ok_if(os.path.exists(core.config['voms.lock-file']), 'apparently running')

        if core.el_release() < 7:
            core.config['voms_service'] = 'voms'
        else:
            core.config['voms_service'] = 'voms@' + core.config['voms.vo']

        service.check_start(core.config['voms_service'])

        core.state['voms.started-server'] = True
Code Example #36
File: test_17_pbs.py Project: edquist/osg-test
    def test_01_start_munge(self):
        if core.el_release() == 5:
            core.config['munge.lockfile'] = '/var/lock/subsys/munge'
        elif core.el_release() == 6:
            core.config['munge.lockfile'] = '/var/lock/subsys/munged'
        elif core.el_release() == 7:
            core.config['munge.lockfile'] = '/var/run/munge/munged.pid'
        core.config['munge.keyfile'] = '/etc/munge/munge.key'
        core.state['munge.running'] = False

        core.skip_ok_unless_installed(*self.required_rpms)
        self.skip_ok_if(os.path.exists(core.config['munge.lockfile']), 'already running')

        files.preserve(core.config['munge.keyfile'], 'pbs')
        command = ('/usr/sbin/create-munge-key', '-f',)
        stdout, _, fail = core.check_system(command, 'Create munge key')
        self.assert_(stdout.find('error') == -1, fail)
        command = ('service', 'munge', 'start')
        stdout, _, fail = core.check_system(command, 'Start munge daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['munge.lockfile']),
                     'munge lock file missing')
        core.state['munge.running'] = True
Code Example #37
    def test_01_set_config(self):
        port = core.config['gsisshd.port'] = '2222'
        core.state['gsisshd.can-run'] = (not (
            core.el_release() >= 7 and
            core.state['selinux.mode'] and
            not core.rpm_is_installed('policycoreutils-python')))
        self.skip_ok_unless(core.state['gsisshd.can-run'],
                            "Can't run with SELinux on EL >= 7 without policycoreutils-python")

        files.write(
            SSHD_CONFIG,
            SSHD_CONFIG_TEXT % {'port': port},
            owner='gsissh',
            chmod=0o600)
Code Example #38
    def test_09_start_voms(self):
        core.state['voms.started-server'] = False

        voms.skip_ok_unless_installed()
        self.skip_ok_if(os.path.exists(core.config['voms.lock-file']), 'apparently running')

        if core.el_release() < 7:
            core.config['voms_service'] = 'voms'
        else:
            core.config['voms_service'] = 'voms@' + core.config['voms.vo']

        service.check_start(core.config['voms_service'])

        core.state['voms.started-server'] = True
Code Example #39
File: test_840_xrootd.py Project: djw8605/osg-test
 def test_01_stop_xrootd(self):
     if core.state['xrootd.backups-exist']:
         files.restore(core.config['xrootd.config'], "xrootd")
         files.restore('/etc/xrootd/auth_file', "xrootd")
         if not core.rpm_is_installed('xrootd-lcmaps'):
             files.restore('/etc/grid-security/xrd/xrdmapfile', "xrootd")
         if core.el_release() < 7:
             files.restore(core.config['xrootd.service-defaults'], "xrootd")
     core.skip_ok_unless_installed('xrootd',
                                   'globus-proxy-utils',
                                   by_dependency=True)
     self.skip_ok_unless(core.state['xrootd.started-server'],
                         'did not start server')
     service.check_stop(core.config['xrootd_service'])
     files.remove(core.config['xrootd.tmp-dir'], force=True)
Code Example #40
File: special_cleanup.py Project: edquist/osg-test
    def test_02_obsoleting_packages(self):
        # If packages were obsoleted in upgrade, remove the packages that obsoleted them
        # Also skip if we didn't install anything
        if core.el_release() > 5 or len(core.options.packages) == 0:
            return
        self.skip_ok_unless(core.state['install.replace'], 'no packages were replaced')

        # This also removes any package that required the obsoleted packages! If we're not
        # supposed to be removing these packages, they will be considered
        # orphaned and reinstalled in test_04_orphaned_packages
        command = ['yum', '-y', 'remove'] + core.state['install.replace']
        fail_msg, _, stdout, _ = yum.retry_command(command)
        if fail_msg:
            self.fail(fail_msg)
        yum.parse_output_for_packages(stdout)
Code Example #41
File: test_30_misc.py Project: edquist/osg-test
    def test_03_lfc_multilib(self):
        # We do not ship lfc-* in OSG 3.3
        self.skip_ok_if(core.osg_release().split('.') >= ['3','3'], message='OSG 3.3+')
        # We do not build 32-bit packages on EL7
        self.skip_ok_if(core.el_release() >= 7, message='running on EL7+')

        core.skip_ok_unless_installed('yum-utils')

        # We can't test this on 32-bit
        uname_out, _, _ = core.check_system(['uname', '-i'], 'getting arch')
        self.skip_ok_if(re.search(r'i\d86', uname_out), message='running on 32-bit')

        cmdbase = ['repoquery', '--plugins']
        for repo in core.options.extrarepos:
            cmdbase.append('--enablerepo=%s' % repo)

        # Find the 32-bit lfc-python rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python.i386'], 'lfc-python multilib (32bit)')
        if stdout.strip() == '':
            self.fail('32-bit lfc-python not found in 64-bit repo')

        # Sanity check: find the 64-bit lfc-python rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python.x86_64'], 'lfc-python multilib (64bit)')
        if stdout.strip() == '':
            self.fail('64-bit lfc-python not found in 64-bit repo')

        # Find the 32-bit lfc-python26 rpm (on el5 only)
        if core.el_release() == 5:
            stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.i386'], 'lfc-python26 multilib (32bit)')
            if stdout.strip() == '':
                self.fail('32-bit lfc-python26 not found in 64-bit repo')

            # Sanity check: find the 64-bit lfc-python26 rpm
            stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.x86_64'], 'lfc-python26 multilib (64bit)')
            if stdout.strip() == '':
                self.fail('64-bit lfc-python26 not found in 64-bit repo')
Code Example #42
File: voms.py Project: brianhlin/osg-test
def is_installed():
    """Return True if the dependencies for setting up and using VOMS are installed.
    EL7 requires a minimum version of the voms-server package to get the service file fix from SOFTWARE-2357.
    """
    for dep in 'voms-server', 'voms-clients', 'voms-mysql-plugin', mysql.client_rpm(), mysql.server_rpm():
        if not core.dependency_is_installed(dep):
            return False

    # TODO: drop this check when 3.3 is completely EOL
    if core.el_release() >= 7:
        if core.PackageVersion('voms-server') < '2.0.12-3.2':
            core.log_message("voms-server installed but too old (missing SOFTWARE-2357 fix)")
            return False

    return True
Code Example #43
File: condor.py Project: edquist/osg-test
def lockfile_path():
    """The path to the condor lockfile (EL5 and EL6 only)
    Returns None on EL7.

    """
    if core.el_release() >= 7:
        return None

    condor_lockfile = '/var/lock/subsys/condor_master'
    # The name of the lockfile changed in 7.8.8
    if core.rpm_is_installed('condor'):
        condor_version = core.get_package_envra('condor')[2]
        condor_version_split = condor_version.split('.')
        if condor_version_split >= ['7', '8', '8']:
            condor_lockfile = '/var/lock/subsys/condor'
    return condor_lockfile
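The comparison `condor_version_split >= ['7', '8', '8']` orders lists of strings lexicographically. That happens to work for these single-digit components, but it would misorder versions once any component reaches two digits (['10', '0'] < ['7', '8'] as strings). A safer numeric sketch (illustrative helper, not part of osg-test):

def version_at_least(version, minimum):
    """Numeric comparison of dotted version strings, e.g.
    version_at_least('10.0.1', '7.8.8') -> True (string lists would say False).
    """
    def to_tuple(ver):
        # '10.0.1' -> (10, 0, 1); comparing tuples of ints is numeric
        return tuple(int(part) for part in ver.split('.'))
    return to_tuple(version) >= to_tuple(minimum)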
Code Example #44
    def test_01_downgrade_osg_release(self):
        if not core.options.updaterelease:
            return

        self.skip_bad_unless(core.state['install.release-updated'],
                             'release not updated')

        command = ['rpm', '-e', 'osg-release']
        core.check_system(command, 'Erase osg-release')

        rpm_url = 'https://repo.opensciencegrid.org/osg/' + core.config['install.original-release-ver'] + '/osg-' + \
            core.config['install.original-release-ver'] + '-el' + str(core.el_release()) + '-release-latest.rpm'
        command = ['rpm', '-Uvh', rpm_url]
        core.check_system(command, 'Downgrade osg-release')

        yum.clean(*core.config['yum.clean_repos'])
Code Example #45
    def test_02_start_seg(self):
        core.state['globus.started-seg'] = False
        core.config['globus.seg-lockfile'] = '/var/lock/subsys/globus-scheduler-event-generator'

        core.skip_ok_unless_installed('globus-scheduler-event-generator-progs')
        # globus-job-run against PBS hangs with the SEG so we disable it and use
        # globus-grid-job-manager-pbs-setup-poll instead
        # https://jira.opensciencegrid.org/browse/SOFTWARE-1929
        self.skip_ok_if(core.el_release() == 5, 'Disable the SEG for EL5')
        self.skip_ok_if(os.path.exists(core.config['globus.seg-lockfile']), 'SEG already running')
        command = ('service', 'globus-scheduler-event-generator', 'start')
        stdout, _, fail = core.check_system(command, 'Start Globus SEG')
        self.assert_(stdout.find('FAILED') == -1, fail)
        self.assert_(os.path.exists(core.config['globus.seg-lockfile']),
                     'Globus SEG run lock file missing')
        core.state['globus.started-seg'] = True
Code Example #46
File: test_17_pbs.py Project: edquist/osg-test
    def test_03_start_pbs_sched(self):
        if core.el_release() <= 6:
            core.config['torque.sched-lockfile'] = '/var/lock/subsys/pbs_sched'
        else:
            core.config['torque.sched-lockfile'] = '/var/lib/torque/sched_priv/sched.lock'
        core.state['torque.pbs-sched-running'] = False

        core.skip_ok_unless_installed(*self.required_rpms)
        self.skip_ok_if(os.path.exists(core.config['torque.sched-lockfile']), 'pbs scheduler apparently running')

        command = ('service', 'pbs_sched', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs scheduler daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.sched-lockfile']),
                     'pbs sched run lock file missing')
        core.state['torque.pbs-sched-running'] = True
Code Example #47
File: test_150_xrootd.py Project: ddavila0/osg-test
    def test_01_configure_xrootd(self):
        core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
        core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
        core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
        if core.rpm_is_installed('osg-xrootd-standalone'):
            # rootdir and resourcename need to be set early for the default osg-xrootd config
            core.config['xrootd.config'] = '/etc/xrootd/config.d/10-osg-test.cfg'
        else:
            core.config['xrootd.config'] = '/etc/xrootd/config.d/99-osg-test.cfg'
        core.config['xrootd.service-defaults'] = '/etc/sysconfig/xrootd'
        core.config['xrootd.multiuser'] = False
        core.state['xrootd.started-server'] = False
        core.state['xrootd.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        core.skip_ok_unless_installed('xrootd', 'globus-proxy-utils', by_dependency=True)

        user = pwd.getpwnam("xrootd")
        core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
        core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

        if core.rpm_is_installed('osg-xrootd-standalone'):
            core.log_message("Using osg-xrootd configuration")
            xrootd_config = META_XROOTD_CFG_TEXT
        else:
            lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps', 'vo-client', 'vo-client-lcmaps-voms')
            if all([core.rpm_is_installed(x) for x in lcmaps_packages]):
                core.log_message("Using xrootd-lcmaps authentication")
                sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:loglevel=5,policy=authorize_only'
            else:
                core.log_message("Using XRootD mapfile authentication")
                sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'
                files.write("/etc/grid-security/xrd/xrdmapfile", "\"%s\" vdttest" % core.config['user.cert_subject'],
                            owner="xrootd",
                            chown=(user.pw_uid, user.pw_gid))
            xrootd_config = XROOTD_CFG_TEXT % sec_protocol

        files.write(core.config['xrootd.config'], xrootd_config, owner='xrootd', backup=True, chmod=0o644)

        if core.el_release() < 7:
            files.write(core.config['xrootd.service-defaults'], SYSCONFIG_TEXT,
                        owner="xrootd", chown=(user.pw_uid, user.pw_gid), chmod=0o644)

        authfile = '/etc/xrootd/auth_file'
        files.write(authfile, AUTHFILE_TEXT, owner="xrootd", chown=(user.pw_uid, user.pw_gid))

        core.state['xrootd.backups-exist'] = True
Code Example #48
File: special_cleanup.py Project: edquist/osg-test
    def test_03_remove_packages(self):
        # We didn't ask to install anything
        if len(core.options.packages) == 0:
            return

        # Nothing actually got installed
        if len(core.state['install.installed']) == 0:
            core.log_message('No packages installed')
            return

        el_version = core.el_release()

        if el_version >= 6:
            # Rolling back is a lot more reliable in yum post EL5
            core.state['install.transaction_ids'].reverse()
            for transaction in core.state['install.transaction_ids']:
                command = ['yum', 'history', 'undo', '-y', transaction]
                for repo in core.options.extrarepos:
                    command.append('--enablerepo=%s' % repo)
                fail_msg, _, stdout, _ = yum.retry_command(command)
                if fail_msg:
                    self.fail(fail_msg)
        elif el_version == 5:
            # rpm -Uvh --rollback was very finicky so we had to
            # spin up our own method of rolling back installations
            if len(core.state['install.updated']) != 0:
                command = ['yum', 'downgrade', '-y'] + core.state['install.updated']
                fail_msg, _, stdout, _ = yum.retry_command(command)
                if fail_msg:
                    self.fail(fail_msg)
                # Remove packages from install list that were brought in as deps for `yum update`
                yum.parse_output_for_packages(stdout)

            if len(core.state['install.installed']) != 0:
                for pkg in core.state['install.os_updates']:
                    try:
                        core.state['install.installed'].remove(pkg)
                    except ValueError:
                        pass # it was already removed from under us
                rpm_erase_list = self.list_special_install_rpms(core.state['install.installed'])
                package_count = len(rpm_erase_list)
                command = ['rpm', '--quiet', '--erase'] + rpm_erase_list
                core.check_system(command, 'Remove %d packages' % (package_count))
            else:
                core.log_message('No new RPMs')
                return
Code Example #49
File: special_install.py Project: edquist/osg-test
    def test_04_update_packages(self):
        if not (core.options.updaterepos and core.state['install.installed']):
            return
        
        self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')

        # Update packages
        command = ['yum', 'update', '-y']
        for repo in core.options.updaterepos:
            command.append('--enablerepo=%s' % repo)
        fail_msg, status, stdout, stderr = yum.retry_command(command)
        yum.parse_output_for_packages(stdout)

        if fail_msg:
            self.fail(fail_msg)
        else:
            if core.el_release() >= 6:
                core.state['install.transaction_ids'].append(yum.get_transaction_id())
Code Example #50
File: test_84_xrootd.py Project: edquist/osg-test
    def test_01_stop_xrootd(self):
        if (core.config['xrootd.gsi'] == "ON") and (core.state['xrootd.backups-exist'] == True):
            files.restore('/etc/xrootd/xrootd-clustered.cfg',"xrootd")
            files.restore('/etc/xrootd/auth_file',"xrootd")
            files.restore('/etc/grid-security/xrd/xrdmapfile',"xrootd")
        core.skip_ok_unless_installed('xrootd', by_dependency=True)
        self.skip_ok_if(core.state['xrootd.started-server'] == False, 'did not start server')

        if core.el_release() < 7:
            command = ('service', 'xrootd', 'stop')
            stdout, _, fail = core.check_system(command, 'Stop Xrootd server')
            self.assert_(stdout.find('FAILED') == -1, fail)
            self.assert_(not os.path.exists(core.config['xrootd.pid-file']),
                         'Xrootd server PID file still present')
        else:
            core.check_system(('systemctl', 'stop', 'xrootd@clustered'), 'Stop Xrootd server')

            core.check_system(('systemctl', 'status', 'xrootd@clustered'), 'Verify Xrootd server stopped', exit=3)
Code Example #51
File: test_15_xrootd.py Project: edquist/osg-test
    def test_01_start_xrootd(self):
        core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
        core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
        core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
        core.config['xrootd.gsi'] = "ON"
        core.state['xrootd.started-server'] = False
        core.state['xrootd.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        vdt_pw = pwd.getpwnam(core.options.username)
        core.config['certs.usercert'] = os.path.join(vdt_pw.pw_dir, '.globus', 'usercert.pem')
        core.skip_ok_unless_installed('xrootd', by_dependency=True)

        # Determine xrootd package name
        if core.rpm_is_installed('xrootd4'):
            core.config['xrootd.package'] = 'xrootd4'
        elif core.rpm_is_installed('xrootd'):
            core.config['xrootd.package'] = 'xrootd'

        user = pwd.getpwnam("xrootd")
        if core.config['xrootd.gsi'] == "ON":
            core.skip_ok_unless_installed('globus-proxy-utils')
            core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
            core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

            cfgfile = '/etc/xrootd/xrootd-clustered.cfg'
            files.append(cfgfile, XROOTD_CFG_TEXT, owner='xrootd', backup=True)
            authfile = '/etc/xrootd/auth_file'
            files.write(authfile, AUTHFILE_TEXT, owner="xrootd", chown=(user.pw_uid, user.pw_gid))

            files.write("/etc/grid-security/xrd/xrdmapfile", "\"%s\" vdttest" % core.config['user.cert_subject'],
                        owner="xrootd",
                        chown=(user.pw_uid, user.pw_gid))
            core.state['xrootd.backups-exist'] = True

        if core.el_release() < 7:
            stdout, _, fail = core.check_system(('service', 'xrootd', 'start'), 'Start Xrootd server')
            self.assert_('FAILED' not in stdout, fail)
            self.assert_(os.path.exists(core.config['xrootd.pid-file']), 'Xrootd server PID file missing')
        else:
            core.check_system(('systemctl', 'start', 'xrootd@clustered'), 'Start Xrootd server')
            core.check_system(('systemctl', 'status', 'xrootd@clustered'), 'Verify status of Xrootd server')

        core.state['xrootd.started-server'] = True
Code Example #52
    def test_01_start_xrootd(self):
        core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
        core.config['certs.hostcert'] = '/etc/grid-security/hostcert.pem'
        core.config['certs.hostkey'] = '/etc/grid-security/hostkey.pem'
        core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
        core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
        core.config['xrootd.gsi'] = "ON"
        core.state['xrootd.started-server'] = False
        if not core.rpm_is_installed('xrootd-server'):
            core.skip('not installed')
            return
        user = pwd.getpwnam("xrootd")

        if core.config['xrootd.gsi'] == "ON":
            self.install_cert('certs.xrootdcert', 'certs.hostcert',
                              'xrootd', 0o644)
            self.install_cert('certs.xrootdkey', 'certs.hostkey',
                              'xrootd', 0o400)

            cfgfile = '/etc/xrootd/xrootd-clustered.cfg'
            cfgtext = ('cms.space min 2g 5g\n'
                       'xrootd.seclib /usr/lib64/libXrdSec.so\n'
                       'sec.protocol /usr/lib64 gsi'
                       ' -certdir:/etc/grid-security/certificates'
                       ' -cert:/etc/grid-security/xrd/xrdcert.pem'
                       ' -key:/etc/grid-security/xrd/xrdkey.pem -crl:3'
                       ' -gridmap:/etc/grid-security/xrd/xrdmapfile'
                       ' --gmapopt:10 --gmapto:0\n'
                       'acc.authdb /etc/xrootd/auth_file\n'
                       'ofs.authorize\n')
            files.append(cfgfile, cfgtext, owner='xrootd', backup=True)
            authfile = '/etc/xrootd/auth_file'
            files.write(authfile, 'u * /tmp lr\nu = /tmp/@=/ a\nu xrootd /tmp a\n', owner="xrootd")
            os.chown(authfile, user.pw_uid, user.pw_gid)

            files.write("/etc/grid-security/xrd/xrdmapfile",
                        '"/O=Grid/OU=GlobusTest/OU=VDT/CN=VDT Test" vdttest',
                        owner="xrootd")
            os.chown("/etc/grid-security/xrd/xrdmapfile", user.pw_uid, user.pw_gid)

        command = ('service', 'xrootd', 'start')
        if core.el_release() != 6:
            stdout, stderr, fail = core.check_system(command, 'Start Xrootd server')
            self.assert_(stdout.find('FAILED') == -1, fail)
            self.assert_(os.path.exists(core.config['xrootd.pid-file']),
                         'xrootd server PID file missing')
            core.state['xrootd.started-server'] = True
        else:
            stdout, stderr, fail = core.check_system(command, 'Start Xrootd server', exit=1)
            self.assert_(stdout.find('OK') == -1, fail)
Code Example #53
def is_installed():
    """Return True if the dependencies for setting up and using VOMS are installed.
    EL7 requires a minimum version of the voms-server package to get the service file fix from SOFTWARE-2357.
    """
    for dep in ('voms-server', 'voms-clients', 'voms-mysql-plugin',
                mysql.client_rpm(), mysql.server_rpm()):
        if not core.dependency_is_installed(dep):
            return False

    # TODO: drop this check when 3.3 is completely EOL
    if core.el_release() >= 7:
        epoch, _, version, release, _ = core.get_package_envra('voms-server')
        if core.version_compare((epoch, version, release), '2.0.12-3.2') < 0:
            core.log_message("voms-server installed but too old (missing SOFTWARE-2357 fix)")
            return False

    return True
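This performs the same minimum-version guard as Code Example #42, expressed with get_package_envra plus version_compare instead of core.PackageVersion. Assuming both helpers order (epoch, version, release) the same way, the two idioms are interchangeable:

# Idiom from Code Example #42:
too_old = core.PackageVersion('voms-server') < '2.0.12-3.2'

# Equivalent idiom from this example (assumption: same ordering semantics):
epoch, _, version, release, _ = core.get_package_envra('voms-server')
too_old = core.version_compare((epoch, version, release), '2.0.12-3.2') < 0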
Code Example #54
    def test_04_start_condorce(self):
        if core.el_release() >= 7:
            core.config['condor-ce.lockfile'] = '/var/lock/condor-ce/htcondor-ceLock'
        else:
            core.config['condor-ce.lockfile'] = '/var/lock/subsys/condor-ce'
        core.state['condor-ce.started-service'] = False
        core.state['condor-ce.schedd-ready'] = False

        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
        core.config['condor-ce.collectorlog'] = condor.ce_config_val('COLLECTOR_LOG')

        if service.is_running('condor-ce'):
            core.state['condor-ce.schedd-ready'] = True
            self.skip_ok('already running')

        service.check_start('condor-ce')

        stat = core.get_stat(core.config['condor-ce.collectorlog'])
        if condor.wait_for_daemon(core.config['condor-ce.collectorlog'], stat, 'Schedd', 300.0):
            core.state['condor-ce.schedd-ready'] = True
Code Example #55
File: test_290_slurm.py Project: brianhlin/osg-test
    def test_01_slurm_config(self):
        self.slurm_reqs()
        core.config['slurm.config'] = '/etc/slurm/slurm.conf'
        files.write(core.config['slurm.config'],
                    SLURM_CONFIG % {'short_hostname': SHORT_HOSTNAME, 'cluster': CLUSTER_NAME, 'ctld_log': CTLD_LOG},
                    owner='slurm',
                    chmod=0o644)
        core.config['cgroup.config'] = '/etc/slurm/cgroup.conf'
        config = SLURM_CGROUPS_CONFIG
        if core.el_release() == 6:
            config += "\nCgroupMountpoint=/cgroup"
        files.write(core.config['cgroup.config'],
                    config,
                    owner='slurm',
                    chmod=0o644)

        core.config['cgroup_allowed_devices_file.conf'] = '/etc/slurm/cgroup_allowed_devices_file.conf'
        files.write(core.config['cgroup_allowed_devices_file.conf'],
                    SLURM_CGROUPS_DEVICE_CONFIG,
                    owner='slurm',
                    chmod=0o644)
Code Example #56
File: special_install.py Project: edquist/osg-test
    def test_02_install_packages(self):
        core.state['install.success'] = False
        core.state['install.installed'] = []
        core.state['install.updated'] = []
        core.state['install.replace'] = []
        core.state['install.orphaned'] = []
        core.state['install.os_updates'] = []

        # Install packages
        core.state['install.transaction_ids'] = []
        fail_msg = ''
        for package in core.options.packages:

            # Do not try to re-install packages
            if core.rpm_is_installed(package):
                continue

            # Attempt installation
            command = ['yum', '-y']
            for repo in core.options.extrarepos:
                command.append('--enablerepo=%s' % repo)
            command += ['install', package]

            retry_fail, status, stdout, stderr = yum.retry_command(command)
            if retry_fail == '':   # the command succeeded
                if core.el_release() >= 6:
                    # RHEL 6 does not have the rollback option, so store the
                    # transaction IDs so we can undo each transaction in the
                    # proper order
                    core.state['install.transaction_ids'].append(yum.get_transaction_id())
                command = ('rpm', '--verify', package)
                core.check_system(command, 'Verify %s' % (package))
                yum.parse_output_for_packages(stdout)

            fail_msg += retry_fail

        if fail_msg:
            self.fail(fail_msg)
        core.state['install.success'] = True
Code Example #57
File: test_30_misc.py Project: bnl-sdcc/griddev
    def test_03_lfc_multilib(self):
        if core.missing_rpm('yum-utils'):
            return

        # We can't test this on 32-bit
        uname_out, _, _ = core.check_system(['uname', '-i'], 'getting arch')
        if re.search(r'i\d86', uname_out):
            core.skip('running on 32-bit')
            return

        cmdbase = ['repoquery', '--plugins']
        for repo in core.options.extrarepos:
            cmdbase.append('--enablerepo=%s' % repo)

        # Find the 32-bit lfc-python rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python.i386'],
                                         'lfc-python multilib (32bit)')
        if stdout.strip() == '':
            self.fail('32-bit lfc-python not found in 64-bit repo')

        # Sanity check: find the 64-bit lfc-python rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python.x86_64'],
                                         'lfc-python multilib (64bit)')
        if stdout.strip() == '':
            self.fail('64-bit lfc-python not found in 64-bit repo')

        # Find the 32-bit lfc-python26 rpm (on el5 only)
        if core.el_release() == 5:
            stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.i386'],
                                             'lfc-python26 multilib (32bit)')
            if stdout.strip() == '':
                self.fail('32-bit lfc-python26 not found in 64-bit repo')

            # Sanity check: find the 64-bit lfc-python26 rpm
            stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.x86_64'],
                                             'lfc-python26 multilib (64bit)')
            if stdout.strip() == '':
                self.fail('64-bit lfc-python26 not found in 64-bit repo')
Code Example #58
def majorver():
    "Tomcat major version"
    if core.el_release() > 5:
        return 6
    else:
        return 5
Code Example #59
File: mysql.py Project: edquist/osg-test
def name():
    if core.el_release() < 7:
        return 'mysql'
    else:
        return 'mariadb'
Code Example #60
File: mysql.py Project: edquist/osg-test
def daemon_name():
    if core.el_release() < 7:
        return 'mysqld'
    else:
        return 'mariadb'
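A usage sketch combining these helpers with the service module shown in the earlier examples (the pairing is illustrative):

# daemon_name() resolves to 'mysqld' on EL5/EL6 and 'mariadb' on EL7+;
# name() gives the matching client/package base name ('mysql' or 'mariadb').
service.check_start(daemon_name())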