def test_02_config_tomcat_properties(self):
    if core.missing_rpm(tomcat.pkgname(), 'emi-trustmanager-tomcat'):
        return
    server_xml_path = os.path.join(tomcat.sysconfdir(), 'server.xml')
    old_contents = files.read(server_xml_path, True)
    pattern = re.compile(r'crlRequired=".*?"', re.IGNORECASE)
    new_contents = pattern.sub('crlRequired="false"', old_contents)
    files.write(server_xml_path, new_contents, owner='tomcat')

def test_04_config_tomcat_properties(self):
    if core.missing_rpm(tomcat.pkgname(), 'gratia-service'):
        return
    command = ('/usr/share/gratia/configure_tomcat',)
    core.check_system(command, 'Unable to configure Gratia.')
    server_xml_path = os.path.join(tomcat.sysconfdir(), 'server.xml')
    files.replace_regexpr(
        server_xml_path, "(<Server.*\n)",
        r'\1<Listener className="org.apache.catalina.core.JasperListener"/>',
        owner='gratia', backup=True)

def test_04_stop_munge(self):
    if core.missing_rpm(*self.required_rpms):
        return
    if not core.state['munge.running']:
        core.skip('munge not running')
        return
    command = ('service', 'munge', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop munge daemon')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(not os.path.exists(core.config['munge.lockfile']),
                 'munge lock file still present')
    core.state['munge.running'] = False
    files.restore(core.config['munge.keyfile'], 'pbs')

def test_023_job_list_parsable(self):
    if core.missing_rpm('rsv'):
        return
    # This test is disabled because there are currently no enabled
    # metrics; re-enable it once we add RSV configuration that enables
    # some metrics.
    # Check the parsable job-list output:
    #command = ('rsv-control', '--job-list', '--parsable')
    #stdout = core.check_system(command, 'rsv-control --job-list --parsable')[0]
    # The separator is a pipe, so just make sure we got one of those
    #self.assert_(re.search('\|', stdout) is not None)
    return

def test_01_stop_condor_cron(self):
    if core.missing_rpm('condor-cron'):
        return
    if not core.state['condor-cron.started-service']:
        core.skip('did not start service')
        return
    command = ('service', 'condor-cron', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop Condor-Cron')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(not os.path.exists(core.config['condor-cron.lockfile']),
                 'Condor-Cron run lock file still present')
    core.state['condor-cron.running-service'] = False

def test_02_fetch_url_dir(self):
    if core.missing_rpm('fetch-crl'):
        return
    tmpdir = tempfile.mkdtemp()
    command = ('fetch-crl', '-o', tmpdir)
    stdout, _, fail = core.check_system(command,
                                        'Start fetch-crl with an output dir')
    # Count the CRL (*.r0) files that fetch-crl wrote, then clean up
    count = 0
    for name in os.listdir(tmpdir):
        if name.endswith('r0'):
            count = count + 1
        os.unlink(os.path.join(tmpdir, name))
    os.rmdir(tmpdir)
    self.assert_(count > 3, 'Expected more than 3 fetched CRL files')

def test_04_start_pbs(self):
    core.config['torque.pbs-lockfile'] = '/var/lock/subsys/pbs_server'
    core.state['torque.pbs-server-running'] = False
    core.state['torque.pbs-configured'] = False
    core.state['torque.nodes-up'] = False
    if core.el_release() == 5:
        core.config['torque.pbs-nodes-file'] = '/var/torque/server_priv/nodes'
    elif core.el_release() == 6:
        core.config['torque.pbs-nodes-file'] = \
            '/var/lib/torque/server_priv/nodes'
    else:
        core.skip('Distribution version not supported')
        return
    if core.missing_rpm(*self.required_rpms):
        return
    if os.path.exists(core.config['torque.pbs-lockfile']):
        core.skip('pbs server apparently running')
        return
    # Add the local node as a compute node
    files.write(core.config['torque.pbs-nodes-file'],
                "%s np=1\n" % core.get_hostname(), owner='pbs')
    command = ('service', 'pbs_server', 'start')
    stdout, _, fail = core.check_system(command, 'Start pbs server daemon')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(os.path.exists(core.config['torque.pbs-lockfile']),
                 'pbs server run lock file missing')
    core.state['torque.pbs-server'] = True
    core.state['torque.pbs-server-running'] = True
    core.check_system("echo '%s' | qmgr %s" % (self.pbs_config,
                                               core.get_hostname()),
                      'Configuring pbs server', shell=True)
    core.state['torque.pbs-configured'] = True
    # Wait up to 10 minutes for the local node to come up; trigger a
    # failure if that does not happen
    start_time = time.time()
    while (time.time() - start_time) < 600:
        command = ('/usr/bin/qnodes', '-s', core.get_hostname())
        stdout, _, fail = core.check_system(command, 'Get pbs node info')
        self.assert_(stdout.find('error') == -1, fail)
        if stdout.find('state = free') != -1:
            core.state['torque.nodes-up'] = True
            break
        time.sleep(5)  # avoid hammering the server while polling
    if not core.state['torque.nodes-up']:
        self.fail('PBS nodes not coming up')

def test_01_stop_rsv(self):
    if core.missing_rpm('rsv'):
        return
    if not core.state['rsv.started-service']:
        core.skip('did not start service')
        return
    command = ('service', 'rsv', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop RSV')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(not os.path.exists(core.config['rsv.lockfile']),
                 'RSV run lock file still present')
    core.state['rsv.running-service'] = False

def test_03_copy_server_to_local(self):
    if core.missing_rpm('bestman2-server', 'bestman2-client',
                        'voms-clients'):
        return
    if not core.state['bestman.started-server']:
        core.skip('bestman server not started')
        return
    srm_url = 'srm://%s:%s/%s?SFN=%s' % (TestBestman.__hostname,
                                         TestBestman.__port,
                                         TestBestman.__sfn,
                                         TestBestman.__remote_path)
    command = ('srm-copy', srm_url, 'file:///' + TestBestman.__local_path)
    status, stdout, stderr = core.system(command, True)
    fail = core.diagnose('Bestman copy, URL to local',
                         status, stdout, stderr)
    file_copied = os.path.exists(TestBestman.__local_path)
    self.assertEqual(status, 0, fail)
    self.assert_(file_copied, 'Copied file missing')
    files.remove(TestBestman.__local_path)

def test_04_remove_server_file(self):
    if core.missing_rpm('bestman2-server', 'bestman2-client',
                        'voms-clients'):
        return
    if not core.state['bestman.started-server']:
        core.skip('bestman server not started')
        return
    srm_url = 'srm://%s:%s/%s?SFN=%s' % (TestBestman.__hostname,
                                         TestBestman.__port,
                                         TestBestman.__sfn,
                                         TestBestman.__remote_path)
    command = ('srm-rm', srm_url)
    status, stdout, stderr = core.system(command, True)
    fail = core.diagnose('Bestman remove, URL file', status, stdout, stderr)
    file_removed = not os.path.exists(TestBestman.__remote_path)
    self.assertEqual(status, 0, fail)
    self.assert_(file_removed, 'Copied file still exists')
    files.remove(TestBestman.__temp_dir)

def test_03_stop_scheduler(self):
    if core.missing_rpm(*self.required_rpms):
        return
    if not core.state['torque.pbs-sched-running']:
        core.skip('did not start pbs scheduler')
        return
    command = ('service', 'pbs_sched', 'stop')
    stdout, _, fail = core.check_system(command, 'Stop pbs scheduler')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(not os.path.exists(core.config['torque.sched-lockfile']),
                 'PBS scheduler run lock file still present')
    files.restore(core.config['torque.pbs-nodes-file'], 'pbs')
    core.state['torque.pbs-sched-running'] = False

def test_03_remove_vo(self):
    if core.missing_rpm('voms-admin-server', 'voms-mysql-plugin'):
        return
    # Ask VOMS Admin to remove the VO and undeploy its database
    command = ('voms-admin-configure', 'remove',
               '--vo', core.config['voms.vo'],
               '--undeploy-database')
    stdout, stderr, fail = core.check_system(command, 'Remove VO')
    self.assert_('Database undeployed correctly!' in stdout, fail)
    # 'succesfully' (sic) matches the tool's actual output
    self.assert_(' succesfully removed.' in stdout, fail)
    # Really remove the database
    mysql_statement = "DROP DATABASE `voms_%s`" % (core.config['voms.vo'])
    command = ('mysql', '-u', 'root', '-e', mysql_statement)
    core.check_system(command, 'Drop MySQL VOMS database')

def test_04_modify_bestman_conf(self):
    if core.missing_rpm('bestman2-server', 'bestman2-client',
                        'voms-clients'):
        core.skip('Bestman not installed')
        return
    bestman_rc_path = '/etc/bestman2/conf/bestman2.rc'
    env_file = '/etc/sysconfig/bestman2'
    old_port = 'securePort=8443'
    new_port = 'securePort=10443'
    old_gridmap = 'GridMapFileName=/etc/bestman2/conf/grid-mapfile.empty'
    new_gridmap = 'GridMapFileName=/etc/grid-security/grid-mapfile'
    old_auth = 'BESTMAN_GUMS_ENABLED=yes'
    new_auth = 'BESTMAN_GUMS_ENABLED=no'
    files.replace(bestman_rc_path, old_port, new_port, backup=False)
    files.replace(bestman_rc_path, old_gridmap, new_gridmap, backup=False)
    files.replace(env_file, old_auth, new_auth, backup=False)

def test_03_start_pbs_sched(self):
    core.config['torque.sched-lockfile'] = '/var/lock/subsys/pbs_sched'
    core.state['torque.pbs-sched-running'] = False
    if core.missing_rpm(*self.required_rpms):
        return
    if os.path.exists(core.config['torque.sched-lockfile']):
        core.skip('pbs scheduler apparently running')
        return
    command = ('service', 'pbs_sched', 'start')
    stdout, _, fail = core.check_system(command, 'Start pbs scheduler daemon')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(os.path.exists(core.config['torque.sched-lockfile']),
                 'pbs sched run lock file missing')
    core.state['torque.pbs-sched-running'] = True

def test_05_add_mysql_admin(self):
    if core.missing_rpm('gums-service'):
        return
    host_dn, host_issuer = core.certificate_info(
        core.config['certs.hostcert'])
    mysql_template_path = '/usr/lib/gums/sql/addAdmin.mysql'
    self.assert_(os.path.exists(mysql_template_path),
                 'GUMS MySQL template exists')
    mysql_template = files.read(mysql_template_path,
                                as_single_string=True).strip()
    core.log_message(mysql_template)
    mysql_command = re.sub(r'@ADMINDN@', host_dn, mysql_template)
    core.log_message(mysql_command)
    command = ('mysql', '--user=gums', '-p' + core.config['gums.password'],
               '--execute=' + mysql_command)
    core.check_system(command, 'Add GUMS MySQL admin')

def test_03_pbs_job(self):
    if core.missing_rpm('globus-gram-job-manager-pbs',
                        'globus-gram-client-tools', 'globus-proxy-utils'):
        return
    if (not core.state['torque.pbs-configured']
            or not core.state['torque.pbs-mom-running']
            or not core.state['torque.pbs-server-running']
            or not core.state['globus.pbs_configured']):
        core.skip('pbs not running or configured')
        return
    command = ('globus-job-run', self.contact_string('pbs'),
               '/bin/echo', 'hello')
    stdout = core.check_system(command, 'globus-job-run on PBS job',
                               user=True)[0]
    self.assertEqual(stdout, 'hello\n',
                     'Incorrect output from globus-job-run on PBS job')

def test_07_config_va_properties(self):
    if core.missing_rpm('voms-admin-server'):
        return
    path = os.path.join('/etc/voms-admin', core.config['voms.vo'],
                        'voms.service.properties')
    contents = files.read(path)
    # Rebuild the file line by line, forcing the csrf setting; note that
    # reassigning the loop variable alone would not modify the list.
    had_csrf_line = False
    new_contents = []
    for line in contents:
        if 'voms.csrf.log_only' in line:
            line = 'voms.csrf.log_only = true\n'
            had_csrf_line = True
        elif line[-1] != '\n':
            line = line + '\n'
        new_contents.append(line)
    if not had_csrf_line:
        new_contents.append('voms.csrf.log_only = true\n')
    files.write(path, new_contents, backup=False)

def test_02_copy_server_to_local(self):
    if core.missing_rpm('globus-gridftp-server-progs', 'globus-ftp-client',
                        'globus-proxy-utils', 'globus-gass-copy-progs'):
        return
    hostname = socket.getfqdn()
    temp_dir = tempfile.mkdtemp()
    os.chmod(temp_dir, 0777)
    gsiftp_url = 'gsiftp://' + hostname + TestGridFTP.__data_path
    local_path = temp_dir + '/copied_file.txt'
    command = ('globus-url-copy', gsiftp_url, 'file://' + local_path)
    status, stdout, stderr = core.system(command, True)
    fail = core.diagnose('GridFTP copy, URL to local',
                         status, stdout, stderr)
    file_copied = os.path.exists(local_path)
    shutil.rmtree(temp_dir)
    self.assertEqual(status, 0, fail)
    self.assert_(file_copied, 'Copied file missing')

def test_01_start_condor(self):
    core.config['condor.lockfile'] = '/var/lock/subsys/condor_master'
    core.state['condor.started-service'] = False
    core.state['condor.running-service'] = False
    if core.missing_rpm('condor'):
        return
    if os.path.exists(core.config['condor.lockfile']):
        core.state['condor.running-service'] = True
        core.skip('apparently running')
        return
    command = ('service', 'condor', 'start')
    stdout, _, fail = core.check_system(command, 'Start Condor')
    self.assert_(stdout.find('error') == -1, fail)
    self.assert_(os.path.exists(core.config['condor.lockfile']),
                 'Condor run lock file missing')
    core.state['condor.started-service'] = True
    core.state['condor.running-service'] = True

def test_02_edg_mkgridmap(self):
    if core.missing_rpm('edg-mkgridmap', 'voms-server'):
        return
    command = ('edg-mkgridmap', '--conf', core.config['edg.conf'])
    os.environ['GRIDMAP'] = '/usr/share/osg-test/grid-mapfile'
    os.environ['USER_VO_MAP'] = '/usr/share/osg-test/user-vo-map'
    os.environ['EDG_MKGRIDMAP_LOG'] = '/usr/share/osg-test/edg-mkgridmap.log'
    os.environ['VO_LIST_FILE'] = '/usr/share/osg-test/vo-list-file'
    os.environ['UNDEFINED_ACCTS_FILE'] = '/usr/share/osg-test/undef-ids'
    core.check_system(command, 'Run edg-mkgridmap')
    pwd_entry = pwd.getpwnam(core.options.username)
    cert_path = os.path.join(pwd_entry.pw_dir, '.globus', 'usercert.pem')
    user_cert_dn, user_cert_issuer = core.certificate_info(cert_path)
    expected = '"%s" %s' % (user_cert_dn, core.options.username)
    contents = files.read(os.environ['GRIDMAP'], True)
    self.assert_(expected in contents, 'Expected grid-mapfile contents')

def test_002_setup_certificate(self):
    if core.missing_rpm('rsv'):
        return
    # TODO - on fermicloud machines we copy the hostcert. Can we do better?
    if not os.path.exists(os.path.dirname(core.config['rsv.certfile'])):
        os.makedirs(os.path.dirname(core.config['rsv.certfile']))
    if not os.path.exists(core.config['rsv.certfile']):
        shutil.copy('/etc/grid-security/hostcert.pem',
                    core.config['rsv.certfile'])
    if not os.path.exists(core.config['rsv.keyfile']):
        shutil.copy('/etc/grid-security/hostkey.pem',
                    core.config['rsv.keyfile'])
    (rsv_uid, rsv_gid) = pwd.getpwnam('rsv')[2:4]
    os.chown(core.config['rsv.certfile'], rsv_uid, rsv_gid)
    os.chmod(core.config['rsv.certfile'], 0444)
    os.chown(core.config['rsv.keyfile'], rsv_uid, rsv_gid)
    os.chmod(core.config['rsv.keyfile'], 0400)
    return

def test_01_copy_local_to_server_uberftp(self):
    if core.missing_rpm('globus-gridftp-server-progs', 'globus-ftp-client',
                        'globus-proxy-utils', 'globus-gass-copy-progs',
                        'uberftp'):
        return
    hostname = socket.getfqdn()
    temp_dir = tempfile.mkdtemp()
    os.chmod(temp_dir, 0777)
    local_dir = '/usr/share/osg-test'
    local_path = 'test_gridftp_data.txt'
    ftp_cmd = 'cd %s; lcd %s; put %s' % (temp_dir, local_dir, local_path)
    command = ('uberftp', hostname, ftp_cmd)
    status, stdout, stderr = core.system(command, True)
    fail = core.diagnose('UberFTP copy, local to URL',
                         status, stdout, stderr)
    file_copied = os.path.exists(os.path.join(temp_dir, local_path))
    shutil.rmtree(temp_dir)
    self.assertEqual(status, 0, fail)
    self.assert_(file_copied, 'Copied file missing')

def test_100_html_consumer(self):
    # This test must come after some of the metric tests so that we have
    # some job records to use to create an index.html
    if core.missing_rpm('rsv'):
        return
    index_file = "/usr/share/rsv/www/index.html"
    # We are going to make sure the html-consumer runs, and that the index
    # file is updated.
    old_mtime = os.stat(index_file).st_mtime
    stdout = core.check_system(
        "su -c '/usr/libexec/rsv/consumers/html-consumer' rsv",
        "run html-consumer", shell=True)[0]
    self.assert_('html-consumer initializing' in stdout)
    new_mtime = os.stat(index_file).st_mtime
    self.assert_(old_mtime != new_mtime)
    return

def test_08_advertise(self):
    if core.missing_rpm('voms-admin-server'):
        return
    hostname = socket.getfqdn()
    host_dn, host_issuer = core.certificate_info(
        core.config['certs.hostcert'])
    contents = ('"%s" "%s" "%d" "%s" "%s"\n' %
                (core.config['voms.vo'], hostname, 15151, host_dn,
                 core.config['voms.vo']))
    files.write('/etc/vomses', contents, owner='voms')
    if not os.path.isdir(core.config['voms.lsc-dir']):
        os.mkdir(core.config['voms.lsc-dir'])
    vo_lsc_path = os.path.join(core.config['voms.lsc-dir'],
                               hostname + '.lsc')
    files.write(vo_lsc_path, (host_dn + '\n', host_issuer + '\n'),
                backup=False)
    os.chmod(vo_lsc_path, 0644)
    core.system('ls -ldF /etc/*vom*', shell=True)
    core.system(('find', '/etc/grid-security/vomsdir', '-ls'))

def test_03_lfc_multilib(self):
    if core.missing_rpm('yum-utils'):
        return
    # We can't test this on 32-bit
    uname_out, _, _ = core.check_system(['uname', '-i'], 'getting arch')
    if re.search(r'i\d86', uname_out):
        core.skip('running on 32-bit')
        return
    cmdbase = ['repoquery', '--plugins']
    for repo in core.options.extrarepos:
        cmdbase.append('--enablerepo=%s' % repo)
    # Find the 32-bit lfc-python rpm
    stdout, _, _ = core.check_system(cmdbase + ['lfc-python.i386'],
                                     'lfc-python multilib (32bit)')
    if stdout.strip() == '':
        self.fail('32-bit lfc-python not found in 64-bit repo')
    # Sanity check: find the 64-bit lfc-python rpm
    stdout, _, _ = core.check_system(cmdbase + ['lfc-python.x86_64'],
                                     'lfc-python multilib (64bit)')
    if stdout.strip() == '':
        self.fail('64-bit lfc-python not found in 64-bit repo')
    # Find the 32-bit lfc-python26 rpm (on el5 only)
    if core.el_release() == 5:
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.i386'],
                                         'lfc-python26 multilib (32bit)')
        if stdout.strip() == '':
            self.fail('32-bit lfc-python26 not found in 64-bit repo')
        # Sanity check: find the 64-bit lfc-python26 rpm
        stdout, _, _ = core.check_system(cmdbase + ['lfc-python26.x86_64'],
                                         'lfc-python26 multilib (64bit)')
        if stdout.strip() == '':
            self.fail('64-bit lfc-python26 not found in 64-bit repo')

def test_01_cvmfs(self):
    if core.missing_rpm('cvmfs', 'cvmfs-keys'):
        return
    #TESTING
    command = ('cat', '/etc/cvmfs/default.local')
    status, stdout, stderr = core.system(command, False)
    # command = ('mkdir', '-p', '/mnt/testcvmfs')
    # status, stdout, stderr = core.system(command, False)
    # command = ('mount', '-t', 'cvmfs', 'cms.cern.ch', '/mnt/testcvmfs')
    # status, stdout, stderr = core.system(command, False)
    # command = ('ls', '/mnt/testcvmfs')
    # status, stdout, stderr = core.system(command, False)
    command = ('service', 'cvmfs', 'probe')
    status, stdout, stderr = core.system(command, False)
    #END TESTING
    command = ('ls', '/cvmfs')
    status, stdout, stderr = core.system(command, False)
    file_exists = os.path.exists('/cvmfs')
    self.assert_(file_exists, 'Cvmfs mount point missing')
    command = ('ls', '/cvmfs/cms.cern.ch')
    status, stdout, stderr = core.system(command, False)
    file_exists = os.path.exists('/cvmfs/cms.cern.ch')
    self.assert_(file_exists, 'Cvmfs cern mount point missing')
    command = ('ls', self.__check_path)
    status, stdout, stderr = core.system(command, False)
    file_exists = os.path.exists(self.__check_path)
    self.assert_(file_exists, 'Test cvmfs file missing')
    command = ('bash', '-c', 'source ' + self.__check_path)
    status, stdout, stderr = core.system(command, False)
    fail = core.diagnose('cvmfs example source a file on fs',
                         status, stdout, stderr)
    self.assertEqual(status, 0, fail)

def test_075_switch_to_globus_job_run(self):
    if core.missing_rpm('rsv'):
        return
    self.use_condor_g()
    return

def test_02_deconfig_sudoers(self):
    if core.missing_rpm('bestman2-server', 'bestman2-client'):
        return
    files.restore('/etc/sudoers', 'bestman')

def test_030_ping_metric(self):
    if core.missing_rpm('rsv'):
        return
    self.run_metric('org.osg.general.ping-host')
    return

def test_031_hostcert_expiry_metric(self):
    if core.missing_rpm('rsv'):
        return
    self.run_metric('org.osg.local.hostcert-expiry')
    return

def test_052_vo_supported_metric(self):
    if core.missing_rpm('rsv', 'globus-gatekeeper'):
        return
    self.run_metric('org.osg.general.vo-supported')
    return

def test_074_osg_version_with_globus_job_run(self):
    if core.missing_rpm('rsv', 'globus-gatekeeper'):
        return
    self.run_metric('org.osg.general.osg-version')
    return

def test_071_gram_authentication_with_user_proxy(self):
    if core.missing_rpm('rsv', 'globus-gatekeeper'):
        return
    self.run_metric('org.osg.globus.gram-authentication')
    return