class TestConfigureJobs(osgunittest.OSGTestCase):
    """Configurations for running jobs"""

    def test_01_set_job_env(self):
        # Jobs get submitted with condor_run and condor_ce_run
        core.state['jobs.env-set'] = False
        core.skip_ok_unless_one_installed(['htcondor-ce', 'condor'])

        osg_libdir = os.path.join('/var', 'lib', 'osg')
        try:
            os.makedirs(osg_libdir)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise

        core.config['osg.job-environment'] = os.path.join(
            osg_libdir, 'osg-job-environment.conf')
        core.config['osg.local-job-environment'] = os.path.join(
            osg_libdir, 'osg-local-job-environment.conf')

        files.write(core.config['osg.job-environment'],
                    "#!/bin/sh\nJOB_ENV='vdt'\nexport JOB_ENV",
                    owner='pbs', chmod=0o644)
        files.write(core.config['osg.local-job-environment'],
                    "#!/bin/sh\nLOCAL_JOB_ENV='osg'\nexport LOCAL_JOB_ENV",
                    owner='pbs', chmod=0o644)
        core.state['jobs.env-set'] = True
    def test_02_start_slurmdbd(self):
        core.state['slurmdbd.started-service'] = False
        self.slurm_reqs()
        core.skip_ok_unless_installed('slurm-slurmdbd')
        self.skip_bad_unless(mysql.is_running(), 'slurmdbd requires mysql')
        core.config['slurmdbd.config'] = '/etc/slurm/slurmdbd.conf'
        core.config['slurmdbd.user'] = "******"
        core.config['slurmdbd.name'] = "osg_test_slurmdb"

        mysql.check_execute("create database %s; " % core.config['slurmdbd.name'], 'create slurmdb')
        mysql.check_execute("create user %s; " % core.config['slurmdbd.user'], 'add slurmdb user')
        mysql.check_execute("grant usage on *.* to %s; " % core.config['slurmdbd.user'], 'slurmdb user access')
        mysql.check_execute("grant all privileges on %s.* to %s identified by '%s'; " %
                            (core.config['slurmdbd.name'],
                             core.config['slurmdbd.user'],
                             core.options.password),
                            'slurmdb user permissions')
        mysql.check_execute("flush privileges;", 'reload privileges')

        db_config_vals = {'name': core.config['slurmdbd.name'],
                          'user': core.config['slurmdbd.user'].split('\'')[1],
                          'pass': core.options.password}
        files.write(core.config['slurmdbd.config'],
                    SLURMDBD_CONFIG % db_config_vals,
                    owner='slurm', chmod=0o644)
        service.check_start('slurmdbd')

        # Adding the cluster to the database
        command = ('sacctmgr', '-i', 'add', 'cluster', CLUSTER_NAME)
        core.check_system(command, 'add slurm cluster')
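    # SLURMDBD_CONFIG is defined elsewhere in this module. Because it is
    # filled in with a dict ("SLURMDBD_CONFIG % db_config_vals"), it must use
    # named placeholders. A minimal sketch of what such a template could look
    # like (illustrative only, not the actual constant):
    #
    #   StorageType=accounting_storage/mysql
    #   StorageLoc=%(name)s
    #   StorageUser=%(user)s
    #   StoragePass=%(pass)s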
    def test_02_start_mom(self):
        if core.el_release() <= 6:
            core.config['torque.mom-lockfile'] = '/var/lock/subsys/pbs_mom'
        else:
            core.config['torque.mom-lockfile'] = '/var/lib/torque/mom_priv/mom.lock'
        core.state['torque.pbs-mom-running'] = False

        core.skip_ok_unless_installed(*self.required_rpms)
        self.skip_ok_if(os.path.exists(core.config['torque.mom-lockfile']),
                        'pbs mom apparently running')

        core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
        files.write(core.config['torque.mom-config'],
                    "$pbsserver %s\n" % core.get_hostname(),
                    owner='pbs')
        core.config['torque.mom-layout'] = '/var/lib/torque/mom_priv/mom.layout'
        files.write(core.config['torque.mom-layout'], "nodes=0", owner='pbs')

        command = ('service', 'pbs_mom', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs mom daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.mom-lockfile']),
                     'PBS mom run lock file missing')
        core.state['torque.pbs-mom-running'] = True
    def test_01_configure(self):
        for key, val in [("cache_authfile_path", CACHE_AUTHFILE_PATH),
                         ("cache_config_path", CACHE_CONFIG_PATH),
                         ("origin_config_path", ORIGIN_CONFIG_PATH),
                         ("caches_json_path", CACHES_JSON_PATH),
                         ("cache_http_port", CACHE_HTTP_PORT),
                         ("origin_dir", ORIGIN_DIR),
                         ("cache_dir", CACHE_DIR),
                         ("origin_xroot_port", ORIGIN_XROOT_PORT),
                         ("cache_xroot_port", CACHE_XROOT_PORT)]:
            _setcfg(key, val)

        xrootd_user = pwd.getpwnam("xrootd")
        for d in [_getcfg("origin_dir"), _getcfg("cache_dir"),
                  os.path.dirname(_getcfg("caches_json_path"))]:
            files.safe_makedirs(d)
            os.chown(d, xrootd_user.pw_uid, xrootd_user.pw_gid)

        for key, text in [("cache_config_path", CACHE_CONFIG_TEXT),
                          ("cache_authfile_path", CACHE_AUTHFILE_TEXT),
                          ("origin_config_path", ORIGIN_CONFIG_TEXT),
                          ("caches_json_path", CACHES_JSON_TEXT)]:
            files.write(_getcfg(key), text, owner=_NAMESPACE, chmod=0o644)
    def test_01_configure_lcmaps(self):
        core.state['glexec.lcmaps_written'] = False
        core.skip_ok_unless_installed('glexec', 'lcmaps-plugins-basic')

        # Use the lcmaps.db.gridmap.glexec template from OSG 3.3
        template = '''glexectracking = "lcmaps_glexec_tracking.mod"
                 "-exec /usr/sbin/glexec_monitor"

gridmapfile = "lcmaps_localaccount.mod"
              "-gridmap /etc/grid-security/grid-mapfile"

verifyproxy = "lcmaps_verify_proxy.mod"
              "--allow-limited-proxy"
              " -certdir /etc/grid-security/certificates"

good = "lcmaps_dummy_good.mod"
bad = "lcmaps_dummy_bad.mod"

authorize_only:
gridmapfile -> good | bad

glexec:
verifyproxy -> gridmapfile
gridmapfile -> glexectracking
'''
        files.write(core.config['lcmaps.db'], template, owner='glexec')
        core.state['glexec.lcmaps_written'] = True
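    # A note on the lcmaps.db policy syntax used above: in a rule like
    # "gridmapfile -> good | bad", the module on the left runs first;
    # evaluation continues with the module before "|" on success and with the
    # module after it on failure.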
    def test_03_modify_sudoers(self):
        core.skip_ok_unless_installed('bestman2-server', 'bestman2-client', 'gums-service')
        sudoers_path = '/etc/sudoers'
        contents = files.read(sudoers_path)
        srm_cmd = 'Cmnd_Alias SRM_CMD = /bin/rm, /bin/mkdir, /bin/rmdir, /bin/mv, /bin/cp, /bin/ls'
        srm_usr = '******'
        bestman_perm = 'bestman ALL=(SRM_USR) NOPASSWD: SRM_CMD'
        require_tty = 'Defaults requiretty'

        had_srm_cmd_line = False
        had_requiretty_commented = False
        for line in contents:
            if require_tty in line:
                if line.startswith("#"):
                    had_requiretty_commented = True
            if srm_cmd in line:
                had_srm_cmd_line = True

        new_contents = []
        for line in contents:
            if not had_requiretty_commented and line.strip() == require_tty.strip():
                # Comment out "Defaults requiretty" so sudo can run without a terminal
                new_contents.append('#' + line + '\n')
            else:
                new_contents.append(line.strip() + '\n')
        if not had_srm_cmd_line:
            new_contents.append(srm_cmd + '\n')
            new_contents.append(srm_usr + '\n')
            new_contents.append(bestman_perm + '\n')

        if not had_srm_cmd_line or not had_requiretty_commented:
            files.write(sudoers_path, new_contents, owner='bestman')
    def test_04_modify_bestman_conf(self):
        core.skip_ok_unless_installed('bestman2-server', 'bestman2-client', 'gums-service')
        bestman_rc_path = '/etc/bestman2/conf/bestman2.rc'
        old_port = 'securePort=8443'
        new_port = 'securePort=10443'
        files.replace(bestman_rc_path, old_port, new_port, backup=False)
        old_gridmap = 'GridMapFileName=/etc/bestman2/conf/grid-mapfile.empty'
        new_gridmap = 'GridMapFileName=/etc/grid-security/grid-mapfile'
        files.replace(bestman_rc_path, old_gridmap, new_gridmap, backup=False)
        files.replace(bestman_rc_path, 'eventLogLevel=INFO', 'eventLogLevel=DEBUG', backup=False)
        core.system(('cat', bestman_rc_path))

        env_file = '/etc/sysconfig/bestman2'
        old_auth = 'BESTMAN_GUMS_ENABLED=yes'
        new_auth = 'BESTMAN_GUMS_ENABLED=no'
        files.replace(env_file, old_auth, new_auth, backup=False)

        log4j_path = '/etc/bestman2/properties/log4j.properties'
        log4j_contents = files.read(log4j_path, as_single_string=True)
        log4j_contents = log4j_contents.replace('FATAL', 'INFO')
        files.write(log4j_path, log4j_contents, backup=False)
    def test_01_configure_xrootd(self):
        core.config['xrootd.tpc.config-1'] = '/etc/xrootd/xrootd-third-party-copy-1.cfg'
        core.config['xrootd.tpc.config-2'] = '/etc/xrootd/xrootd-third-party-copy-2.cfg'
        core.config['xrootd.tpc.http-port1'] = HTTP_PORT1
        core.config['xrootd.tpc.http-port2'] = HTTP_PORT2
        core.state['xrootd.started-http-server-1'] = False
        core.state['xrootd.started-http-server-2'] = False
        core.state['xrootd.tpc.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        core.skip_ok_unless_installed('globus-proxy-utils', 'xrootd', 'xrootd-scitokens',
                                      by_dependency=True)

        user = pwd.getpwnam("xrootd")
        lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps',
                           'vo-client', 'vo-client-lcmaps-voms')
        if all([core.rpm_is_installed(x) for x in lcmaps_packages]):
            core.log_message("Using xrootd-lcmaps authentication")
            sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:--loglevel,5'
            sec_protocol += ',--policy,authorize_only'
        else:
            core.log_message("Using XRootD mapfile authentication")
            sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'

        files.write(core.config['xrootd.tpc.config-1'],
                    XROOTD_CFG_TEXT % (sec_protocol,
                                       core.config['xrootd.tpc.http-port1'],
                                       core.config['xrootd.tpc.http-port1']),
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        files.write(core.config['xrootd.tpc.config-2'],
                    XROOTD_CFG_TEXT % (sec_protocol,
                                       core.config['xrootd.tpc.http-port2'],
                                       core.config['xrootd.tpc.http-port2']),
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        core.state['xrootd.tpc.backups-exist'] = True
    def test_03_modify_sudoers(self):
        core.skip_ok_unless_installed('bestman2-server', 'bestman2-client')
        sudoers_path = '/etc/sudoers'
        contents = files.read(sudoers_path)
        srm_cmd = 'Cmnd_Alias SRM_CMD = /bin/rm, /bin/mkdir, /bin/rmdir, /bin/mv, /bin/cp, /bin/ls'
        srm_usr = '******'
        bestman_perm = 'bestman ALL=(SRM_USR) NOPASSWD: SRM_CMD'
        require_tty = 'Defaults requiretty'

        had_srm_cmd_line = False
        had_requiretty_commented = False
        for line in contents:
            if require_tty in line:
                if line.startswith("#"):
                    had_requiretty_commented = True
            if srm_cmd in line:
                had_srm_cmd_line = True

        new_contents = []
        for line in contents:
            if not had_requiretty_commented and line.strip() == require_tty.strip():
                # Comment out "Defaults requiretty" so sudo can run without a terminal
                new_contents.append('#' + line + '\n')
            else:
                new_contents.append(line.strip() + '\n')
        if not had_srm_cmd_line:
            new_contents.append(srm_cmd + '\n')
            new_contents.append(srm_usr + '\n')
            new_contents.append(bestman_perm + '\n')

        if not had_srm_cmd_line or not had_requiretty_commented:
            files.write(sudoers_path, new_contents, owner='bestman')
    def test_03_config_parameters(self):
        core.skip_ok_unless_installed('gratia-service')
        core.config['gratia.host'] = core.get_hostname()
        core.config['gratia.config.dir'] = '/etc/gratia'
        # The name of the gratia directory changed from "collector" to
        # "services" as of gratia-service 1.13.5
        gratia_version = core.get_package_envra('gratia-service')[2]
        gratia_version_split = gratia_version.split('.')
        if self.tuple_cmp(gratia_version_split, ['1', '13', '5']) < 0:
            core.config['gratia.directory'] = "collector"
        else:
            core.config['gratia.directory'] = "services"

        core.config['certs.httpcert'] = '/etc/grid-security/http/httpcert.pem'
        core.config['certs.httpkey'] = '/etc/grid-security/http/httpkey.pem'

        filename = "/tmp/gratia_reader_pass." + str(os.getpid()) + ".txt"
        contents = "[client]\npassword=reader\n"
        files.write(filename, contents, backup=False)
        core.config['gratia.sql.file'] = filename
        core.config['gratia.sql.querystring'] = ("\" | mysql --defaults-extra-file=\"" +
                                                 core.config['gratia.sql.file'] +
                                                 "\" --skip-column-names -B --unbuffered --user=reader --port=3306")
        core.config['gratia.tmpdir.prefix'] = "/var/lib/gratia/tmp/gratiafiles/"
        core.config['gratia.tmpdir.postfix'] = ("_" + core.config['gratia.host'] +
                                                "_" + core.config['gratia.host'] + "_8880")
        core.config['gratia.log.file'] = "/var/log/gratia-service/gratia.log"
        core.state['gratia.log.stat'] = None
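    # The querystring above is only the tail of a shell pipeline; a later test
    # presumably prepends the actual query, along the lines of (illustrative,
    # not a line from this suite):
    #
    #   command = "echo \"use gratia_osgtest; select * from Site;" + core.config['gratia.sql.querystring']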
    def test_03_config_myproxy(self):
        core.skip_ok_unless_installed('myproxy-server')
        conf_file_contents = files.read('/usr/share/osg-test/test_myproxy_server.config')
        files.write('/etc/myproxy-server.config', conf_file_contents, owner='root', backup=True)
        if core.el_release() <= 6:
            core.config['myproxy.lock-file'] = '/var/lock/subsys/myproxy-server'
        else:
            core.config['myproxy.lock-file'] = '/var/run/myproxy-server/myproxy.pid'
    def test_04_config_tomcat_endorsed_jars(self):
        core.skip_ok_unless_installed(tomcat.pkgname())
        old_contents = files.read(tomcat.conffile(), True)
        line = 'JAVA_ENDORSED_DIRS="${JAVA_ENDORSED_DIRS+$JAVA_ENDORSED_DIRS:}/usr/share/voms-admin/endorsed"\n'
        if old_contents.find(line) == -1:
            new_contents = old_contents + "\n" + line
            files.write(tomcat.conffile(), new_contents, owner='tomcat')
    def test_02_config_tomcat_properties(self):
        if core.missing_rpm(tomcat.pkgname(), 'emi-trustmanager-tomcat'):
            return

        server_xml_path = os.path.join(tomcat.sysconfdir(), 'server.xml')
        old_contents = files.read(server_xml_path, True)
        pattern = re.compile(r'crlRequired=".*?"', re.IGNORECASE)
        new_contents = pattern.sub('crlRequired="false"', old_contents)
        files.write(server_xml_path, new_contents, owner='tomcat')
    def test_01_config_mkgridmap(self):
        core.config['edg.conf'] = '/usr/share/osg-test/edg-mkgridmap.conf'
        core.skip_ok_unless_installed('edg-mkgridmap', 'voms-server')

        contents = ('group vomss://%s:8443/voms/%s %s\n' %
                    (socket.getfqdn(), core.config['voms.vo'], core.options.username))
        files.write(core.config['edg.conf'], contents, owner='edg')
        core.system(('cat', core.config['edg.conf']))
def advertise_lsc(vo, hostcert='/etc/grid-security/hostcert.pem'):
    """Create the VO directory and .lsc file under /etc/grid-security/vomsdir
    for the given VO
    """
    host_dn, host_issuer = cagen.certificate_info(hostcert)
    hostname = socket.getfqdn()
    lsc_dir = os.path.join('/etc/grid-security/vomsdir', vo)
    if not os.path.isdir(lsc_dir):
        os.makedirs(lsc_dir)
    vo_lsc_path = os.path.join(lsc_dir, hostname + '.lsc')
    files.write(vo_lsc_path, (host_dn + '\n', host_issuer + '\n'), backup=False, chmod=0o644)
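# For illustration: with vo="osgtestvo" on host "ce.example.edu" (both made
# up), advertise_lsc would create
# /etc/grid-security/vomsdir/osgtestvo/ce.example.edu.lsc containing the host
# DN followed by its issuer DN, e.g.:
#
#   /DC=org/DC=Example/OU=Services/CN=ce.example.edu
#   /DC=org/DC=Example/CN=Example CA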
    def test_04_config_tomcat_endorsed_jars(self):
        if core.missing_rpm(tomcat.pkgname()):
            return

        old_contents = files.read(tomcat.conffile(), True)
        line = 'JAVA_ENDORSED_DIRS="${JAVA_ENDORSED_DIRS+$JAVA_ENDORSED_DIRS:}/usr/share/voms-admin/endorsed"\n'
        if old_contents.find(line) == -1:
            new_contents = old_contents + "\n" + line
            files.write(tomcat.conffile(), new_contents, owner='tomcat')
    def test_03_configure_ce(self):
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')

        # Set up Condor, PBS, and Slurm routes
        # Leave the GRIDMAP knob intact to verify that it works with the LCMAPS VOMS plugin
        core.config['condor-ce.condor-ce-cfg'] = '/etc/condor-ce/config.d/99-osgtest.condor-ce.conf'

        # Add host DN to condor_mapfile
        if core.options.hostcert:
            core.config['condor-ce.condorce_mapfile'] = '/etc/condor-ce/condor_mapfile.osg-test'
            hostcert_dn, _ = cagen.certificate_info(core.config['certs.hostcert'])
            mapfile_contents = files.read('/etc/condor-ce/condor_mapfile')
            mapfile_contents.insert(0, re.sub(r'([/=\.])', r'\\\1', "GSI \"^%s$\" " % hostcert_dn) +
                                    "%[email protected]\n" % core.get_hostname())
            files.write(core.config['condor-ce.condorce_mapfile'], mapfile_contents,
                        owner='condor-ce', chmod=0o644)
        else:
            core.config['condor-ce.condorce_mapfile'] = '/etc/condor-ce/condor_mapfile'

        condor_contents = """GRIDMAP = /etc/grid-security/grid-mapfile
CERTIFICATE_MAPFILE = %s
ALL_DEBUG=D_FULLDEBUG
JOB_ROUTER_DEFAULTS = $(JOB_ROUTER_DEFAULTS) [set_default_maxMemory = 128;]
JOB_ROUTER_ENTRIES = \\
   [ \\
     GridResource = "batch pbs"; \\
     TargetUniverse = 9; \\
     name = "Local_PBS"; \\
     Requirements = target.osgTestBatchSystem =?= "pbs"; \\
   ] \\
   [ \\
     GridResource = "batch slurm"; \\
     TargetUniverse = 9; \\
     name = "Local_Slurm"; \\
     Requirements = target.osgTestBatchSystem =?= "slurm"; \\
   ] \\
   [ \\
     TargetUniverse = 5; \\
     name = "Local_Condor"; \\
     Requirements = (target.osgTestBatchSystem =!= "pbs" && target.osgTestBatchSystem =!= "slurm"); \\
   ]

JOB_ROUTER_SCHEDD2_SPOOL=/var/lib/condor/spool
JOB_ROUTER_SCHEDD2_NAME=$(FULL_HOSTNAME)
JOB_ROUTER_SCHEDD2_POOL=$(FULL_HOSTNAME):9618
""" % core.config['condor-ce.condorce_mapfile']

        if core.rpm_is_installed('htcondor-ce-view'):
            condor_contents += "\nDAEMON_LIST = $(DAEMON_LIST), CEVIEW, GANGLIAD, SCHEDD"
            core.config['condor-ce.view-port'] = condor.ce_config_val('HTCONDORCE_VIEW_PORT')

        files.write(core.config['condor-ce.condor-ce-cfg'], condor_contents,
                    owner='condor-ce', chmod=0o644)
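    # For illustration: a job selects one of the routes above by setting the
    # osgTestBatchSystem attribute, e.g. with a hypothetical submit file line
    #
    #   +osgTestBatchSystem = "pbs"
    #
    # Jobs that leave the attribute unset fall through to the Local_Condor route.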
def advertise_vomses(vo, hostcert='/etc/grid-security/hostcert.pem'):
    """Edit /etc/vomses to advertise the current host as the VOMS server for
    the given VO. Caller is responsible for preserving and restoring /etc/vomses.
    """
    host_dn, _ = cagen.certificate_info(hostcert)
    hostname = socket.getfqdn()
    vomses_path = '/etc/vomses'
    contents = ('"%s" "%s" "%d" "%s" "%s"\n' % (vo, hostname, 15151, host_dn, vo))
    files.write(vomses_path, contents, backup=False, chmod=0o644)
    def test_01_write_condor_config(self):
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
        core.config['condor-ce.condor-cfg'] = '/etc/condor/config.d/99-osgtest.condor.conf'
        contents = """SCHEDD_INTERVAL=5"""
        files.write(core.config['condor-ce.condor-cfg'], contents,
                    owner='condor-ce', chmod=0o644)
def advertise_vomses(vo, hostcert='/etc/grid-security/hostcert.pem'):
    """Edit /etc/vomses to advertise the current host as the VOMS server for
    the given VO. Caller is responsible for preserving and restoring /etc/vomses.
    """
    host_dn, _ = cagen.certificate_info(hostcert)
    hostname = core.get_hostname()
    vomses_path = '/etc/vomses'
    contents = ('"%s" "%s" "%d" "%s" "%s"\n' % (vo, hostname, VOPORT, host_dn, vo))
    files.write(vomses_path, contents, backup=False, chmod=0o644)
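# For illustration: advertise_vomses("osgtestvo") on host "ce.example.edu"
# (made-up hostname and DN; port shown assuming VOPORT == 15151, the literal
# used in the earlier variant) writes one line in the standard vomses format:
#
#   "osgtestvo" "ce.example.edu" "15151" "/DC=org/DC=Example/CN=ce.example.edu" "osgtestvo"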
    def test_03_disable_persistence(self):
        core.skip_ok_unless_installed(tomcat.pkgname())
        self.skip_ok_if(core.options.nightly, 'Allow persistence in the nightlies')
        # An empty Manager pathname disables session persistence across Tomcat restarts
        contents = '''
<Context>
    <WatchedResource>WEB-INF/web.xml</WatchedResource>
    <Manager pathname="" />
</Context>
'''
        files.write(tomcat.contextfile(), contents, owner='tomcat')
    def test_01_config_mkgridmap(self):
        core.config['edg.conf'] = '/usr/share/osg-test/edg-mkgridmap.conf'
        core.skip_ok_unless_installed('edg-mkgridmap', 'voms-admin-server')
        self.skip_bad_unless(core.state['voms-admin.read-members'], 'Cannot read VO member list')
        self.skip_bad_unless(core.state['tomcat.started'], 'Tomcat not started')

        contents = ('group vomss://%s:8443/voms/%s %s\n' %
                    (socket.getfqdn(), core.config['voms.vo'], core.options.username))
        files.write(core.config['edg.conf'], contents, owner='edg')
        core.system(('cat', core.config['edg.conf']))
    def test_06_configure_scitokens(self):
        self.skip_ok_unless("SCITOKENS" in core.config['xrootd.security'],
                            "Not using SciTokens for XRootD")
        scitokens_conf_path = "/etc/xrootd/scitokens.conf"
        files.write(scitokens_conf_path, SCITOKENS_CONF_TEXT, owner='xrootd', chmod=0o644)

        if os.path.exists("/etc/xrootd/config.d/50-osg-scitokens.cfg"):
            core.log_message("Not adding XRootD SciTokens config, already exists")
        else:
            files.append(core.config['xrootd.config'],
                         XROOTD5_SCITOKENS_CFG_TXT % scitokens_conf_path,
                         backup=False)
    def test_01_config_mkgridmap(self):
        core.config['edg.conf'] = '/usr/share/osg-test/edg-mkgridmap.conf'
        if core.missing_rpm('edg-mkgridmap', 'voms-server'):
            return

        contents = ('group vomss://%s:8443/voms/%s %s\n' %
                    (socket.getfqdn(), core.config['voms.vo'], core.options.username))
        files.write(core.config['edg.conf'], contents, owner='edg')
        core.system(('cat', core.config['edg.conf']))
    def setup_cvmfs(self):
        command = ('mkdir', '-p', '/tmp/cvmfs')
        status, stdout, stderr = core.system(command, False)

        contents = []
        # The backtick expression builds a comma-separated repository list from
        # the OSG defaults plus whatever is already mounted under /cvmfs
        contents.append("CVMFS_REPOSITORIES=\"`echo $((echo oasis.opensciencegrid.org;echo cms.cern.ch;ls /cvmfs)|sort -u)|tr ' ' ,`\"\n")
        contents.append("CVMFS_QUOTA_LIMIT=10000\n")
        contents.append("CVMFS_HTTP_PROXY=\"http://cache01.hep.wisc.edu:8001|http://cache02.hep.wisc.edu:8001;DIRECT\"\n")
        files.write("/etc/cvmfs/default.local", contents, owner='cvmfs', chmod=0o644)

        contents = []
        contents.append("CVMFS_SERVER_URL=\"http://cvmfs.fnal.gov:8000/opt/@org@;http://cvmfs.racf.bnl.gov:8000/opt/@org@;http://cvmfs-stratum-one.cern.ch:8000/opt/@org@;http://cernvmfs.gridpp.rl.ac.uk:8000/opt/@org@\"\n")
        files.write("/etc/cvmfs/domain.d/cern.ch.local", contents, owner='cvmfs', chmod=0o644)
def advertise_lsc(vo, hostcert='/etc/grid-security/hostcert.pem'):
    """Create the VO directory and .lsc file under /etc/grid-security/vomsdir
    for the given VO
    """
    host_dn, host_issuer = cagen.certificate_info(hostcert)
    hostname = core.get_hostname()
    lsc_dir = os.path.join('/etc/grid-security/vomsdir', vo)
    if not os.path.isdir(lsc_dir):
        os.makedirs(lsc_dir)
    vo_lsc_path = os.path.join(lsc_dir, hostname + '.lsc')
    files.write(vo_lsc_path, (host_dn + '\n', host_issuer + '\n'), backup=False, chmod=0o644)
    def test_01_configure_condor(self):
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')
        core.config['condor-ce.condor-cfg'] = '/etc/condor/config.d/99-osgtest.condor.conf'
        contents = """SCHEDD_INTERVAL=1
QUEUE_SUPER_USER_MAY_IMPERSONATE = .*"""
        files.write(core.config['condor-ce.condor-cfg'], contents,
                    owner='condor-ce', chmod=0o644)
    def test_03_configure_ce(self):
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')

        # Set up Condor, PBS, and Slurm routes
        # Leave the GRIDMAP knob intact to verify that it works with the LCMAPS VOMS plugin
        core.config['condor-ce.condor-ce-cfg'] = '/etc/condor-ce/config.d/99-osgtest.condor-ce.conf'
        condor_contents = """GRIDMAP = /etc/grid-security/grid-mapfile
ALL_DEBUG=D_CAT D_ALWAYS:2
JOB_ROUTER_DEFAULTS = $(JOB_ROUTER_DEFAULTS) [set_default_maxMemory = 128;]
JOB_ROUTER_ENTRIES = \\
   [ \\
     GridResource = "batch pbs"; \\
     TargetUniverse = 9; \\
     name = "Local_PBS"; \\
     Requirements = target.osgTestBatchSystem =?= "pbs"; \\
   ] \\
   [ \\
     GridResource = "batch slurm"; \\
     TargetUniverse = 9; \\
     name = "Local_Slurm"; \\
     Requirements = target.osgTestBatchSystem =?= "slurm"; \\
   ] \\
   [ \\
     TargetUniverse = 5; \\
     name = "Local_Condor"; \\
     Requirements = (target.osgTestBatchSystem =!= "pbs" && target.osgTestBatchSystem =!= "slurm"); \\
   ]

JOB_ROUTER_SCHEDD2_SPOOL=/var/lib/condor/spool
JOB_ROUTER_SCHEDD2_NAME=$(FULL_HOSTNAME)
JOB_ROUTER_SCHEDD2_POOL=$(FULL_HOSTNAME):9618

AUTH_SSL_SERVER_CERTFILE = /etc/grid-security/hostcert.pem
AUTH_SSL_SERVER_KEYFILE = /etc/grid-security/hostkey.pem
AUTH_SSL_SERVER_CADIR = /etc/grid-security/certificates
AUTH_SSL_SERVER_CAFILE =
AUTH_SSL_CLIENT_CERTFILE = /etc/grid-security/hostcert.pem
AUTH_SSL_CLIENT_KEYFILE = /etc/grid-security/hostkey.pem
AUTH_SSL_CLIENT_CADIR = /etc/grid-security/certificates
AUTH_SSL_CLIENT_CAFILE =
"""

        if core.rpm_is_installed('htcondor-ce-view'):
            condor_contents += "\nDAEMON_LIST = $(DAEMON_LIST), CEVIEW, GANGLIAD, SCHEDD"
            core.config['condor-ce.view-port'] = condor.ce_config_val('HTCONDORCE_VIEW_PORT')

        files.write(core.config['condor-ce.condor-ce-cfg'], condor_contents,
                    owner='condor-ce', chmod=0o644)
    def test_03_start_trqauthd(self):
        core.state['trqauthd.started-service'] = False
        core.config['torque.pbs-servername-file'] = '/var/lib/torque/server_name'
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_ok_if(service.is_running('trqauthd'), 'trqauthd is already running')

        # Set the hostname as the server name instead of localhost;
        # this config is required before starting trqauthd
        files.write(core.config['torque.pbs-servername-file'],
                    "%s" % core.get_hostname(),
                    owner='pbs')
        service.check_start('trqauthd')
    def test_09_config_user_vo_map(self):
        core.skip_ok_unless_installed('gratia-service')
        user_vo_map_file = '/var/lib/osg/user-vo-map'
        core.config['gratia.user-vo-map'] = user_vo_map_file
        conf_file_contents = files.read('/usr/share/osg-test/gratia/user-vo-map')
        if files.filesBackedup(user_vo_map_file, 'root'):
            files.write(core.config['gratia.user-vo-map'], conf_file_contents, backup=False)
        else:
            files.write(core.config['gratia.user-vo-map'], conf_file_contents, owner='root')
def setup_automount():
    automount_conf_path = '/etc/auto.master'
    files.preserve(automount_conf_path, 'cvmfs')
    try:
        contents = files.read(automount_conf_path)
    except IOError:
        # Sometimes this file doesn't exist
        contents = []
    for line in contents:
        if "cvmfs" in line:
            return
    contents.append("/cvmfs /etc/auto.cvmfs\n")
    files.write(automount_conf_path, contents, owner='cvmfs', backup=False, chmod=0o644)
    def setup_automount(self):
        automount_conf_path = '/etc/auto.master'
        files.preserve(automount_conf_path, 'cvmfs')
        try:
            contents = files.read(automount_conf_path)
        except IOError:
            # Sometimes this file doesn't exist
            contents = []
        for line in contents:
            if "cvmfs" in line:
                return
        contents.append("/cvmfs /etc/auto.cvmfs\n")
        files.write(automount_conf_path, contents, owner='cvmfs', backup=False, chmod=0o644)
    def test_01_configure(self):
        core.config['lcmaps.db'] = '/etc/lcmaps.db'
        core.config['lcmaps.gsi-authz'] = '/etc/grid-security/gsi-authz.conf'
        core.skip_ok_unless_installed(*self.required_rpms)

        template = files.read('/usr/share/lcmaps/templates/lcmaps.db.vomsmap',
                              as_single_string=True)
        files.write(core.config['lcmaps.db'], template, owner='lcmaps')
        files.write(core.config['lcmaps.gsi-authz'],
                    "globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n",
                    owner='lcmaps')
    def test_01_set_config(self):
        port = core.config['gsisshd.port'] = '2222'
        core.state['gsisshd.can-run'] = (
            not (core.el_release() >= 7 and
                 core.state['selinux.mode'] and
                 not core.rpm_is_installed('policycoreutils-python')))
        self.skip_ok_unless(
            core.state['gsisshd.can-run'],
            "Can't run with SELinux on EL >= 7 without policycoreutils-python")
        files.write(SSHD_CONFIG, SSHD_CONFIG_TEXT % {'port': port},
                    owner='gsissh', chmod=0o600)
    def setup_fuse(self):
        fuse_conf_path = '/etc/fuse.conf'
        files.preserve(fuse_conf_path, 'cvmfs')
        try:
            contents = files.read(fuse_conf_path)
        except IOError:
            # Sometimes this file doesn't exist
            contents = []
        for line in contents:
            if "user_allow_other" in line:
                return
        contents.append("user_allow_other\n")
        files.write(fuse_conf_path, contents, owner='cvmfs', backup=False, chmod=0o644)
    def setup_fuse(self):
        fuse_conf_path = '/etc/fuse.conf'
        try:
            contents = files.read(fuse_conf_path)
        except IOError:
            # Sometimes this file doesn't exist
            contents = []
        for line in contents:
            if "user_allow_other" in line:
                return
        contents.append("user_allow_other\n")
        files.write(fuse_conf_path, contents, 'root')
        os.chmod(fuse_conf_path, 0o644)
def setup_fuse():
    fuse_conf_path = '/etc/fuse.conf'
    files.preserve(fuse_conf_path, 'cvmfs')
    try:
        contents = files.read(fuse_conf_path)
    except IOError:
        # Sometimes this file doesn't exist
        contents = []
    for line in contents:
        if "user_allow_other" in line:
            return
    contents.append("user_allow_other\n")
    files.write(fuse_conf_path, contents, owner='cvmfs', backup=False, chmod=0o644)
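# The setup_fuse/setup_automount variants above all follow the same idempotent
# "ensure this line is in the file" pattern: read a config file that may not
# exist, return early if the wanted line is present, otherwise append it and
# write the file back. A hypothetical helper (not part of osg-test) that
# captures the pattern:

def ensure_line(path, marker, line, owner='root'):
    try:
        contents = files.read(path)
    except IOError:
        contents = []  # the file may not exist yet
    for existing in contents:
        if marker in existing:
            return  # already configured; nothing to do
    contents.append(line)
    files.write(path, contents, owner=owner, backup=False, chmod=0o644)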
    def test_01_set_config(self):
        port = core.config['gsisshd.port'] = '2222'
        core.state['gsisshd.can-run'] = (
            not (core.el_release() >= 7 and
                 core.state['selinux.mode'] and
                 not core.dependency_is_installed("/usr/sbin/semanage")))
        self.skip_ok_unless(
            core.state['gsisshd.can-run'],
            "Can't run with SELinux on EL >= 7 without semanage")
        files.write(SSHD_CONFIG, SSHD_CONFIG_TEXT % {'port': port},
                    owner='gsissh', chmod=0o600)
    def setup_automount(self):
        automount_conf_path = '/etc/auto.master'
        try:
            contents = files.read(automount_conf_path)
        except IOError:
            # Sometimes this file doesn't exist
            contents = []
        for line in contents:
            if "cvmfs" in line:
                return
        contents.append("/cvmfs /etc/auto.cvmfs\n")
        files.write(automount_conf_path, contents, 'root')
        os.chmod(automount_conf_path, 0o644)
    def test_01_start_mom(self):
        core.state['pbs_mom.started-service'] = False
        core.skip_ok_unless_installed(*self.required_rpms, by_dependency=True)
        self.skip_ok_if(service.is_running('pbs_mom'), 'PBS mom already running')

        core.config['torque.mom-config'] = '/var/lib/torque/mom_priv/config'
        files.write(core.config['torque.mom-config'],
                    "$pbsserver %s\n" % core.get_hostname(),
                    owner='pbs')
        core.config['torque.mom-layout'] = '/var/lib/torque/mom_priv/mom.layout'
        files.write(core.config['torque.mom-layout'], "nodes=0", owner='pbs')
        service.check_start('pbs_mom')
    def test_03_configure_globus_pbs(self):
        core.config['globus.pbs-config'] = '/etc/globus/globus-pbs.conf'
        core.state['globus.pbs_configured'] = False
        core.skip_ok_unless_installed('globus-gram-job-manager-pbs')

        config_file = open(core.config['globus.pbs-config']).read()
        server_name = core.get_hostname()
        re_obj = re.compile(r'^pbs_default=.*$', re.MULTILINE)
        if 'pbs_default' in config_file:
            config_file = re_obj.sub("pbs_default=\"%s\"" % server_name, config_file)
        else:
            config_file += "pbs_default=\"%s\"" % server_name
        files.write(core.config['globus.pbs-config'], config_file, owner='pbs')
        core.state['globus.pbs_configured'] = True
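    # For illustration: with server_name = "ce.example.edu" (hypothetical),
    # the MULTILINE regex rewrites an existing assignment in place:
    #
    #   'pbs_default=old\nlog_path=/var/spool/torque\n'
    #     becomes
    #   'pbs_default="ce.example.edu"\nlog_path=/var/spool/torque\n'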
    def test_01_configure_xrootd(self):
        core.config['xrootd.tpc.config-1'] = '/etc/xrootd/xrootd-third-party-copy-1.cfg'
        core.config['xrootd.tpc.config-2'] = '/etc/xrootd/xrootd-third-party-copy-2.cfg'
        core.config['xrootd.tpc.basic-config'] = '/etc/xrootd/config.d/36-osg-test-tpc.cfg'
        core.state['xrootd.started-http-server-1'] = False
        core.state['xrootd.started-http-server-2'] = False
        core.state['xrootd.tpc.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        core.skip_ok_unless_installed('globus-proxy-utils', by_dependency=True)

        user = pwd.getpwnam("xrootd")
        files.write(core.config['xrootd.tpc.config-1'], XROOTD_CFG_TEXT,
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        files.write(core.config['xrootd.tpc.config-2'], XROOTD_CFG_TEXT,
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        files.write('/etc/xrootd/config.d/40-osg-standalone.cfg', XROOTD_STANDALONE_TXT,
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        files.write(core.config['xrootd.tpc.basic-config'], XROOTD_MACAROON_TXT,
                    owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
        core.state['xrootd.tpc.backups-exist'] = True
    def test_04_start_pbs(self):
        core.config['torque.pbs-lockfile'] = '/var/lock/subsys/pbs_server'
        core.state['torque.pbs-server-running'] = False
        core.state['torque.pbs-configured'] = False
        core.state['torque.nodes-up'] = False
        if core.el_release() == 5:
            core.config['torque.pbs-nodes-file'] = '/var/torque/server_priv/nodes'
        elif core.el_release() == 6:
            core.config['torque.pbs-nodes-file'] = '/var/lib/torque/server_priv/nodes'
        else:
            core.skip('Distribution version not supported')

        if core.missing_rpm(*self.required_rpms):
            return
        if os.path.exists(core.config['torque.pbs-lockfile']):
            core.skip('pbs server apparently running')
            return

        # Add the local node as a compute node
        files.write(core.config['torque.pbs-nodes-file'],
                    "%s np=1\n" % core.get_hostname(),
                    owner='pbs')
        command = ('service', 'pbs_server', 'start')
        stdout, _, fail = core.check_system(command, 'Start pbs server daemon')
        self.assert_(stdout.find('error') == -1, fail)
        self.assert_(os.path.exists(core.config['torque.pbs-lockfile']),
                     'pbs server run lock file missing')
        core.state['torque.pbs-server'] = True
        core.state['torque.pbs-server-running'] = True

        core.check_system("echo '%s' | qmgr %s" % (self.pbs_config, core.get_hostname()),
                          "Configuring pbs server",
                          shell=True)
        core.state['torque.pbs-configured'] = True

        # Wait up to 10 minutes for the node to come up and trigger a failure
        # if that doesn't happen
        start_time = time.time()
        while (time.time() - start_time) < 600:
            command = ('/usr/bin/qnodes', '-s', core.get_hostname())
            stdout, _, fail = core.check_system(command, 'Get pbs node info')
            self.assert_(stdout.find('error') == -1, fail)
            if stdout.find('state = free') != -1:
                core.state['torque.nodes-up'] = True
                break
            time.sleep(5)  # pause between polls (assumed interval)
        if not core.state['torque.nodes-up']:
            self.fail('PBS nodes not coming up')
    def test_02_config_tomcat(self):
        core.skip_ok_unless_installed(tomcat.pkgname())
        old_contents = files.read(tomcat.conffile(), True)

        # Endorse JARs
        lines = ['JAVA_ENDORSED_DIRS="${JAVA_ENDORSED_DIRS+$JAVA_ENDORSED_DIRS:}/usr/share/voms-admin/endorsed"']
        # Improve Tomcat 7 startup times (SOFTWARE-2383)
        lines.append('JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom"')

        # Keep only the lines not already present (don't mutate the list while
        # iterating over it)
        lines = [line for line in lines if old_contents.find(line) == -1]
        new_contents = '\n'.join([old_contents] + lines)
        files.write(tomcat.conffile(), new_contents, owner='tomcat')
    def test_03_configure_globus_pbs(self):
        core.config['globus.pbs-config'] = '/etc/globus/globus-pbs.conf'
        core.state['globus.pbs_configured'] = False
        if not core.rpm_is_installed('globus-gram-job-manager-pbs'):
            return

        config_file = open(core.config['globus.pbs-config']).read()
        server_name = core.get_hostname()
        re_obj = re.compile(r'^pbs_default=.*$', re.MULTILINE)
        if 'pbs_default' in config_file:
            config_file = re_obj.sub("pbs_default=\"%s\"" % server_name, config_file)
        else:
            config_file += "pbs_default=\"%s\"" % server_name
        files.write(core.config['globus.pbs-config'], config_file, owner='pbs')
        core.state['globus.pbs_configured'] = True
    def test_02_uninstall_gratia_database(self):
        core.skip_ok_unless_installed('gratia-service')
        filename = "/tmp/gratia_admin_pass." + str(os.getpid()) + ".txt"
        contents = "[client]\npassword=\n"
        files.write(filename, contents, backup=False)

        # Command to drop the gratia database is:
        # echo "drop database gratia;" | mysql --defaults-extra-file="/tmp/gratia_admin_pass.<pid>.txt" -B --unbuffered --user=root --port=3306
        command = ("echo \"drop database gratia_osgtest;\" | mysql --defaults-extra-file=\"" +
                   filename + "\" -B --unbuffered --user=root --port=3306")
        core.check_system(command, 'Unable to drop Gratia Database.', shell=True)
        files.remove(filename)
        # At this time, remove the gratia reader password file also
        files.remove(core.config['gratia.sql.file'])
    def test_01_start_xrootd(self):
        core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
        core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
        core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
        core.config['xrootd.config'] = '/etc/xrootd/xrootd-clustered.cfg'
        core.config['xrootd.gsi'] = "ON"
        core.state['xrootd.started-server'] = False
        core.state['xrootd.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        core.skip_ok_unless_installed('xrootd', by_dependency=True)

        user = pwd.getpwnam("xrootd")
        if core.config['xrootd.gsi'] == "ON":
            core.skip_ok_unless_installed('globus-proxy-utils')
            core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
            core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

            lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps',
                               'vo-client', 'vo-client-lcmaps-voms')
            if all([core.rpm_is_installed(x) for x in lcmaps_packages]):
                core.log_message("Using xrootd-lcmaps authentication")
                sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:--loglevel,5'
                if core.package_version_compare('xrootd-lcmaps', '1.4.0') >= 0:
                    sec_protocol += ',--policy,authorize_only'
            else:
                core.log_message("Using XRootD mapfile authentication")
                sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'
                files.write("/etc/grid-security/xrd/xrdmapfile",
                            "\"%s\" vdttest" % core.config['user.cert_subject'],
                            owner="xrootd",
                            chown=(user.pw_uid, user.pw_gid))

            files.append(core.config['xrootd.config'], XROOTD_CFG_TEXT % sec_protocol,
                         owner='xrootd', backup=True)
            authfile = '/etc/xrootd/auth_file'
            files.write(authfile, AUTHFILE_TEXT, owner="xrootd",
                        chown=(user.pw_uid, user.pw_gid))
            core.state['xrootd.backups-exist'] = True
    def test_01_create_files(self):
        xrootd_user = pwd.getpwnam("xrootd")
        for name, contents in self.testfiles:
            files.write(os.path.join(getcfg("OriginRootdir"),
                                     getcfg("OriginExport").lstrip("/"), name),
                        contents, backup=False, chmod=0o644,
                        chown=(xrootd_user.pw_uid, xrootd_user.pw_gid))
            files.write(os.path.join(getcfg("OriginRootdir"),
                                     getcfg("OriginAuthExport").lstrip("/"), name),
                        contents, backup=False, chmod=0o644,
                        chown=(xrootd_user.pw_uid, xrootd_user.pw_gid))
    def test_01_start_xrootd(self):
        core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
        core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
        core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
        core.config['xrootd.gsi'] = "ON"
        core.state['xrootd.started-server'] = False
        core.state['xrootd.backups-exist'] = False

        self.skip_ok_unless(core.options.adduser, 'user not created')
        vdt_pw = pwd.getpwnam(core.options.username)
        core.config['certs.usercert'] = os.path.join(vdt_pw.pw_dir, '.globus', 'usercert.pem')
        core.skip_ok_unless_installed('xrootd', by_dependency=True)

        # Determine xrootd package name
        if core.rpm_is_installed('xrootd4'):
            core.config['xrootd.package'] = 'xrootd4'
        elif core.rpm_is_installed('xrootd'):
            core.config['xrootd.package'] = 'xrootd'

        user = pwd.getpwnam("xrootd")
        if core.config['xrootd.gsi'] == "ON":
            core.skip_ok_unless_installed('globus-proxy-utils')
            core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
            core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

            cfgfile = '/etc/xrootd/xrootd-clustered.cfg'
            files.append(cfgfile, XROOTD_CFG_TEXT, owner='xrootd', backup=True)
            authfile = '/etc/xrootd/auth_file'
            files.write(authfile, AUTHFILE_TEXT, owner="xrootd",
                        chown=(user.pw_uid, user.pw_gid))
            files.write("/etc/grid-security/xrd/xrdmapfile",
                        "\"%s\" vdttest" % core.config['user.cert_subject'],
                        owner="xrootd",
                        chown=(user.pw_uid, user.pw_gid))
            core.state['xrootd.backups-exist'] = True

        if core.el_release() < 7:
            stdout, _, fail = core.check_system(('service', 'xrootd', 'start'),
                                                'Start Xrootd server')
            self.assert_('FAILED' not in stdout, fail)
            self.assert_(os.path.exists(core.config['xrootd.pid-file']),
                         'Xrootd server PID file missing')
        else:
            core.check_system(('systemctl', 'start', 'xrootd@clustered'),
                              'Start Xrootd server')
            core.check_system(('systemctl', 'status', 'xrootd@clustered'),
                              'Verify status of Xrootd server')
        core.state['xrootd.started-server'] = True
    def test_03_configure_authentication(self):
        core.skip_ok_unless_installed('condor', 'htcondor-ce', 'htcondor-ce-client')

        # Configure condor-ce to use the gridmap file and set up PBS and Condor routes
        core.config['condor-ce.condor-ce-cfg'] = '/etc/condor-ce/config.d/99-osgtest.condor-ce.conf'
        condor_contents = """GRIDMAP = /etc/grid-security/grid-mapfile
ALL_DEBUG=D_FULLDEBUG
JOB_ROUTER_ENTRIES = \\
   [ \\
     GridResource = "batch pbs"; \\
     TargetUniverse = 9; \\
     name = "Local_PBS"; \\
     Requirements = target.osgTestPBS =?= true; \\
   ] \\
   [ \\
     TargetUniverse = 5; \\
     name = "Local_Condor"; \\
   ]

JOB_ROUTER_SCHEDD2_SPOOL=/var/lib/condor/spool
JOB_ROUTER_SCHEDD2_NAME=$(FULL_HOSTNAME)
JOB_ROUTER_SCHEDD2_POOL=$(FULL_HOSTNAME):9618
"""
        files.write(core.config['condor-ce.condor-ce-cfg'], condor_contents,
                    owner='condor-ce', chmod=0o644)

        # lcmaps needs to know to use the gridmap file instead of GUMS
        core.config['condor-ce.lcmapsdb'] = '/etc/lcmaps.db'
        lcmaps_contents = """
authorize_only:
gridmapfile -> good | bad
"""
        files.append(core.config['condor-ce.lcmapsdb'], lcmaps_contents, owner='condor-ce')

        # Add host DN to condor_mapfile
        if core.options.hostcert:
            core.config['condor-ce.condorce_mapfile'] = '/etc/condor-ce/condor_mapfile'
            condor_mapfile_contents = files.read('/usr/share/osg-test/test_condorce_mapfile')
            files.write(core.config['condor-ce.condorce_mapfile'], condor_mapfile_contents,
                        owner='condor-ce', chmod=0o644)