def test_06_remove_trustmanager(self):
    """Undo an EMI trustmanager installation: restore server.xml and delete dropped files."""
    core.skip_ok_unless_installed(tomcat.pkgname(), 'emi-trustmanager-tomcat')

    # Equivalent of: mv -f /etc/tomcat5/server.xml.old-trustmanager /etc/tomcat5/server.xml
    saved_config = os.path.join(tomcat.sysconfdir(), 'server.xml.old-trustmanager')
    live_config = os.path.join(tomcat.sysconfdir(), 'server.xml')
    if os.path.exists(saved_config) and os.path.isdir(os.path.dirname(live_config)):
        shutil.move(saved_config, live_config)

    # Remove the jars that the trustmanager install dropped into Tomcat's lib dir.
    for jar_glob in ('bcprov*.jar', 'log4j*.jar', 'trustmanager-*.jar'):
        files.remove(os.path.join(tomcat.serverlibdir(), jar_glob))

    # Remove its log4j properties and the leftover server.xml copy.
    files.remove(os.path.join(tomcat.sysconfdir(), 'log4j-trustmanager.properties'))
    files.remove('/var/lib/trustmanager-tomcat/server.xml')

    core.log_message('EMI trustmanager removed')
def parse_output_for_packages(yum_output):
    """Scan yum transaction output and update the install bookkeeping in core.state.

    Updates the lists 'install.installed', 'install.updated',
    'install.replace', 'install.os_updates', and 'install.orphaned'
    based on the Installing/Updating/Cleanup/Erasing lines in yum_output.
    """
    core.log_message('install.installed:' + ', '.join(core.state['install.installed']))
    clean_output = yum_output.strip().split('\n')
    # Matches transaction lines, capturing the operation and the package name
    # (optionally stripping an epoch prefix such as "2:").
    transaction_regexp = re.compile(r'\s+(Installing|Updating|Cleanup|Erasing)\s+:\s+\d*:?(\S+)\s+\d')
    # Try not to match any packages named 'replacing.*'
    obsolete_regexp = re.compile(r'\s+replacing\s+([^\.]+).*\.osg\S+$')
    replacement_regexp = r'\s+(\S+)\s+(?:\S+\s+){2}osg'  # Only remove obsoleting packages from the OSG
    previous_line = ''
    for line in clean_output:
        obsoleted = obsolete_regexp.match(line)
        if obsoleted:
            replaced_pkg = obsoleted.group(1)
            try:
                # The obsoleting package's name is on the line preceding 'replacing ...'
                pkg = re.match(replacement_regexp, previous_line).group(1)
                if pkg == replaced_pkg:
                    # xrootd obsoletes older versions of itself so we end up with duplicates
                    # in install.installed.  This isn't caught in the transaction logic below
                    # since the replaced package is 'cleaned up' instead of being 'erased'
                    try:
                        core.state['install.installed'].remove(pkg)
                    except ValueError:
                        # EL6 stores packages full NVR while EL5 only stores the name.  This
                        # causes problems because we only capture obsoleting/replaced packages
                        # by name and try to remove by name.  This means that
                        # 'install.installed' gets 'polluted' in EL6 but it doesn't really
                        # matter since we use 'yum undo' in EL6 for yum operations.  We don't
                        # get rid of 'install.installed' because it currently (2/1/16)
                        # determines whether some tests get run or not
                        continue
                core.state['install.replace'].append(pkg)
            except AttributeError:
                continue  # no match and not a transaction line
        else:
            pass  # no match
        previous_line = line
        try:
            operation, pkg = transaction_regexp.match(line).groups()
        except AttributeError:
            continue  # Not a transaction
        if operation == 'Installing' and pkg != 'kernel':  # uninstalling kernel updates is a headache
            core.state['install.installed'].append(pkg)
        elif operation == 'Updating':
            core.state['install.updated'].append(pkg)
        elif operation == 'Cleanup' and pkg not in core.state['install.installed']:
            # Cleanup only occurs on upgrades/downgrades and if we didn't
            # install the package, it already existed on the machine
            core.state['install.os_updates'].append(pkg)
        elif operation == 'Erasing':
            try:
                core.state['install.installed'].remove(pkg)
            except ValueError:
                # We just removed a package that we didn't install, uh-oh!
                core.state['install.orphaned'].append(pkg)
            try:
                core.state['install.updated'].remove(pkg)
            except ValueError:
                # Package wasn't updated
                continue
def test_03_xrootd_fuse(self):
    """ This tests xrootd-fuse using a mount in /mnt """
    if core.missing_rpm('xrootd-server', 'xrootd-client','xrootd-fuse'):
        return
    if not os.path.exists("/mnt"):
        core.log_message("/mnt did not exist, skipping xrootd fuse test")
        return
    if not os.path.exists(TestXrootd.__fuse_path):
        os.mkdir(TestXrootd.__fuse_path)
    if core.config['xrootd.gsi'] == "ON":
        core.log_message("fuse incompatible with GSI, skipping xrootd fuse")
        return
    hostname = socket.getfqdn()
    #command = ('xrootdfs',TestXrootd.__fuse_path,'-o','rdr=xroot://localhost:1094//tmp','-o','uid=xrootd')
    command = ('mount', '-t','fuse','-o','rdr=xroot://localhost:1094//tmp,uid=xrootd','xrootdfs',TestXrootd.__fuse_path)
    command_str= ' '.join(command)
    #For some reason, sub process hangs on fuse processes, use os.system
    #status, stdout, stderr = core.system(command_str,shell=True)
    os.system(command_str)

    # Copy a file in and see if it made it into the fuse mount
    xrootd_url = 'root://%s/%s/copied_file.txt' % (hostname, "/tmp")
    command = ('xrdcp', TestXrootd.__data_path , xrootd_url)
    status, stdout, stderr = core.system(command, True)

    # The copied file should be visible through the fuse mount's backing store
    command = ('ls', "/tmp/copied_file.txt")
    stdout, stderr, fail = core.check_system(command, "Checking file is copied to xrootd fuse mount correctly", user=True)

    # Clean up: unmount, remove the mount point and the copied file
    command = ('umount',TestXrootd.__fuse_path)
    status, stdout, stderr = core.system(command)
    os.rmdir(TestXrootd.__fuse_path)
    files.remove("/tmp/copied_file.txt")
def test_18_execute_condor_meter(self):
    """Run the Gratia condor probe and verify that its outbox is drained.

    Requires the condor logs to have been copied and both the condor-ce and
    condor services to be running (per the core.state flags checked below).
    """
    core.state['gratia.condor-meter-running'] = False
    core.skip_ok_unless_installed('gratia-probe-condor', 'gratia-service', 'htcondor-ce-condor')
    # Truthiness check instead of '== False'; also supply a skip message
    self.skip_bad_if(not core.state['gratia.condor-logs-copied'], 'condor logs not copied')
    self.skip_bad_unless(core.state['condor-ce.started-service'], 'condor-ce not running')
    self.skip_bad_unless(core.state['condor.running-service'], message='Condor service not running')

    # Record the probe log's inode and size so later tests can detect new output
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = core.get_stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))

    command = ('/usr/share/gratia/condor/condor_meter',)
    core.check_system(command, 'Unable to execute condor_meter.')

    core.config['gratia.condor-temp-dir'] = (core.config['gratia.tmpdir.prefix'] + "subdir.condor" +
                                             core.config['gratia.tmpdir.postfix'])
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.condor-temp-dir'])
        self.assert_(result, 'condor outbox check failed.')
    core.state['gratia.condor-meter-running'] = True
def test_07_remove_test_user(self):
    """Kill the test user's processes, delete the account, and clean up its files."""
    if not core.state['general.user_added']:
        core.log_message('Did not add user')
        return

    name = core.options.username
    pw_entry = pwd.getpwnam(name)

    # Remove certs first, in case userdel fails part-way
    if core.state['general.user_cert_created']:
        cert_dir = os.path.join(pw_entry.pw_dir, '.globus')
        for cert_file in ('usercert.pem', 'userkey.pem'):
            files.remove(os.path.join(cert_dir, cert_file))

    # Kill every process owned by the test user (take no prisoners)
    _, ps_output, _ = core.system(('ps', '-U', name, '-u', name, '-o', 'pid='))
    for pid_text in ps_output.splitlines():
        try:
            os.kill(int(pid_text), signal.SIGKILL)
        except OSError:
            continue  # process already exited

    core.check_system(('userdel', name), "Remove user '%s'" % name)

    files.remove(os.path.join('/var/spool/mail', name))
    shutil.rmtree(pw_entry.pw_dir)
def isProbeDataValidInDatabase(self, command, queryFailureString, assertionValue='', atLeastOneRecord=False):
    """Query the database for probe information and validate the output.

    command            -- shell command string to run the query
    queryFailureString -- message logged when the query command fails
    assertionValue     -- if non-empty, a regex searched (case-insensitively)
                          for in the query output
    atLeastOneRecord   -- if True (and no assertionValue), require the query
                          output to be an integer >= 1

    Returns True when the query output passes the requested check.
    """
    status, stdout, _ = core.system(command, shell=True)
    if status != 0:
        core.log_message(queryFailureString)
        return False  # Unable to query the database

    # Query was successful -- examine its output.
    if assertionValue:
        # 'is not None' / truthiness instead of '!= None' / "!= ''" comparisons
        return re.search(assertionValue, stdout, re.IGNORECASE) is not None

    if atLeastOneRecord:
        # The query is expected to return a record count
        if int(stdout) < 1:
            core.log_message("Query did not return one or more records.")
            return False
        return True

    # No validation requested: a successful query is enough
    return True
def test_06_execute_gridftptransfer_probedriver(self):
    """Run the Gratia gridftp-transfer probe and verify that its outbox is drained."""
    core.state['gratia.gridftp-transfer-running'] = False
    core.skip_ok_unless_installed('gratia-probe-gridftp-transfer', 'gratia-service', 'globus-gridftp-server-progs',
                                  'globus-ftp-client', 'globus-proxy-utils', 'globus-gass-copy-progs')
    self.skip_ok_unless(core.state['gridftp.started-server'], 'gridftp server not running')
    self.skip_bad_unless(core.state['gratia.gridftp-logs-copied'], 'gridftp logs not copied')

    # Record the probe log's inode and size so later tests can detect new output
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = core.get_stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))

    # The probe script was renamed in gratia-probe-gridftp-transfer 1.17.0
    if core.package_version_compare('gratia-probe-gridftp-transfer', '1.17.0-1') >= 0:
        probe_script = 'gridftp-transfer_meter'
    else:
        probe_script = 'GridftpTransferProbeDriver'

    command = ('/usr/share/gratia/gridftp-transfer/%s' % probe_script,)
    core.check_system(command, 'Unable to execute %s.' % probe_script)

    core.config['gratia.gridftp-temp-dir'] = (core.config['gratia.tmpdir.prefix'] + "subdir.gridftp-transfer" +
                                              core.config['gratia.tmpdir.postfix'])
    # Truthiness check instead of '== True'
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.gridftp-temp-dir'])
        self.assert_(result, 'gridftp-transfer outbox check failed.')
    core.state['gratia.gridftp-transfer-running'] = True
def test_14_execute_dcache_storage(self):
    """Run the dCache-storage probe, tolerating whitelisted transient XML errors.

    Malformed XML errors can occur due to network issues (SOFTWARE-1748), so a
    non-zero exit is accepted when the output contains a whitelisted message.
    """
    core.state['gratia.dcache-whitelisted-error'] = False
    whitelisted_errors = ['The element type "metric" must be terminated by the matching end-tag "</metric>".',
                          'XML document structures must start and end within the same entity.']
    core.skip_ok_unless_installed('gratia-probe-dcache-storage', 'gratia-service')
    core.state['gratia.dcache-storage-running'] = False
    # Truthiness check instead of '== False'; also supply a skip message
    self.skip_bad_if(not core.state['gratia.dcache-logs-copied'], 'dCache logs not copied')

    # Record the probe log's inode and size so later tests can detect new output
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = os.stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))

    command = ('/usr/share/gratia/dCache-storage/dCache-storage_meter.cron.sh',)
    status, stdout, stderr = core.system(command)
    if status != 0:
        # any() instead of a manual flag-and-break loop
        if any(error in stdout for error in whitelisted_errors):
            core.state['gratia.dcache-whitelisted-error'] = True
        else:
            self.fail(core.diagnose('Unable to execute dCache-storage.', command, status, stdout, stderr))

    core.config['gratia.dcache-temp-dir'] = (core.config['gratia.tmpdir.prefix'] + "subdir.dCache-storage" +
                                             core.config['gratia.tmpdir.postfix'])
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.dcache-temp-dir'])
        self.assert_(result, 'dCache-storage outbox check failed.')
    core.state['gratia.dcache-storage-running'] = True
def test_01_start_xrootd(self):
    """Install certificates and write XRootD auth config ahead of starting the server."""
    core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
    core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
    core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
    core.config['xrootd.config'] = '/etc/xrootd/xrootd-clustered.cfg'
    core.config['xrootd.gsi'] = "ON"
    core.state['xrootd.started-server'] = False
    core.state['xrootd.backups-exist'] = False

    self.skip_ok_unless(core.options.adduser, 'user not created')
    core.skip_ok_unless_installed('xrootd', by_dependency=True)

    user = pwd.getpwnam("xrootd")
    if core.config['xrootd.gsi'] == "ON":
        core.skip_ok_unless_installed('globus-proxy-utils')
        # 0o644/0o400 instead of the Python-2-only literals 0644/0400,
        # matching the 0o-style octals used elsewhere in this file.
        core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
        core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

        # Prefer xrootd-lcmaps auth when the full LCMAPS stack is installed;
        # otherwise fall back to a grid-mapfile.
        lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps', 'vo-client', 'vo-client-lcmaps-voms')
        if all(core.rpm_is_installed(x) for x in lcmaps_packages):
            core.log_message("Using xrootd-lcmaps authentication")
            sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:--loglevel,5'
        else:
            core.log_message("Using XRootD mapfile authentication")
            sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'
            files.write("/etc/grid-security/xrd/xrdmapfile",
                        "\"%s\" vdttest" % core.config['user.cert_subject'],
                        owner="xrootd",
                        chown=(user.pw_uid, user.pw_gid))

        files.append(core.config['xrootd.config'], XROOTD_CFG_TEXT % sec_protocol, owner='xrootd', backup=True)
        authfile = '/etc/xrootd/auth_file'
        files.write(authfile, AUTHFILE_TEXT, owner="xrootd", chown=(user.pw_uid, user.pw_gid))
        core.state['xrootd.backups-exist'] = True
def output_is_acceptable(fetch_crl_output):
    """All lines output from fetch-crl are considered an error.
    We whitelist a few transient ones.

    Returns True when every non-blank line matches some whitelisted pattern;
    logs the first uncaught line and returns False otherwise.
    """
    error_message_whitelist = [
        r'^VERBOSE\(0\)',
        'CRL has lastUpdate time in the future',
        'CRL has nextUpdate time in the past',
        # ERROR CRL verification failed for BrGrid/0 (BrGrid)
        'CRL verification failed for',
        # ERROR verify called on empty data blob
        'verify called on empty data blob',
        # LWP::Protocol::http::Socket: connect: No route to host at /usr/share/perl5/LWP/Protocol/http.pm line 51.
        'LWP::Protocol::http::Socket',
    ]
    for line in fetch_crl_output.rstrip('\n').split('\n'):
        if not line:  # skip blank lines
            continue
        # any() + early return instead of the manual flag-and-break loop
        if not any(re.search(pattern, line) for pattern in error_message_whitelist):
            core.log_message("Found uncaught error message: '%s'" % line.strip())
            return False
    return True
def preserve(path, owner):
    """Back up the file at path, keyed by the (path, owner) pair.

    owner must be specified, and the (path, owner) pair must not have been
    used in a previous call; ValueError is raised otherwise.
    """
    if owner is None:
        raise ValueError('Must have owner string')

    backup_id = (path, owner)
    if backup_id in _backups:
        raise ValueError("Already have a backup of '%s' for '%s'" % (path, owner))

    backup_path = os.path.join(_backup_directory, os.path.basename(path) + '#' + owner)
    if os.path.exists(backup_path):
        raise ValueError("Backup already exists at '%s'" % (backup_path))

    if not os.path.exists(path):
        # Nothing on disk yet; record None so restore() knows to just delete.
        _backups[backup_id] = None
        return

    if not os.path.isdir(_backup_directory):
        os.mkdir(_backup_directory)
    shutil.copy2(path, backup_path)
    # Remember the copy's location plus the original ownership.
    original_stat = os.stat(path)
    _backups[backup_id] = {'path': backup_path,
                           'uid': original_stat.st_uid,
                           'gid': original_stat.st_gid}
    core.log_message("Backed up '%s' as '%s'" % (path, backup_path))
def preserve(path, owner):
    """Backup the file at path and remember it with the given owner.

    owner must be specified.  The (path, owner) pair must not have been
    previously used in a call to preserve.  Raises ValueError if either
    of these are not true.
    """
    if owner is None:
        raise ValueError('Must have owner string')

    backup_id = (path, owner)
    if backup_id in _backups:
        raise ValueError("Already have a backup of '%s' for '%s'" % (path, owner))

    backup_path = os.path.join(_backup_directory, os.path.basename(path) + '#' + owner)
    if os.path.exists(backup_path):
        raise ValueError("Backup already exists at '%s'" % (backup_path))

    if os.path.exists(path):
        # Lazily create the backup directory on first use
        if not os.path.isdir(_backup_directory):
            os.mkdir(_backup_directory)
        shutil.copy2(path, backup_path)
        # Record where the copy lives plus the original ownership so a later
        # restore can put things back.
        _backups[backup_id] = {
            'path': backup_path,
            'uid': os.stat(path).st_uid,
            'gid': os.stat(path).st_gid
        }
        core.log_message("Backed up '%s' as '%s'" % (path, backup_path))
    else:
        # No file to back up: record None so restore() knows to delete only.
        _backups[backup_id] = None
def test_05_start_tomcat(self):
    """Start Tomcat with raised log verbosity and wait for its startup sentinel."""
    core.skip_ok_unless_installed(tomcat.pkgname())
    core.state['tomcat.started'] = False
    catalina_log = tomcat.catalinafile()

    # Snapshot the log so monitor_file() below only sees new output
    initial_stat = core.get_stat(catalina_log)
    tomcat_sentinel = r'Server startup in \d+ ms'
    # Bump log level
    core.config['tomcat.logging-conf'] = os.path.join(tomcat.sysconfdir(), 'logging.properties')
    files.append(core.config['tomcat.logging-conf'], 'org.apache.catalina.level = %s\n' % 'FINER',
                 owner='tomcat', backup=True)

    # Disable catalina log rotation so the sentinel lands in the file we watch
    old_str = "1catalina.org.apache.juli.FileHandler.prefix = catalina."
    repl_str = ("1catalina.org.apache.juli.FileHandler.prefix = catalina\n"
                "1catalina.org.apache.juli.FileHandler.rotatable = false")
    files.replace(core.config['tomcat.logging-conf'], old_str, repl_str, owner='tomcat', backup=False)

    service.check_start(tomcat.pkgname())

    # Nightly test machines are slower, so allow a much longer startup window
    if core.options.nightly:
        timeout = 3600.0
    else:
        timeout = 1200.0
    line, gap = core.monitor_file(catalina_log, initial_stat, tomcat_sentinel, timeout)
    self.assert_(line is not None, 'Tomcat did not start within the %d min window' % int(timeout/60))
    core.state['tomcat.started'] = True
    core.log_message('Tomcat started after %.1f seconds' % gap)
def test_02_condor_ce_run_condor(self):
    """Submit a job through HTCondor-CE via condor_ce_run, authenticating
    with a condor WRITE SciToken when present, else the grid proxy."""
    core.skip_ok_unless_installed('htcondor-ce', 'htcondor-ce-client', 'htcondor-ce-condor', 'condor')

    self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')
    self.skip_bad_unless(service.is_running('condor'), 'condor not running')
    self.skip_bad_unless(core.state['jobs.env-set'], 'job environment not set')
    token_file = core.config['token.condor_write']
    self.skip_bad_unless(core.state['proxy.valid'] or os.path.exists(token_file),
                         'requires a scitoken or a proxy')

    command = ['condor_ce_run', '--debug', '-r', '%s:9619' % core.get_hostname(), '/bin/env']

    if os.path.exists(token_file):
        # FIXME: After HTCONDOR-636 is released (targeted for HTCondor-CE 5.1.2),
        # we can stop setting _condor_SCITOKENS_FILE
        for token_var in ('_condor_SCITOKENS_FILE', 'BEARER_TOKEN_FILE'):
            os.environ[token_var] = token_file
    else:
        core.log_message('condor WRITE token not found; skipping SCITOKENS auth')

    if core.osg_release() == "3.6" and \
       core.PackageVersion('condor') >= '9.0.0' and \
       core.PackageVersion('condor') < '9.0.8':
        # NOTE(review): presumably runs the job without the user's x509 proxy
        # for these condor versions — confirm against core.no_x509's definition
        with core.no_x509(core.options.username):
            self.run_job_in_tmp_dir(command, 'condor_ce_run a Condor job')
    else:
        self.run_job_in_tmp_dir(command, 'condor_ce_run a Condor job')
def test_01_configure_xrootd(self):
    """Write the two third-party-copy XRootD configs, using LCMAPS or mapfile auth."""
    core.config['xrootd.tpc.config-1'] = '/etc/xrootd/xrootd-third-party-copy-1.cfg'
    core.config['xrootd.tpc.config-2'] = '/etc/xrootd/xrootd-third-party-copy-2.cfg'
    core.config['xrootd.tpc.http-port1'] = HTTP_PORT1
    core.config['xrootd.tpc.http-port2'] = HTTP_PORT2
    core.state['xrootd.started-http-server-1'] = False
    core.state['xrootd.started-http-server-2'] = False
    core.state['xrootd.tpc.backups-exist'] = False

    self.skip_ok_unless(core.options.adduser, 'user not created')
    core.skip_ok_unless_installed('globus-proxy-utils', 'xrootd', 'xrootd-scitokens', by_dependency=True)

    user = pwd.getpwnam("xrootd")
    # Prefer xrootd-lcmaps auth when the full LCMAPS stack is installed;
    # otherwise fall back to a grid-mapfile.
    lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps', 'vo-client', 'vo-client-lcmaps-voms')
    if all([core.rpm_is_installed(x) for x in lcmaps_packages]):
        core.log_message("Using xrootd-lcmaps authentication")
        sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:--loglevel,5'
        sec_protocol += ',--policy,authorize_only'
    else:
        core.log_message("Using XRootD mapfile authentication")
        sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'

    # One config per server instance; each gets its own HTTP port
    files.write(core.config['xrootd.tpc.config-1'],
                XROOTD_CFG_TEXT % (sec_protocol, core.config['xrootd.tpc.http-port1'],
                                   core.config['xrootd.tpc.http-port1']),
                owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
    files.write(core.config['xrootd.tpc.config-2'],
                XROOTD_CFG_TEXT % (sec_protocol, core.config['xrootd.tpc.http-port2'],
                                   core.config['xrootd.tpc.http-port2']),
                owner='xrootd', backup=True, chown=(user.pw_uid, user.pw_gid))
    core.state['xrootd.tpc.backups-exist'] = True
def test_06_remove_trustmanager(self): core.skip_ok_unless_installed(tomcat.pkgname(), 'emi-trustmanager-tomcat') # mv -f /etc/tomcat5/server.xml.old-trustmanager /etc/tomcat5/server.xml old_tm = os.path.join(tomcat.sysconfdir(), 'server.xml.old-trustmanager') new_tm = os.path.join(tomcat.sysconfdir(), 'server.xml') if os.path.exists(old_tm) and os.path.isdir(os.path.dirname(new_tm)): shutil.move(old_tm, new_tm) # rm -f /usr/share/tomcat5/server/lib/bcprov*.jar files.remove(os.path.join(tomcat.serverlibdir(), 'bcprov*.jar')) # rm -f /usr/share/tomcat5/server/lib/log4j*.jar files.remove(os.path.join(tomcat.serverlibdir(), 'log4j*.jar')) # rm -f /usr/share/tomcat5/server/lib/trustmanager-*.jar files.remove(os.path.join(tomcat.serverlibdir(), 'trustmanager-*.jar')) # rm -f /etc/tomcat5/log4j-trustmanager.properties files.remove( os.path.join(tomcat.sysconfdir(), 'log4j-trustmanager.properties')) # rm -f /var/lib/trustmanager-tomcat/server.xml files.remove('/var/lib/trustmanager-tomcat/server.xml') core.log_message('EMI trustmanager removed')
def test_02_condor_ce_run_condor(self):
    """Submit a job through HTCondor-CE via condor_ce_run, authenticating
    with a condor WRITE SciToken when available, else the grid proxy."""
    core.skip_ok_unless_installed('htcondor-ce', 'htcondor-ce-client', 'htcondor-ce-condor', 'condor')
    self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')
    self.skip_bad_unless(service.is_running('condor'), 'condor not running')
    self.skip_bad_unless(core.state['jobs.env-set'], 'job environment not set')
    self.skip_bad_unless(core.state['proxy.valid'] or core.state['token.condor_write_created'],
                         'requires a scitoken or a proxy')

    command = ['condor_ce_run', '--debug', '-r', '%s:9619' % core.get_hostname(), '/bin/env']

    if not core.state['token.condor_write_created']:
        core.log_message('condor WRITE token not found; skipping SCITOKENS auth')
    else:
        # FIXME: After HTCONDOR-636 is released (targeted for HTCondor-CE 5.1.2),
        # we can stop setting _condor_SCITOKENS_FILE
        for env_name in ('_condor_SCITOKENS_FILE', 'BEARER_TOKEN_FILE'):
            os.environ[env_name] = core.config['token.condor_write']

    self.run_job_in_tmp_dir(command, 'condor_ce_run a Condor job')
def isProbeDataValidInDatabase(self, command, queryFailureString, assertionValue='', atLeastOneRecord=False):
    """This helper method queries the database for probe related information and based on the
    passed-in data, determines if the queried information is valid or not.

    command            -- shell command string to run the query
    queryFailureString -- message logged when the query command fails
    assertionValue     -- if non-empty, a regex searched (case-insensitively) in the output
    atLeastOneRecord   -- if True (and no assertionValue), require output to be an int >= 1
    """
    status, stdout, _ = core.system(command, shell=True)
    if status != 0:
        core.log_message(queryFailureString)
        return False  # Unable to query the database

    ###### If we reached this point, database query was successful. Now, it's time to examine the query output #####
    if assertionValue != '':
        result = re.search(assertionValue, stdout, re.IGNORECASE)
        if result != None:
            return True  # Found the assertionValue in the database
        else:
            return False  # Unable to find the assertionValue in the database
    else:  # No assertionValue passed in
        if atLeastOneRecord == True:
            # stdout is expected to be a record count here
            if int(stdout) < 1:
                core.log_message("Query did not return one or more records.")
                return False  # Query should return at least one record
            else:
                return True  # Query returned at least one record
        else:  # "atLeastOneRecord" Flag was not passed in
            return True
def test_05_configure_multiuser(self):
    """Enable the XRootD multiuser plugin unless a packaged config already does."""
    core.skip_ok_unless_installed('xrootd-multiuser', by_dependency=True)

    multiuser_directives = ("ofs.osslib ++ libXrdMultiuser.so\n"
                            "ofs.ckslib ++ libXrdMultiuser.so\n")
    if os.path.exists("/etc/xrootd/config.d/60-osg-multiuser.cfg"):
        core.log_message("Not adding XRootD multiuser config, already exists")
    else:
        files.append(core.config['xrootd.config'], multiuser_directives, owner='xrootd', backup=False)

    core.config['xrootd.multiuser'] = True
def test_01_remove_packages(self):
    """Erase every RPM the test run installed, in install order, via 'rpm --erase'."""
    if (('install.preinstalled' not in core.state) or
            (len(core.state['install.preinstalled']) == 0)):
        core.skip('no original list')
        return
    if 'install.installed' not in core.state:
        core.skip('no packages installed')
        return
    current_rpms = core.installed_rpms()
    new_rpms = current_rpms - core.state['install.preinstalled']
    if len(new_rpms) == 0:
        core.skip('no new RPMs')
        return

    # For the "rpm -e" command, RPMs should be listed in the same order as
    # installed.  Why?  The erase command processes files in reverse order
    # as listed on the command line, mostly; it seems to do a bit of
    # reordering (search -vv output for "tsort"), but it is not clear what
    # the algorithm is.  So, rpm will cheerfully erase a package, the
    # contents of which are needed by the pre- or post-uninstall scriptlets
    # of a package that will be erased later in sequence.  By listing them
    # in yum install order, we presumably get a valid ordering and increase
    # the chances of a clean erase.
    rpm_erase_candidates = []
    for package in core.state['install.installed']:
        if package in new_rpms:
            rpm_erase_candidates.append(package)

    # Anything new on the system that yum did not report goes at the end
    remaining_new_rpms = new_rpms - set(rpm_erase_candidates)
    count = len(remaining_new_rpms)
    if count > 0:
        core.log_message('%d RPMs installed but not in yum output' % count)
        rpm_erase_candidates += remaining_new_rpms

    # Creating the list of RPMs to erase is more complicated than just using
    # the list of new RPMs, because there may be RPMs with both 32- and
    # 64-bit versions installed.  In that case, rpm will fail if given just
    # the base package name; instead, the architecture must be specified,
    # and an easy way to get that information is from 'rpm -q'.  So we use
    # the bare name when possible, and the fully versioned one when
    # necessary.
    rpm_erase_list = []
    for package in rpm_erase_candidates:
        command = ('rpm', '--query', package, '--queryformat', r'%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\n')
        status, stdout, stderr = core.system(command, log_output=False)
        versioned_rpms = re.split('\n', stdout.strip())
        if len(versioned_rpms) > 1:
            # Multiple arches installed under one name: erase by full NVRA
            rpm_erase_list += versioned_rpms
        else:
            rpm_erase_list.append(package)

    package_count = len(rpm_erase_list)
    command = ['rpm', '--quiet', '--erase'] + rpm_erase_list
    core.check_system(command, 'Remove %d packages' % (package_count))
def test_01_wait_for_voms_admin(self):
    """Wait up to two minutes for the VOMS Admin webapp to report startup."""
    core.state['voms.started-webapp'] = False
    core.skip_ok_unless_installed('voms-admin-server')

    # 'succesfully' [sic] is the exact text the webapp writes to its log
    sentinel = 'VOMS-Admin started succesfully'
    line, gap = core.monitor_file(core.config['voms.webapp-log'],
                                  core.state['voms.webapp-log-stat'],
                                  sentinel, 120.0)
    self.assert_(line is not None, 'VOMS Admin webapp started')

    core.state['voms.started-webapp'] = True
    core.log_message('VOMS Admin started after %.1f seconds' % gap)
def write(path, contents, owner=None, backup=True, chown=(0, 0), chmod=0o600): """Write the contents to a file at the path. The 'owner' argument (default: None) is a string that identifies the owner of the file. If the 'backup' argument is True (default), then any existing file at the path will be backed up for later restoration. However, because backups are identified in part by 'owner', if 'backup' is True, then 'owner' must be defined. Typically, a caller specifies either 'backup=False' to turn off backups (not recommended) or 'owner=[some string]' to set the owner for the backup. The 'chown' argument (default: (0, 0)) is a tuple of integers (uid, gid) that assigns the owner and group of the written file if the file doesn't exist. If the file does exist, the owner and group are copied from the previous file. The 'chmod' argument (default: 0600) is an integer that assigns the permissions of the written file if it doesn't exist. If it does exist, then the permissions are copied from the old file. """ # The default arguments are invalid: Either "backup" must be false or the # "owner" must be specified. if (owner is None) and backup: raise ValueError('Must specify an owner or backup=False') # Write temporary file temp_fd, temp_path = tempfile.mkstemp(prefix=os.path.basename(path) + '.', suffix='.osgtest-new', dir=os.path.dirname(path)) temp_file = os.fdopen(temp_fd, 'w') if isinstance(contents, (list, tuple)): temp_file.writelines(contents) else: temp_file.write(contents) temp_file.close() # Copy ownership and permissions if os.path.exists(path): old_stat = os.stat(path) os.chown(temp_path, old_stat.st_uid, old_stat.st_gid) os.chmod(temp_path, old_stat.st_mode) else: os.chown(temp_path, chown[0], chown[1]) os.chmod(temp_path, chmod) # Back up existing file if backup: preserve(path, owner) # Atomically move temporary file into final location os.rename(temp_path, path) core.log_message('Wrote %d bytes to %s' % (os.stat(path).st_size, path))
def isProbeOutboxDirEmpty(self, gratiaProbeTempDir):
    """Return True if the probe's outbox directory is empty; False otherwise
    (including when the directory cannot be listed at all)."""
    outbox = gratiaProbeTempDir + "/outbox/"
    try:
        core.log_message('isProbeOutboxDirEmpty method - outboxdir is: ' + str(outbox))
        return not os.listdir(outbox)
    except OSError:
        # Missing or unreadable outbox counts as a failed check
        return False
def test_06_configure_scitokens(self):
    """Write the XRootD SciTokens config and hook it into the main config if needed."""
    self.skip_ok_unless("SCITOKENS" in core.config['xrootd.security'], "Not using SciTokens for XRootD")

    scitokens_conf_path = "/etc/xrootd/scitokens.conf"
    files.write(scitokens_conf_path, SCITOKENS_CONF_TEXT, owner='xrootd', chmod=0o644)

    if not os.path.exists("/etc/xrootd/config.d/50-osg-scitokens.cfg"):
        files.append(core.config['xrootd.config'],
                     XROOTD5_SCITOKENS_CFG_TXT % scitokens_conf_path,
                     backup=False)
    else:
        core.log_message("Not adding XRootD SciTokens config, already exists")
def copy_user_vo_map_file(self):
    """Copy the test user-vo-map into /var/lib/osg, creating the directory if needed.

    Returns False when the copy fails; otherwise falls through returning None
    (both are falsy -- callers do not appear to check the success value).
    """
    user_vo_map_dir = '/var/lib/osg/'
    user_vo_map_file = '/usr/share/osg-test/gratia/user-vo-map'
    if not os.path.exists(user_vo_map_dir):
        os.makedirs(user_vo_map_dir)
    try:
        shutil.copy(user_vo_map_file, user_vo_map_dir)
    except IOError as e:  # 'as' instead of the Python-2-only 'except IOError, e'
        core.log_message("Unable to copy file. %s" % e)
        return False
def write(path, contents, owner=None, backup=True, chown=(0,0), chmod=0o600):
    """Atomically write contents to the file at path, optionally backing it up.

    'owner' is a string identifying the backup owner; it is required unless
    backup=False.  'chown' (uid, gid) and 'chmod' apply only when the file does
    not already exist -- an existing file keeps its ownership and permissions.
    """
    # Either a backup owner must be named or backups must be disabled.
    if backup and owner is None:
        raise ValueError('Must specify an owner or backup=False')

    # Stage the new contents in a temporary file in the same directory so that
    # the final os.rename() below is atomic.
    temp_fd, temp_path = tempfile.mkstemp(prefix=os.path.basename(path) + '.',
                                          suffix='.osgtest-new',
                                          dir=os.path.dirname(path))
    with os.fdopen(temp_fd, 'w') as temp_file:
        if isinstance(contents, (list, tuple)):
            temp_file.writelines(contents)
        else:
            temp_file.write(contents)

    # New files get the requested ownership and permissions; existing files
    # keep what they already had.
    try:
        old_stat = os.stat(path)
    except OSError:
        old_stat = None
    if old_stat is None:
        os.chown(temp_path, chown[0], chown[1])
        os.chmod(temp_path, chmod)
    else:
        os.chown(temp_path, old_stat.st_uid, old_stat.st_gid)
        os.chmod(temp_path, old_stat.st_mode)

    if backup:
        preserve(path, owner)

    os.rename(temp_path, path)
    core.log_message('Wrote %d bytes to %s' % (os.stat(path).st_size, path))
def test_01_wait_for_voms_admin(self):
    """Wait up to a minute for the VOMS Admin webapp to report startup."""
    core.state['voms.started-webapp'] = False
    if core.missing_rpm('voms-admin-server'):
        return

    # 'succesfully' [sic] matches the exact misspelling in the webapp's log
    line, gap = core.monitor_file(core.config['voms.webapp-log'],
                                  core.state['voms.webapp-log-stat'],
                                  'VOMS-Admin started succesfully', 60.0)
    self.assert_(line is not None, 'VOMS Admin webapp started')
    core.state['voms.started-webapp'] = True
    core.log_message('VOMS Admin started after %.1f seconds' % gap)
def test_21_execute_bdii_status(self):
    """Run the Gratia BDII status probe."""
    core.skip_ok_unless_installed('gratia-probe-bdii-status', 'gratia-service')
    core.state['gratia.bdii-status-running'] = False

    # Record the probe log's inode and size so later tests can detect new output
    log_file = core.config['gratia.log.file']
    if os.path.exists(log_file):
        core.state['gratia.log.stat'] = os.stat(log_file)
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))

    core.check_system(('/usr/share/gratia/bdii-status/bdii_cese_record',),
                      'Unable to execute bdii-status.')

    core.config['gratia.bdii-temp-dir'] = (core.config['gratia.tmpdir.prefix'] + "subdir.bdii_" + "*" +
                                           core.config['gratia.tmpdir.postfix'])
    # TODO: Implement bdii outbox check
    core.state['gratia.bdii-status-running'] = True
def copy_probe_logs(self, log='', logdirectory=''):
    """Copy a probe log into *logdirectory*, creating the directory if needed.

    Returns False when the 'gratia.user-vo-map' prerequisite is missing from
    core.config or the copy fails; otherwise returns None (the historical
    success value, kept so existing callers behave the same).
    """
    # 'not in' instead of 'not ... in'; prerequisite check unchanged.
    if 'gratia.user-vo-map' not in core.config:
        return False
    if log != '' and logdirectory != '':
        try:
            if not os.path.exists(logdirectory):
                os.makedirs(logdirectory)
            shutil.copy(log, logdirectory)
            core.log_message(str(os.listdir(logdirectory)))
        # Fixed: 'except IOError, e' is Python 2-only syntax (SyntaxError on
        # Python 3); sibling helpers in this file already use 'as e'.
        except IOError as e:
            core.log_message("Unable to copy log. %s" % e)
            return False
def restore(path, owner):
    """Restore *path* to the state recorded for *owner* before it was written."""
    key = (path, owner)
    if key not in _backups:
        raise ValueError("No backup of '%s' for '%s'" % (path, owner))
    # Drop the file the test wrote, if any.
    if os.path.exists(path):
        os.remove(path)
        core.log_message('Removed test %s' % (path))
    # A None entry means no original file existed; nothing to move back.
    saved_copy = _backups[key]
    if saved_copy is not None and os.path.exists(saved_copy):
        shutil.move(saved_copy, path)
        core.log_message('Restored original %s' % (path))
    del _backups[key]
def is_installed():
    """Return True when every dependency needed to set up and use VOMS is present.

    On EL7 a minimum voms-server version is also required, to pick up the
    service file fix from SOFTWARE-2357.
    """
    deps = ('voms-server', 'voms-clients', 'voms-mysql-plugin',
            mysql.client_rpm(), mysql.server_rpm())
    if not all(core.dependency_is_installed(dep) for dep in deps):
        return False
    # TODO: drop this check when 3.3 is completely EOL
    if core.el_release() >= 7 and core.PackageVersion('voms-server') < '2.0.12-3.2':
        core.log_message("voms-server installed but too old (missing SOFTWARE-2357 fix)")
        return False
    return True
def setUp(self):
    """Require condor + htcondor-ce and configure SciToken/GSI client auth."""
    # Enforce SciToken or GSI auth for testing
    os.environ['_condor_SEC_CLIENT_AUTHENTICATION_METHODS'] = 'SCITOKENS, GSI'
    core.skip_ok_unless_installed('condor', 'htcondor-ce')
    self.skip_bad_unless(service.is_running('condor-ce'), 'ce not running')
    self.command = []
    if not core.state['token.condor_write_created']:
        core.log_message('condor WRITE token not found; skipping SCITOKENS auth')
        return
    # FIXME: After HTCONDOR-636 is released (targeted for HTCondor-CE 5.1.2),
    # we can stop setting _condor_SCITOKENS_FILE
    token_path = core.config['token.condor_write']
    for token_var in ('_condor_SCITOKENS_FILE', 'BEARER_TOKEN_FILE'):
        os.environ[token_var] = token_path
def test_29_execute_sge(self):
    """Run the SGE meter probe and verify its outbox drains when the DB is installed."""
    core.skip_ok_unless_installed('gratia-probe-sge', 'gratia-service')
    core.state['gratia.sge-running'] = False
    # Idiom fix: was 'skip_bad_if(... == False)'; the flag is a boolean.
    self.skip_bad_if(not core.state['gratia.sge-logs-copied'])
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = os.stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))
    command = ('/usr/share/gratia/sge/sge_meter.cron.sh',)
    core.check_system(command, 'Unable to execute sge_meter.')
    core.config['gratia.sge-temp-dir'] = core.config['gratia.tmpdir.prefix'] + "subdir.sge" + \
        core.config['gratia.tmpdir.postfix']
    # Idiom fix: was '== True'.
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.sge-temp-dir'])
        self.assert_(result, 'sge outbox check failed.')
    core.state['gratia.sge-running'] = True
def test_04_add_mysql_admin(self):
    """Register the host certificate DN as a GUMS admin via the packaged SQL template."""
    core.skip_ok_unless_installed('gums-service')
    host_dn, _ = cagen.certificate_info(core.config['certs.hostcert'])
    template_path = '/usr/lib/gums/sql/addAdmin.mysql'
    self.assert_(os.path.exists(template_path), 'GUMS MySQL template exists')
    template = files.read(template_path, as_single_string=True).strip()
    core.log_message(template)
    # Substitute the host DN for the placeholder in the template.
    sql = re.sub(r'@ADMINDN@', host_dn, template)
    core.log_message(sql)
    mysql_invocation = ('mysql', '--user=gums', '-p' + core.config['gums.password'],
                        '--execute=' + sql)
    core.check_system(mysql_invocation, 'Could not add GUMS MySQL admin')
def test_03_remove_packages(self):
    """Remove everything installed during this test run.

    EL6+: replay the recorded yum transactions in reverse with
    'yum history undo'. EL5: 'yum downgrade' any updated packages, then
    erase the remaining newly-installed RPMs with 'rpm --erase'.
    """
    # We didn't ask to install anything
    if len(core.options.packages) == 0:
        return
    # Nothing actually got installed
    if len(core.state['install.installed']) == 0:
        core.log_message('No packages installed')
        return

    el_version = core.el_release()
    if el_version >= 6:
        # Rolling back is a lot more reliable in yum post EL5
        core.state['install.transaction_ids'].reverse()
        for transaction in core.state['install.transaction_ids']:
            # Fixed: wrap the id in str() — it may be recorded as an int,
            # and the sibling variant of this test already applies str().
            command = ['yum', 'history', 'undo', '-y', str(transaction)]
            for repo in core.options.extrarepos:
                command.append('--enablerepo=%s' % repo)
            fail_msg, _, stdout, _ = yum.retry_command(command)
            if fail_msg:
                self.fail(fail_msg)
    elif el_version == 5:
        # rpm -Uvh --rollback was very finicky so we had to
        # spin up our own method of rolling back installations
        if len(core.state['install.updated']) != 0:
            command = ['yum', 'downgrade', '-y'] + core.state['install.updated']
            fail_msg, _, stdout, _ = yum.retry_command(command)
            if fail_msg:
                self.fail(fail_msg)
            # Remove packages from install list that were brought in as deps for `yum update`
            yum.parse_output_for_packages(stdout)
        if len(core.state['install.installed']) != 0:
            for pkg in core.state['install.os_updates']:
                try:
                    core.state['install.installed'].remove(pkg)
                except ValueError:
                    pass  # it was already removed from under us
            rpm_erase_list = self.list_special_install_rpms(core.state['install.installed'])
            package_count = len(rpm_erase_list)
            command = ['rpm', '--quiet', '--erase'] + rpm_erase_list
            core.check_system(command, 'Remove %d packages' % (package_count))
        else:
            core.log_message('No new RPMs')
            return
def test_02_remove_packages(self):
    """Undo every recorded yum transaction, newest first."""
    # We didn't ask to install anything
    if not core.options.packages:
        return
    # Nothing actually got installed
    if not core.state['install.installed']:
        core.log_message('No packages installed')
        return
    for transaction in sorted(core.state['install.transaction_ids'], reverse=True):
        command = ['yum', 'history', 'undo', '-y', str(transaction)]
        command.extend('--enablerepo=%s' % repo for repo in core.options.extrarepos)
        fail_msg, _, _, _ = yum.retry_command(command)
        if fail_msg:
            self.fail(fail_msg)
def check_status(service_name, expected_status, timeout=10, log_to_check=None):
    """Return True if the 'service_name' status check exits with
    expected_status within 'timeout' seconds; otherwise False, logging the
    tail of 'log_to_check' (when given) to aid debugging.
    """
    status_rc = None
    for _attempt in range(timeout):
        if status_rc == expected_status:
            break
        status_rc = status(service_name)
        time.sleep(1)
    if status_rc != expected_status and log_to_check:
        tail = files.read(log_to_check)[-9:]
        core.log_message("Last lines of log: %s" % log_to_check)
        for entry in tail:
            core.log_message(entry)
    return status_rc == expected_status
def test_02_remove_packages(self):
    """Undo the recorded yum transactions, most recent first."""
    # We didn't ask to install anything
    if len(core.options.packages) == 0:
        return
    # Nothing actually got installed
    if len(core.state['install.installed']) == 0:
        core.log_message('No packages installed')
        return
    core.state['install.transaction_ids'].reverse()
    for transaction in core.state['install.transaction_ids']:
        # Fixed: wrap the id in str() — it may be recorded as an int, and the
        # sibling variant of this test already applies str() here.
        command = ['yum', 'history', 'undo', '-y', str(transaction)]
        for repo in core.options.extrarepos:
            command.append('--enablerepo=%s' % repo)
        fail_msg, _, _, _ = yum.retry_command(command)
        if fail_msg:
            self.fail(fail_msg)
def check_status(service_name, expected_status, timeout=10, log_to_check=None):
    """Poll the service's status code once per second until it matches
    expected_status or 'timeout' polls have elapsed. On failure, log the last
    lines of 'log_to_check' (when provided). Returns True on a match.
    """
    attempts_left = timeout
    rc = None
    while attempts_left > 0 and rc != expected_status:
        rc = status(service_name)
        time.sleep(1)
        attempts_left -= 1
    matched = (rc == expected_status)
    if not matched and log_to_check:
        contents = files.read(log_to_check)
        core.log_message("Last lines of log: %s" % log_to_check)
        for line in contents[-9:]:
            core.log_message(line)
    return matched
def test_05_add_mysql_admin(self):
    """Register the host certificate DN as a GUMS admin via the packaged SQL template."""
    if core.missing_rpm('gums-service'):
        return
    host_dn, _host_issuer = core.certificate_info(core.config['certs.hostcert'])
    template_path = '/usr/lib/gums/sql/addAdmin.mysql'
    self.assert_(os.path.exists(template_path), 'GUMS MySQL template exists')
    template = files.read(template_path, as_single_string=True).strip()
    core.log_message(template)
    # Substitute the host DN for the placeholder in the template.
    sql = re.sub(r'@ADMINDN@', host_dn, template)
    core.log_message(sql)
    mysql_invocation = ('mysql', '--user=gums', '-p' + core.config['gums.password'],
                        '--execute=' + sql)
    core.check_system(mysql_invocation, 'Add GUMS MySQL admin')
def restore(path, owner):
    """Restores the path to its state prior to being written by its owner."""
    backup_id = (path, owner)
    if backup_id not in _backups:
        raise ValueError("No backup of '%s' for '%s'" % (path, owner))
    # Remove whatever the test wrote at the path.
    if os.path.exists(path):
        os.remove(path)
        core.log_message('Removed test %s' % (path))
    # A registry entry is subscripted for its 'path'; when the entry is not a
    # mapping (presumably None, recorded when no original file existed — TODO
    # confirm against the writer), indexing raises TypeError and there is
    # nothing to move back.
    try:
        backup_path = _backups[backup_id]['path']
    except TypeError:
        backup_path = None
    if (backup_path is not None) and os.path.exists(backup_path):
        shutil.move(backup_path, path)
        # Re-apply the uid/gid recorded when the backup was taken.
        os.chown(path, _backups[backup_id]['uid'], _backups[backup_id]['gid'])
        core.log_message('Restored original %s' % (path))
    del _backups[backup_id]
def test_07_ceview(self):
    """Fetch the CE View page over HTTP and check it contains the overview text.

    On a connection failure, dump the CEView log contents before failing.
    """
    core.config['condor-ce.view-listening'] = False
    core.skip_ok_unless_installed('htcondor-ce-view')
    view_url = 'http://%s:%s' % (core.get_hostname(), int(core.config['condor-ce.view-port']))
    try:
        page = core.to_str(urlopen(view_url).read())
        core.log_message(page)
    except EnvironmentError as err:
        debug_file = '/var/log/condor-ce/CEViewLog'
        debug_contents = 'Contents of %s\n%s\n' % (debug_file, '=' * 20)
        try:
            debug_contents += files.read(debug_file, True)
        except EnvironmentError:
            debug_contents += 'Failed to read %s\n' % debug_file
        core.log_message(debug_contents)
        self.fail('Could not reach HTCondor-CE View at %s: %s' % (view_url, err))
    self.assertTrue(re.search(r'HTCondor-CE Overview', page),
                    'Failed to find expected CE View contents')
    core.config['condor-ce.view-listening'] = True
def test_06_execute_gridftptransfer_probedriver(self):
    """Run the gridftp-transfer probe and verify its outbox drains when the DB is installed."""
    core.state['gratia.gridftp-transfer-running'] = False
    core.skip_ok_unless_installed('gratia-probe-gridftp-transfer', 'gratia-service',
                                  'globus-gridftp-server-progs', 'globus-ftp-client',
                                  'globus-proxy-utils', 'globus-gass-copy-progs')
    self.skip_ok_unless(core.state['gridftp.started-server'], 'gridftp server not running')
    self.skip_bad_unless(core.state['gratia.gridftp-logs-copied'], 'gridftp logs not copied')
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = os.stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))
    command = ('/usr/share/gratia/gridftp-transfer/GridftpTransferProbeDriver',)
    core.check_system(command, 'Unable to execute GridftpTransferProbeDriver.')
    core.config['gratia.gridftp-temp-dir'] = core.config['gratia.tmpdir.prefix'] + "subdir.gridftp-transfer" + \
        core.config['gratia.tmpdir.postfix']
    # Idiom fix: was 'if ... == True'; the flag is a boolean.
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.gridftp-temp-dir'])
        self.assert_(result, 'gridftp-transfer outbox check failed.')
    core.state['gratia.gridftp-transfer-running'] = True
def retry_command(command, timeout_seconds=3600):
    """Run a Yum command repeatedly until success, hard failure, or timeout.

    Run the given Yum command. If it succeeds, return. If it fails for a
    whitelisted reason, keep trying, otherwise return a failure message. But,
    do not retry commands for longer than the timeout duration.

    Returns a (fail_msg, status, stdout, stderr) tuple; fail_msg is '' on
    success. NOTE: this mutates the caller's 'command' list when the
    '--exclude' flag is appended below.
    """
    deadline = time.time() + timeout_seconds
    fail_msg, status, stdout, stderr = '', '', '', ''

    # EPEL released xrootd-compat (2/17/2015), which requires xrootd >= 4.1,
    # which is not available in 3.1
    if core.config['install.original-release-ver'] == '3.1':
        command.append('--exclude=xrootd-compat*')

    # Loop for retries
    while True:
        # Stop (re)trying if the deadline has passed
        if time.time() > deadline:
            fail_msg += "Retries terminated after timeout period"
            break

        # Clean yum state before every attempt, including the first.
        clean_yum()
        status, stdout, stderr = core.system(command)

        # Deal with success
        if status == 0:
            break

        # Deal with failures that can be retried
        elif yum_failure_can_be_retried(stdout):
            time.sleep(30)
            core.log_message("Retrying command")
            continue

        # Otherwise, we do not expect a retry to succeed, ever, so fail this
        # package
        else:
            fail_msg = core.diagnose("Command failed", command, status, stdout, stderr)
            break

    return fail_msg, status, stdout, stderr
def test_01_start_xrootd(self):
    """Configure certificates, authentication, and config files for the xrootd server.

    Records paths and flags in core.config/core.state for later tests.
    """
    # Paths and flags consumed by the rest of the xrootd test sequence.
    core.config['xrootd.pid-file'] = '/var/run/xrootd/xrootd-default.pid'
    core.config['certs.xrootdcert'] = '/etc/grid-security/xrd/xrdcert.pem'
    core.config['certs.xrootdkey'] = '/etc/grid-security/xrd/xrdkey.pem'
    core.config['xrootd.config'] = '/etc/xrootd/xrootd-clustered.cfg'
    core.config['xrootd.config-extra'] = '/etc/xrootd/config.d/99-osg-test.cfg'
    core.config['xrootd.port'] = XROOTD_PORT
    core.config['xrootd.multiuser'] = False
    core.state['xrootd.started-server'] = False
    core.state['xrootd.backups-exist'] = False

    self.skip_ok_unless(core.options.adduser, 'user not created')
    core.skip_ok_unless_installed('xrootd', by_dependency=True)
    # The xrootd service account must exist so files can be chowned to it.
    user = pwd.getpwnam("xrootd")
    core.skip_ok_unless_installed('globus-proxy-utils')
    # Install service cert/key copied from the host credentials.
    core.install_cert('certs.xrootdcert', 'certs.hostcert', 'xrootd', 0o644)
    core.install_cert('certs.xrootdkey', 'certs.hostkey', 'xrootd', 0o400)

    # Prefer LCMAPS-based authentication when the full package set is present;
    # otherwise fall back to a static grid-mapfile.
    lcmaps_packages = ('lcmaps', 'lcmaps-db-templates', 'xrootd-lcmaps', 'vo-client', 'vo-client-lcmaps-voms')
    if all([core.rpm_is_installed(x) for x in lcmaps_packages]):
        core.log_message("Using xrootd-lcmaps authentication")
        sec_protocol = '-authzfun:libXrdLcmaps.so -authzfunparms:--loglevel,5'
        if core.PackageVersion('xrootd-lcmaps') >= '1.4.0':
            sec_protocol += ',--policy,authorize_only'
    else:
        core.log_message("Using XRootD mapfile authentication")
        sec_protocol = '-gridmap:/etc/grid-security/xrd/xrdmapfile'
        files.write("/etc/grid-security/xrd/xrdmapfile",
                    "\"%s\" vdttest" % core.config['user.cert_subject'],
                    owner="xrootd",
                    chown=(user.pw_uid, user.pw_gid))

    # Pre-4.9 xrootd uses the single clustered config; newer releases read
    # drop-in files from config.d.
    if core.PackageVersion('xrootd') < '1:4.9.0':
        files.append(core.config['xrootd.config'],
                     XROOTD_CFG_TEXT % (sec_protocol, core.config['xrootd.port']),
                     owner='xrootd', backup=True)
    else:
        files.write(core.config['xrootd.config-extra'],
                    XROOTD_CFG_TEXT % (sec_protocol, core.config['xrootd.port']),
                    owner='xrootd', backup=True, chmod=0o644)
    authfile = '/etc/xrootd/auth_file'
    files.write(authfile, AUTHFILE_TEXT, owner="xrootd",
                chown=(user.pw_uid, user.pw_gid))
    core.state['xrootd.backups-exist'] = True
def test_08_backups(self):
    """Assert that both the backup registry and the backup directory are empty,
    then purge the backup directory regardless."""
    record_is_clear = True
    if files._backups:
        details = ''.join("-- Backup of '%s' for '%s' in '%s'\n" % (bid[0], bid[1], bpath)
                          for bid, bpath in files._backups.items())
        core.log_message('Backups remain in backup dictionary:\n' + details)
        record_is_clear = False

    actual_is_clear = True
    if os.path.isdir(files._backup_directory):
        leftovers = os.listdir(files._backup_directory)
        if leftovers:
            core.log_message("Files remain in '%s:'" % (files._backup_directory))
            core.system('ls -lF ' + files._backup_directory, shell=True)
            actual_is_clear = False
        shutil.rmtree(files._backup_directory, ignore_errors=True)

    self.assert_(record_is_clear and actual_is_clear, 'Backups were not restored fully')
def test_18_execute_condor_meter(self):
    """Run condor_meter and verify its outbox drains when the DB is installed."""
    core.state['gratia.condor-meter-running'] = False
    core.skip_ok_unless_installed('gratia-probe-condor', 'gratia-service')
    core.skip_ok_unless_one_installed('htcondor-ce-condor', 'globus-gram-job-manager-condor')
    # Idiom fix: was 'skip_bad_if(... == False)'; the flag is a boolean.
    self.skip_bad_if(not core.state['gratia.condor-logs-copied'])
    self.skip_bad_unless(core.state['globus-gatekeeper.running'] or core.state['condor-ce.started'],
                         'gatekeeper not running')
    self.skip_bad_unless(core.state['condor.running-service'], message='Condor service not running')
    if os.path.exists(core.config['gratia.log.file']):
        core.state['gratia.log.stat'] = os.stat(core.config['gratia.log.file'])
        core.log_message('stat.st_ino is: ' + str(core.state['gratia.log.stat'].st_ino))
        core.log_message('stat.st_size is: ' + str(core.state['gratia.log.stat'].st_size))
    command = ('/usr/share/gratia/condor/condor_meter',)
    core.check_system(command, 'Unable to execute condor_meter.')
    core.config['gratia.condor-temp-dir'] = core.config['gratia.tmpdir.prefix'] + "subdir.condor" + \
        core.config['gratia.tmpdir.postfix']
    # Idiom fix: was '== True'.
    if core.state['gratia.database-installed']:
        result = self.isProbeOutboxDirEmpty(core.config['gratia.condor-temp-dir'])
        self.assert_(result, 'condor outbox check failed.')
    core.state['gratia.condor-meter-running'] = True
def is_installed():
    """Return True when all VOMS setup/usage dependencies are installed.

    On EL7 also require a minimum voms-server version, to pick up the service
    file fix from SOFTWARE-2357.
    """
    deps = ('voms-server', 'voms-clients', 'voms-mysql-plugin',
            mysql.client_rpm(), mysql.server_rpm())
    if any(not core.dependency_is_installed(dep) for dep in deps):
        return False
    # TODO: drop this check when 3.3 is completely EOL
    if core.el_release() >= 7:
        epoch, _, version, release, _ = core.get_package_envra('voms-server')
        if core.version_compare((epoch, version, release), '2.0.12-3.2') < 0:
            core.log_message("voms-server installed but too old (missing SOFTWARE-2357 fix)")
            return False
    return True
def list_special_install_rpms(self, rpm_list):
    """Return rpm_list ordered for 'rpm --erase', substituting fully-versioned
    names where multiple versions/arches of a package are installed.
    """
    # For the "rpm -e" command, RPMs should be listed in the same order as
    # installed. Why? The erase command processes files in reverse order
    # as listed on the command line, mostly; it seems to do a bit of
    # reordering (search -vv output for "tsort"), but it is not clear what
    # the algorithm is. So, rpm will cheerfully erase a package, the
    # contents of which are needed by the pre- or post-uninstall scriptlets
    # of a package that will be erased later in sequence. By listing them
    # in yum install order, we presumably get a valid ordering and increase
    # the chances of a clean erase.
    rpm_candidates = []
    for package in rpm_list:
        status, stdout, _ = core.system(
            ('rpm', '--query', package, '--queryformat', r'%{NAME}'))
        if status == 0 and stdout in rpm_list:
            rpm_candidates.append(stdout)
    remaining_rpms = set(rpm_list) - set(rpm_candidates)
    count = len(remaining_rpms)
    if count > 0:
        core.log_message('%d RPMs installed but not in yum output' % count)
        # NOTE(review): extending with a set appends in arbitrary order —
        # presumably acceptable since these stragglers had no known install
        # order anyway; confirm.
        rpm_candidates += remaining_rpms

    # Creating the list of RPMs to erase is more complicated than just using
    # the list of new RPMs, because there may be RPMs with both 32- and
    # 64-bit versions installed. In that case, rpm will fail if given just
    # the base package name; instead, the architecture must be specified,
    # and an easy way to get that information is from 'rpm -q'. So we use
    # the bare name when possible, and the fully versioned one when
    # necessary.
    final_rpm_list = []
    for package in rpm_candidates:
        command = ('rpm', '--query', package, '--queryformat',
                   r'%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\n')
        status, stdout, _ = core.system(command, log_output=False)
        versioned_rpms = re.split('\n', stdout.strip())
        if len(versioned_rpms) > 1:
            final_rpm_list += versioned_rpms
        else:
            final_rpm_list.append(package)
    return final_rpm_list
def list_special_install_rpms(self, rpm_list):
    """Return rpm_list ordered for 'rpm --erase', substituting fully-versioned
    names where multiple versions/arches of a package are installed.
    """
    # For the "rpm -e" command, RPMs should be listed in the same order as
    # installed. Why? The erase command processes files in reverse order
    # as listed on the command line, mostly; it seems to do a bit of
    # reordering (search -vv output for "tsort"), but it is not clear what
    # the algorithm is. So, rpm will cheerfully erase a package, the
    # contents of which are needed by the pre- or post-uninstall scriptlets
    # of a package that will be erased later in sequence. By listing them
    # in yum install order, we presumably get a valid ordering and increase
    # the chances of a clean erase.
    rpm_candidates = []
    for package in rpm_list:
        status, stdout, _ = core.system(('rpm', '--query', package, '--queryformat', r'%{NAME}'))
        if status == 0 and stdout in rpm_list:
            rpm_candidates.append(stdout)
    remaining_rpms = set(rpm_list) - set(rpm_candidates)
    count = len(remaining_rpms)
    if count > 0:
        core.log_message('%d RPMs installed but not in yum output' % count)
        # NOTE(review): extending with a set appends in arbitrary order —
        # presumably acceptable since these stragglers had no known install
        # order anyway; confirm.
        rpm_candidates += remaining_rpms

    # Creating the list of RPMs to erase is more complicated than just using
    # the list of new RPMs, because there may be RPMs with both 32- and
    # 64-bit versions installed. In that case, rpm will fail if given just
    # the base package name; instead, the architecture must be specified,
    # and an easy way to get that information is from 'rpm -q'. So we use
    # the bare name when possible, and the fully versioned one when
    # necessary.
    final_rpm_list = []
    for package in rpm_candidates:
        command = ('rpm', '--query', package, '--queryformat',
                   r'%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\n')
        status, stdout, _ = core.system(command, log_output=False)
        versioned_rpms = re.split('\n', stdout.strip())
        if len(versioned_rpms) > 1:
            final_rpm_list += versioned_rpms
        else:
            final_rpm_list.append(package)
    return final_rpm_list
def isProbeInfoProcessed(self, ProbePattern):
    """Return True when the Gratia log shows the probe's records were processed.

    Watches the Gratia service log (up to 600 s) for a 'RecordProcessor: 0:
    ProbeDetails' line that contains both the probe-specific ProbePattern and
    the word 'saved'. Sample target line from a gratia log:

    2013-07-14 17:21:48,073 gratia.service(Thread-66) [FINE]: RecordProcessor: 0: ProbeDetails 9 / 9
    (gridftp-transfer:fermicloud101.fnal.gov, recordId= Record (Id: fermicloud101.fnal.gov:3274.0
    CreateTime: 14 July 2013 at 22:21:37 GMT KeyInfo: null) ) saved.
    """
    # NOTE(review): ProbePattern is interpolated into the regex unescaped —
    # assumes callers pass plain probe names without regex metacharacters;
    # confirm before passing anything else.
    record_re = '.*' + 'RecordProcessor: 0: ProbeDetails' + '.*' + '/' + '.*' + ProbePattern + '.*' + 'saved'
    line, gap = core.monitor_file(core.config['gratia.log.file'], core.state['gratia.log.stat'], record_re, 600.0)
    if line is not None:
        core.log_message('Gratia processed probe data - Time taken is %.1f seconds' % gap)
        core.log_message('Gratia processed probe data - Line is ' + str(line))
        return True
    else:
        core.log_message('Did not find the search pattern within the given time limit.')
        return False
def retry_command(command, timeout_seconds=3600):
    """Run a Yum command repeatedly until success, hard failure, or timeout.

    Run the given Yum command. If it succeeds, return. If it fails for a
    whitelisted reason, keep trying, otherwise return a failure message. But,
    do not retry commands for longer than the timeout duration.

    Returns a (fail_msg, status, stdout, stderr) tuple; fail_msg is '' on
    success.
    """
    deadline = time.time() + timeout_seconds
    fail_msg, status, stdout, stderr = '', '', '', ''

    # Loop for retries
    while True:
        # Stop (re)trying if the deadline has passed
        if time.time() > deadline:
            fail_msg += "Retries terminated after timeout period"
            break

        status, stdout, stderr = core.system(command)

        # Deal with success
        if status == 0:
            break

        # Deal with failures that can be retried. Unlike the sibling variant
        # of this function, yum state is cleaned only after a retryable
        # failure, not before every attempt.
        elif yum_failure_can_be_retried(stdout):
            time.sleep(30)
            clean()
            core.log_message("Retrying command")
            continue

        # Otherwise, we do not expect a retry to succeed, ever, so fail this
        # package
        else:
            fail_msg = core.diagnose("Command failed", command, status, stdout, stderr)
            break

    return fail_msg, status, stdout, stderr
def output_is_acceptable(fetch_crl_output):
    """All lines output from fetch-crl are considered an error. We whitelist
    a few transient ones; return True when every non-blank line matches some
    whitelisted pattern.
    """
    error_message_whitelist = [
        'CRL has lastUpdate time in the future',
        'CRL has nextUpdate time in the past',
        # VERBOSE(0) BrGrid/0: downloaded CRL lastUpdate could not be derived
        'CRL lastUpdate could not be derived',
        # ERROR CRL verification failed for BrGrid/0 (BrGrid)
        'CRL verification failed for',
        # VERBOSE(0) BrGrid/0: 0
        r': \d+$',
        # VERBOSE(0) Download error http://lacgridca.ic.uff.br/crl/cacrl.crl: timed out after 120s
        'Download error',
        # ERROR verify called on empty data blob
        'verify called on empty data blob',
        # VERBOSE(0) SDG-G2/0: CRL signature failed
        'CRL signature failed',
        # LWP::Protocol::http::Socket: connect: No route to host at /usr/share/perl5/LWP/Protocol/http.pm line 51.
        'LWP::Protocol::http::Socket',
    ]
    for line in fetch_crl_output.rstrip('\n').split('\n'):
        if not line:
            # skip blank lines
            continue
        if not any(re.search(pattern, line) for pattern in error_message_whitelist):
            core.log_message("Found uncaught error message: '%s'" % line.strip())
            return False
    return True
def copy_user_vo_map_file(self):
    """This helper method copies user-vo-map into /var/lib/osg, if not already present.

    Returns False when the copy fails, True otherwise.
    """
    dest_dir = '/var/lib/osg/'
    src_file = '/usr/share/osg-test/gratia/user-vo-map'
    if not os.path.exists(dest_dir):
        # Directory missing: create it, then copy the file in.
        os.makedirs(dest_dir)
        try:
            shutil.copy(src_file, dest_dir)
        except IOError as e:
            core.log_message("Unable to copy file. %s" % e)
            return False
    elif not os.path.exists(os.path.join(dest_dir, 'user-vo-map')):
        # Directory exists, copy file, if the file is not already present
        try:
            shutil.copy(src_file, dest_dir)
        except IOError as e:
            core.log_message("Unable to copy file. %s" % e)
            return False
    else:
        # Both directory and file are present, so do nothing but log contents.
        core.log_message(str(os.listdir(dest_dir)))
    return True