def test_normal_logging(self):
    """
    Test if just using import logging, logging.warning still works after
    importing fancylogger.
    """
    # capture stderr so the screen handler's output can be inspected;
    # save the original so it can be restored afterwards (the original
    # version leaked the redirection into subsequent tests)
    _stderr = sys.stderr
    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()
    import logging

    # module-level logging.warning must reach the screen handler
    msg = 'this is my string'
    logging.warning(msg)
    self.assertTrue(msg in stringfile.getvalue(), msg="'%s' in '%s'" % (msg, stringfile.getvalue()))

    # the (non-fancylogger) root logger must too
    msg = 'there are many like it'
    logging.getLogger().warning(msg)
    self.assertTrue(msg in stringfile.getvalue(), msg="'%s' in '%s'" % (msg, stringfile.getvalue()))

    # and so must an arbitrary named logger
    msg = 'but this one is mine'
    logging.getLogger('mine').warning(msg)
    self.assertTrue(msg in stringfile.getvalue(), msg="'%s' in '%s'" % (msg, stringfile.getvalue()))

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)
    sys.stderr = _stderr
def run(self, args):
    """Run 'create' subcommand."""
    # parse command-line arguments + environment variables
    optparser = CreateOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    options = optparser.options
    if not validate_pbs_option(options):
        sys.stderr.write('Missing config options. Exiting.\n')
        return 1

    label = options.label
    # label must be acceptable w.r.t. the already-known cluster labels
    if not hc.validate_label(label, hc.known_cluster_labels()):
        sys.exit(1)

    if not hc.validate_hodconf_or_dist(options.hodconf, options.dist):
        sys.exit(1)

    try:
        # submit the HOD job and record the submission
        j = PbsHodJob(optparser)
        hc.report_cluster_submission(label)
        j.run()
        jobs = j.state()
        hc.post_job_submission(label, jobs, optparser.options.workdir)
        return 0
    except StandardError as e:
        # make the failure visible on screen before re-raising via fancylogger
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(e.message)
def parse_options(args=None):
    """wrapper function for option parsing"""
    if os.environ.get("DEBUG_EASYBUILD_OPTIONS", "0").lower() in ("1", "true", "yes", "y"):
        # very early debug, to debug the generaloption itself
        fancylogger.logToScreen(enable=True)
        fancylogger.setLogLevel("DEBUG")

    usage = "%prog [options] easyconfig [...]"
    description = (
        "Builds software based on easyconfig (or parse a directory).\n"
        "Provide one or more easyconfigs or directories, use -H or --help more information."
    )

    try:
        eb_go = EasyBuildOptions(
            usage=usage,
            description=description,
            prog="eb",
            envvar_prefix=CONFIG_ENV_VAR_PREFIX,
            go_args=args,
            error_env_options=True,
            error_env_option_method=raise_easybuilderror,
        )
    except Exception, err:
        raise EasyBuildError("Failed to parse configuration options: %s" % err)
    # NOTE(review): eb_go is never returned within this excerpt -- confirm a
    # `return eb_go` follows in the full source
def parseoptions(self, options_list=None):
    """
    Handle mpirun mode:
        - continue with reduced set of commandline options
        - These options are the keys of opts_to_remove.
        - The values of opts_to_remove are the number of arguments of these
          options, that also need to be removed.
    """
    if options_list is None:
        options_list = self.default_parseoptions()

    filtered = list(options_list)
    if self.mpirunmode:
        # option name -> number of trailing arguments to drop along with it
        for opt, nr_args in {'-np': 1, '-machinefile': 1}.items():
            try:
                idx = filtered.index(opt)
            except ValueError:
                continue
            # drop the option itself plus its arguments
            del filtered[idx:idx + 1 + nr_args]

    GeneralOption.parseoptions(self, filtered)

    # set error logging to file as soon as possible
    if self.options.logtofile:
        print("logtofile %s" % self.options.logtofile)
        if os.path.exists(self.options.logtofile):
            os.remove(self.options.logtofile)
        fancylogger.logToFile(self.options.logtofile)
        fancylogger.logToScreen(False)
def test_fancylogger_as_rootlogger_logging(self):
    """
    Test if just using import logging, logging with logging uses fancylogger
    after setting the root logger.
    """
    # test logging.root is logging root logger
    # this is an assumption made to make the fancyrootlogger code work
    orig_root = logging.getLogger()
    self.assertEqual(logging.root, orig_root, msg='logging.root is the root logger')
    self.assertFalse(isinstance(logging.root, fancylogger.FancyLogger),
                     msg='logging.root is not a FancyLogger')

    # capture stderr; save the original so it can be restored at the end
    # (the original version leaked the redirection into subsequent tests)
    _stderr = sys.stderr
    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()
    fancylogger.setLogLevelDebug()
    logger = fancylogger.getLogger()

    self.assertEqual(logger.handlers, [self.handler, handler],
                     msg='active handler for root fancylogger')
    self.assertEqual(logger.level, fancylogger.getLevelInt('DEBUG'), msg='debug level set')

    # before setroot, plain logging.debug does not pass through fancylogger
    msg = 'this is my string'
    logging.debug(msg)
    self.assertEqual(stringfile.getvalue(), '',
                     msg="logging.debug reports nothing when fancylogger loglevel is debug")

    fancylogger.setroot()
    self.assertTrue(isinstance(logging.root, fancylogger.FancyLogger),
                    msg='logging.root is a FancyLogger after setRootLogger')
    self.assertEqual(logging.root.level, fancylogger.getLevelInt('DEBUG'),
                     msg='debug level set for root')
    self.assertEqual(logger.level, logging.NOTSET,
                     msg='original root fancylogger level set to NOTSET')
    self.assertEqual(logging.root.handlers, [self.handler, handler],
                     msg='active handler for root logger from previous root fancylogger')
    self.assertEqual(logger.handlers, [], msg='no active handlers on previous root fancylogger')

    root_logger = logging.getLogger('')
    self.assertEqual(root_logger, logging.root,
                     msg='logging.getLogger() returns logging.root FancyLogger')
    frl = fancylogger.getLogger()
    self.assertEqual(frl, logging.root,
                     msg='fancylogger.getLogger() returns logging.root FancyLogger')

    # after setroot, plain logging.debug does pass through fancylogger
    logging.debug(msg)
    self.assertTrue(msg in stringfile.getvalue(),
                    msg="logging.debug reports when fancylogger loglevel is debug")

    fancylogger.resetroot()
    self.assertEqual(logging.root, orig_root,
                     msg='logging.root is the original root logger after resetroot')

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)
    sys.stderr = _stderr
def run(self, args): """Run 'connect' subcommand.""" optparser = ConnectOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt) try: if len(optparser.args) > 1: label = optparser.args[1] else: _log.error("No label provided.") sys.exit(1) print "Connecting to HOD cluster with label '%s'..." % label try: jobid = cluster_jobid(label) env_script = cluster_env_file(label) except ValueError as err: _log.error(err) sys.exit(1) print "Job ID found: %s" % jobid pbs = rm_pbs.Pbs(optparser) jobs = pbs.state() pbsjobs = [job for job in jobs if job.jobid == jobid] if len(pbsjobs) == 0: _log.error("Job with job ID '%s' not found by pbs.", jobid) sys.exit(1) elif len(pbsjobs) > 1: _log.error("Multiple jobs found with job ID '%s': %s", jobid, pbsjobs) sys.exit(1) pbsjob = pbsjobs[0] if pbsjob.state == ['Q', 'H']: # This should never happen since the hod.d/<jobid>/env file is # written on cluster startup. Maybe someone hacked the dirs. _log.error("Cannot connect to cluster with job ID '%s' yet. It is still queued.", jobid) sys.exit(1) else: print "HOD cluster '%s' @ job ID %s appears to be running..." % (label, jobid) print "Setting up SSH connection to %s..." % pbsjob.hosts # -i: interactive non-login shell cmd = ['ssh', '-t', pbsjob.hosts, 'exec', 'bash', '--rcfile', env_script, '-i'] _log.info("Logging in using command: %s", ' '.join(cmd)) os.execvp('/usr/bin/ssh', cmd) return 0 # pragma: no cover except StandardError as err: fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT) fancylogger.logToScreen(enable=True) _log.raiseException(err)
def parse_options(args=None):
    """wrapper function for option parsing"""
    debug_env = os.environ.get('DEBUG_EASYBUILD_OPTIONS', '0').lower()
    if debug_env in ('1', 'true', 'yes', 'y'):
        # very early debug, to debug the generaloption itself
        fancylogger.logToScreen(enable=True)
        fancylogger.setLogLevel('DEBUG')

    descr_lines = [
        "Builds software based on easyconfig (or parse a directory).",
        "Provide one or more easyconfigs or directories, use -H or --help more information.",
    ]
    eb_go = EasyBuildOptions(
        usage="%prog [options] easyconfig [...]",
        description="\n".join(descr_lines),
        prog='eb',
        envvar_prefix='EASYBUILD',
        go_args=args,
    )
    return eb_go
def run(self, args):
    """Run 'clean' subcommand."""
    optparser = CleanOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    try:
        # cross-reference the known cluster labels with live PBS job state
        pbs = rm_pbs.Pbs(optparser)
        state = pbs.state()
        labels = hc.known_cluster_labels()
        master = rm_pbs.master_hostname()
        cluster_info = hc.mk_cluster_info_dict(labels, state, master=master)
        # drop stale cluster info for jobs that no longer exist
        hc.clean_cluster_info(master, cluster_info)
    except StandardError as err:
        # make the failure visible on screen before re-raising
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(err.message)
def init_logging(logfile, logtostdout=False, silent=False, colorize=fancylogger.Colorize.AUTO):
    """Initialize logging.

    Logs either to stdout (optionally colorized) or to the given file;
    a temporary file is created when no log file is specified.
    Returns a (logger, logfile) tuple.
    """
    if logtostdout:
        fancylogger.logToScreen(enable=True, stdout=True, colorize=colorize)
    else:
        if logfile is None:
            # mkstemp returns (fd, filename); fd is a raw os.open descriptor
            # (not a regular file object), so close it explicitly
            tmp_fd, logfile = tempfile.mkstemp(suffix='.log', prefix='easybuild-')
            os.close(tmp_fd)
        fancylogger.logToFile(logfile)
        print_msg('temporary log file in case of crash %s' % (logfile), log=None, silent=silent)

    return fancylogger.getLogger(fname=False), logfile
def init_logging(logfile, logtostdout=False, testing=False):
    """Initialize logging; return a (logger, logfile) tuple."""
    if logtostdout:
        # everything goes straight to stdout, no log file involved
        fancylogger.logToScreen(enable=True, stdout=True)
        return fancylogger.getLogger(fname=False), logfile

    if logfile is None:
        # no log file specified: fall back to a temporary one
        # (mkstemp hands back an os-level fd, which must be closed explicitly)
        fd, logfile = tempfile.mkstemp(suffix='.log', prefix='easybuild-')
        os.close(fd)
    fancylogger.logToFile(logfile)
    print_msg('temporary log file in case of crash %s' % (logfile), log=None, silent=testing)

    return fancylogger.getLogger(fname=False), logfile
def __init__(self, hostname, port, log_dir, filename, pidfile):
    """Constructor"""
    # the daemon's own stdout and stderr both go to the same error log
    err_log = os.path.join(log_dir, 'logging_error.log')
    Daemon.__init__(self, pidfile, '/dev/null', err_log, err_log)
    self.hostname = hostname
    self.port = port

    # Set up logging: file only, so disable screen output and accept
    # absolutely everything that's coming at us (level 0 == everything)
    fancylogger.logToScreen(False)
    fancylogger.setLogLevel(0)
    self.logfile = os.path.join(log_dir, filename)
    fancylogger.logToFile(self.logfile)
    self.logger = fancylogger.getLogger()

    # UDP socket on which log records will be received
    self.socket_ = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def test_zzz_logtostdout(self):
    """Testing redirecting log to stdout."""
    # dummy log file, to keep the regular log target out of the way
    fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
    os.close(fd)

    # both the long and the short option must redirect the log to stdout
    for stdout_arg in ['--logtostdout', '-l']:
        _stdout = sys.stdout

        # capture stdout in a temp file so it can be inspected afterwards
        fd, fn = tempfile.mkstemp()
        fh = os.fdopen(fd, 'w')
        sys.stdout = fh

        args = [
            '--software-name=somethingrandom',
            '--robot', '.',
            '--debug',
            stdout_arg,
        ]
        self.eb_main(args, logfile=dummylogfn)

        # make sure we restore
        sys.stdout.flush()
        sys.stdout = _stdout
        fancylogger.logToScreen(enable=False, stdout=True)

        outtxt = read_file(fn)

        self.assertTrue(
            len(outtxt) > 100,
            "Log messages are printed to stdout when %s is used (outtxt: %s)" % (stdout_arg, outtxt))

        # cleanup
        os.remove(fn)
        modify_env(os.environ, self.orig_environ)
        tempfile.tempdir = None

    if os.path.exists(dummylogfn):
        os.remove(dummylogfn)
    fancylogger.logToFile(self.logfile)
def run(self, args):
    """Run 'batch' subcommand."""
    optparser = BatchOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    options = optparser.options
    if not validate_pbs_option(options):
        sys.stderr.write('Missing config options. Exiting.\n')
        return 1

    if optparser.options.script is None:
        sys.stderr.write('Missing script. Exiting.\n')
        return 1

    # resolve script path to absolute path
    optparser.options.script = os.path.abspath(optparser.options.script)

    if not os.path.exists(optparser.options.script):
        sys.stderr.write("Specified script does not exist: %s. Exiting.\n" % optparser.options.script)
        return 1

    # make sure script is executable
    cur_perms = os.stat(optparser.options.script)[stat.ST_MODE]
    if not (cur_perms & stat.S_IXUSR):
        print "Specified script %s is not executable yet, fixing that..." % optparser.options.script
        os.chmod(optparser.options.script, cur_perms|stat.S_IXUSR)

    label = options.label
    # label must be acceptable w.r.t. the already-known cluster labels
    if not hc.validate_label(label, hc.known_cluster_labels()):
        sys.exit(1)

    if not hc.validate_hodconf_or_dist(options.hodconf, options.dist):
        sys.exit(1)

    try:
        # submit the HOD batch job and record the submission
        j = PbsHodJob(optparser)
        hc.report_cluster_submission(label)
        j.run()
        jobs = j.state()
        hc.post_job_submission(label, jobs, optparser.options.workdir)
        return 0
    except StandardError as e:
        # make the failure visible on screen before re-raising via fancylogger
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(e.message)
def _stream_stdouterr(self, isstdout=True, expect_match=True):
    """Log to stdout or stderr, check that the message lands on the expected stream.

    The stream under test is redirected to a temp file, the other stream goes
    to /dev/null; afterwards the temp file is searched for the log record.
    """
    fd, logfn = tempfile.mkstemp()
    # fh will be checked
    fh = os.fdopen(fd, 'w')
    _stdout = sys.stdout
    _stderr = sys.stderr
    # keep a handle on the devnull file so it can be closed (was leaked before)
    devnull = open(os.devnull, 'w')
    if isstdout == expect_match:
        sys.stdout = fh
        sys.stderr = devnull
    else:
        sys.stdout = devnull
        sys.stderr = fh

    fancylogger.setLogLevelInfo()
    name = 'test_stream_stdout'
    lh = fancylogger.logToScreen(stdout=isstdout)
    logger = fancylogger.getLogger(name, fname=True, clsname=False)
    # logfn makes the message unique
    msg = 'TEST isstdout %s expect_match %s logfn %s' % (isstdout, expect_match, logfn)
    logger.info(msg)

    # restore; close fh so buffered log output is flushed to disk before reading
    # (it was never closed before, which could make the check flaky)
    fancylogger.logToScreen(enable=False, handler=lh)
    sys.stdout = _stdout
    sys.stderr = _stderr
    fh.close()
    devnull.close()

    fh2 = open(logfn)
    txt = fh2.read().strip()
    fh2.close()
    reg_exp = re.compile(r"INFO\s+\S+.%s.%s\s+\S+\s+%s" % (name, '_stream_stdouterr', msg))
    match = reg_exp.search(txt) is not None
    self.assertEqual(match, expect_match)

    try:
        os.remove(logfn)
    except OSError:
        # best-effort cleanup only; was a bare `except:` which also hid real errors
        pass
def run(self, args):
    """Run 'list' subcommand."""
    optparser = ListOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    try:
        # combine known cluster labels with live PBS job state
        pbs = rm_pbs.Pbs(optparser)
        state = pbs.state()
        labels = hc.known_cluster_labels()
        info = hc.mk_cluster_info_dict(labels, state)
        if not info:
            print 'No jobs found'
            sys.exit(0)

        # render one table row per cluster
        headers = ['Cluster label', 'Job ID', 'State', 'Hosts']
        info_rows = format_list_rows(info)
        print ht.format_table(info_rows, headers)
    except StandardError as err:
        # make the failure visible on screen before re-raising
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(err)
    return 0
def test_normal_logging(self):
    """
    Test that plain `import logging; logging.warning(...)` still works
    after fancylogger has been imported.
    """
    _stderr = sys.stderr
    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()

    import logging
    # module-level warning, root logger and a named logger must all end up
    # on the (captured) screen handler
    for log_obj, msg in [
        (logging, 'this is my string'),
        (logging.getLogger(), 'there are many like it'),
        (logging.getLogger('mine'), 'but this one is mine'),
    ]:
        log_obj.warning(msg)
        self.assertTrue(msg in stringfile.getvalue())

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)
    sys.stderr = _stderr
def test_classname_in_log(self):
    """Do a log and check if the classname is correctly in it"""
    _stderr = sys.stderr

    # local class, so the expected class name is fully under our control
    class Foobar:
        def somefunction(self):
            logger = fancylogger.getLogger(fname=True, clsname=True)
            logger.warn('we are logging something here')

    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()

    Foobar().somefunction()
    self.assertTrue('Foobar.somefunction' in stringfile.getvalue())
    stringfile.close()

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)

    # and again: a function outside any class gets the 'unknown' class marker
    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()

    classless_function()
    self.assertTrue('unknown__getCallingClassName.classless_function' in stringfile.getvalue())

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)

    # verify the %(className)s format field is filled in
    stringfile = StringIO()
    sys.stderr = stringfile

    fancylogger.setLogFormat("%(className)s blabla")
    handler = fancylogger.logToScreen()
    logger = fancylogger.getLogger(fname=False, clsname=False)
    logger.warning("blabla")

    txt = stringfile.getvalue()
    # this will only hold in debug mode, so also disable the test
    if __debug__:
        pattern = 'FancyLoggerTest'
        self.assertTrue(pattern in txt, "Pattern '%s' found in: %s" % (pattern, txt))

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)
    sys.stderr = _stderr
def _stream_stdouterr(self, isstdout=True, expect_match=True):
    """Log to stdout or stderr, check the message lands on the expected stream.

    One stream is redirected to a temp file and the other to /dev/null;
    the temp file is then searched for the log record.
    """
    fd, logfn = tempfile.mkstemp()
    # fh will be checked
    fh = os.fdopen(fd, 'w')
    _stdout = sys.stdout
    _stderr = sys.stderr
    # keep a handle on the devnull file so it can be closed (was leaked before)
    devnull = open(os.devnull, 'w')
    if isstdout == expect_match:
        sys.stdout = fh
        sys.stderr = devnull
    else:
        sys.stdout = devnull
        sys.stderr = fh

    fancylogger.setLogLevelInfo()
    name = 'test_stream_stdout'
    lh = fancylogger.logToScreen(stdout=isstdout)
    logger = fancylogger.getLogger(name, fname=True, clsname=False)
    # logfn makes it unique
    msg = 'TEST isstdout %s expect_match %s logfn %s' % (isstdout, expect_match, logfn)
    logger.info(msg)

    # restore; close fh so buffered output is flushed to disk before reading
    # (it was never closed before, which could make the check flaky)
    fancylogger.logToScreen(enable=False, handler=lh)
    sys.stdout = _stdout
    sys.stderr = _stderr
    fh.close()
    devnull.close()

    fh2 = open(logfn)
    txt = fh2.read().strip()
    fh2.close()
    reg_exp = re.compile(r"INFO\s+\S+.%s.%s\s+\S+\s+%s" % (name, '_stream_stdouterr', msg))
    match = reg_exp.search(txt) is not None
    self.assertEqual(match, expect_match)

    try:
        os.remove(logfn)
    except OSError:
        # best-effort cleanup only; was a bare `except:` which also hid real errors
        pass
def test_zzz_logtostdout(self):
    """Testing redirecting log to stdout."""
    # dummy log file, to keep the regular log target out of the way
    fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
    os.close(fd)

    # both the long and the short option must redirect the log to stdout
    for stdout_arg in ['--logtostdout', '-l']:
        _stdout = sys.stdout

        # capture stdout in a temp file so it can be inspected afterwards
        fd, fn = tempfile.mkstemp()
        fh = os.fdopen(fd, 'w')
        sys.stdout = fh

        args = [
            '--software-name=somethingrandom',
            '--robot', '.',
            '--debug',
            stdout_arg,
        ]
        self.eb_main(args, logfile=dummylogfn)

        # make sure we restore
        sys.stdout.flush()
        sys.stdout = _stdout
        fancylogger.logToScreen(enable=False, stdout=True)

        outtxt = read_file(fn)

        self.assertTrue(len(outtxt) > 100,
                        "Log messages are printed to stdout when %s is used (outtxt: %s)" % (stdout_arg, outtxt))

        # cleanup
        os.remove(fn)
        modify_env(os.environ, self.orig_environ)
        tempfile.tempdir = None

    if os.path.exists(dummylogfn):
        os.remove(dummylogfn)
    fancylogger.logToFile(self.logfile)
def run(self, args):
    """Run 'relabel' subcommand."""
    optparser = RelabelOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    try:
        # expects exactly two positional args: <old-label> <new-label>
        if len(optparser.args) != 3:
            _log.error(self.usage())
            sys.exit(1)

        labels = hc.known_cluster_labels()
        if optparser.args[1] not in labels:
            _log.error("Cluster with label '%s' not found", optparser.args[1])
            sys.exit(1)

        try:
            # rename the on-disk cluster info directory
            hc.mv_cluster_info(optparser.args[1], optparser.args[2])
        except (IOError, OSError) as err:
            _log.error("Could not change label '%s' to '%s': %s", optparser.args[1], optparser.args[2], err)
            sys.exit(1)
    except StandardError as err:
        # make the failure visible on screen before re-raising
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(err)
    return 0
def test_classname_in_log(self):
    """Do a log and check if the classname is correctly in it"""
    _stderr = sys.stderr

    # local class, so the expected class name is fully under our control
    class Foobar:
        def somefunction(self):
            logger = fancylogger.getLogger(fname=True, clsname=True)
            logger.warn('we are logging something here')

    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()

    Foobar().somefunction()
    self.assertTrue('Foobar.somefunction' in stringfile.getvalue())
    stringfile.close()

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)

    # and again: a function outside any class gets the '?' class marker
    stringfile = StringIO()
    sys.stderr = stringfile
    handler = fancylogger.logToScreen()

    classless_function()
    self.assertTrue('?.classless_function' in stringfile.getvalue())

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)

    # verify the %(className)s format field is filled in
    stringfile = StringIO()
    sys.stderr = stringfile

    fancylogger.setLogFormat("%(className)s blabla")
    handler = fancylogger.logToScreen()
    logger = fancylogger.getLogger(fname=False, clsname=False)
    logger.warn("blabla")
    print stringfile.getvalue()

    # this will only hold in debug mode, so also disable the test
    if __debug__:
        self.assertTrue('FancyLoggerTest' in stringfile.getvalue())

    # restore
    fancylogger.logToScreen(enable=False, handler=handler)
    sys.stderr = _stderr
def parse_options(args=None):
    """wrapper function for option parsing"""
    if os.environ.get('DEBUG_EASYBUILD_OPTIONS', '0').lower() in ('1', 'true', 'yes', 'y'):
        # very early debug, to debug the generaloption itself
        fancylogger.logToScreen(enable=True)
        fancylogger.setLogLevel('DEBUG')

    usage = "%prog [options] easyconfig [...]"
    description = (
        "Builds software based on easyconfig (or parse a directory).\n"
        "Provide one or more easyconfigs or directories, use -H or --help more information."
    )

    try:
        eb_go = EasyBuildOptions(usage=usage, description=description, prog='eb',
                                 envvar_prefix=CONFIG_ENV_VAR_PREFIX, go_args=args,
                                 error_env_options=True, error_env_option_method=raise_easybuilderror)
    except Exception, err:
        raise EasyBuildError("Failed to parse configuration options: %s" % err)
    # NOTE(review): eb_go is never returned within this excerpt -- confirm a
    # `return eb_go` follows in the full source
def stop_logging(logfile, logtostdout=False):
    """Stop logging: disable the stdout handler (if any) and the file handler."""
    if logtostdout:
        # undo the screen/stdout logger set up by init_logging
        fancylogger.logToScreen(enable=False, stdout=True)
    # always disable logging to the given log file
    fancylogger.logToFile(logfile, enable=False)
# -*- encoding: utf-8 -*- import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) import test.asyncprocess as a import test.dateandtime as td import test.generaloption as tg import test.fancylogger as tf import test.missing as tm import test.run as trun import test.optcomplete as topt import test.wrapper as wrapt import unittest from vsc.utils import fancylogger fancylogger.logToScreen(enable=False) suite = unittest.TestSuite([x.suite() for x in (a, td, tg, tf, tm, trun, topt, wrapt)]) try: import xmlrunner rs = xmlrunner.XMLTestRunner(output="test-reports").run(suite) except ImportError, err: rs = unittest.TextTestRunner().run(suite) if not rs.wasSuccessful(): sys.exit(1)
@author: Jens Timmerman (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import os
import pwd
import sys

from vsc.jobs.pbs.clusterdata import get_clusterdata, get_cluster_mpp, get_cluster_overhead, MASTER_REGEXP
from vsc.jobs.pbs.submitfilter import SubmitFilter, get_warnings, warn, PMEM, VMEM
from vsc.jobs.pbs.submitfilter import MEM
from vsc.utils import fancylogger

# submitfilter runs in the user's terminal: log to syslog, not to screen
fancylogger.logToDevLog(True, 'syslogger')
fancylogger.logToScreen(enable=False)
syslogger = fancylogger.getLogger('syslogger')


def make_new_header(sf):
    """
    Generate a new header by rewriting selected options and adding missing ones.

    Takes a submitfilter instance as only argument, returns the header as a
    list of strings (one line per element)
    """
    state, newopts = sf.gather_state(MASTER_REGEXP)
    # default to 1 processor-per-node when not explicitly requested
    ppn = state['l'].get('_ppn', 1)
    make = sf.make_header
    # NOTE(review): function body continues beyond this excerpt
# warning/critical thresholds for nagios-style reporting of held jobs
RELEASEJOB_LIMITS = {
    # jobs in hold per user (maximum of all users)
    'peruser_warning': 10,
    'peruser_critical': 20,
    # total number of jobs in hold
    'total_warning': 50,
    'total_critical': 100,
    # per job release attempts (maximum of all jobs)
    'release_warning': 50,
    'release_critical': 70,
}

# only jobs with these hold types are candidates for release
RELEASEJOB_SUPPORTED_HOLDTYPES = ('BatchHold',)

_log = getLogger(__name__, fname=False)
logToScreen(True)
setLogLevelInfo()


def process_hold(clusters, dry_run=False):
    """Process a filtered queueinfo dict"""
    releasejob_cache = FileCache(RELEASEJOB_CACHE_FILE)
    # get the showq data
    for hosts, data in clusters.items():
        data['path'] = data['spath']  # showq path
    showq = Showq(clusters, cache_pickle=True)
    (queue_information, reported_hosts, failed_hosts) = showq.get_moab_command_information()
    # release the jobs, prepare the command
    m = MoabCommand(cache_pickle=False, dry_run=dry_run)
    for hosts, data in clusters.items():
        # NOTE(review): loop body continues beyond this excerpt
        # NOTE(review): this fragment starts mid-definition; the enclosing
        # if/else and its indentation are assumed -- confirm against full source
        log.info("%s: fixed" % ec_file)
    else:
        log.info("%s: nothing to fix" % ec_file)

# MAIN
try:
    init_build_options()
    options = {
        'backup': ("Backup up easyconfigs before modifying them", None, 'store_true', True, 'b'),
    }
    go = FixBrokenEasyconfigsOption(options)
    log = go.log
    # only warnings and above, printed to stdout
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevel('WARNING')

    try:
        # sanity check: easyblocks must be importable
        import easybuild.easyblocks.generic.configuremake
    except ImportError, err:
        raise EasyBuildError("easyblocks are not available in Python search path: %s", err)

    for path in go.args:
        if not os.path.exists(path):
            raise EasyBuildError("Non-existing path %s specified", path)

    ec_files = [ec for p in go.args for ec in find_easyconfigs(p)]
    if not ec_files:
        raise EasyBuildError("No easyconfig files specified")
    # NOTE(review): the outer try's except clause lies beyond this excerpt
def run(self, args):
    """Run 'destroy' subcommand."""
    optparser = DestroyOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
    try:
        label, jobid = None, None
        # label of cluster to destroy is the first positional argument
        if len(optparser.args) > 1:
            label = optparser.args[1]
            print "Destroying HOD cluster with label '%s'..." % label
        else:
            _log.error("No label provided.")
            sys.exit(1)

        try:
            jobid = cluster_jobid(label)
            print "Job ID: %s" % jobid
        except ValueError as err:
            _log.error(err)
            sys.exit(1)

        # try to figure out job state
        job_state = None
        pbs = rm_pbs.Pbs(optparser)
        jobs = pbs.state()
        pbsjobs = [job for job in jobs if job.jobid == jobid]
        _log.debug("Matching jobs for job ID '%s': %s", jobid, pbsjobs)
        if len(pbsjobs) == 1:
            job_state = pbsjobs[0].state
            print "Job status: %s" % job_state
        elif len(pbsjobs) == 0:
            print "(job no longer found)"
        else:
            _log.error("Multiple jobs found with job ID '%s': %s", jobid, pbsjobs)
            sys.exit(1)

        # request confirmation is case the job is currently running
        if job_state == 'R':
            resp = raw_input("Confirm destroying the *running* HOD cluster with label '%s'? [y/n]: " % label)
            if resp != 'y':
                print "(destruction aborted)"
                return
        elif job_state in ['C', 'E']:
            # completed/exiting jobs need no deletion anymore
            print "(job has already ended/completed)"
            job_state = None

        print "\nStarting actual destruction of HOD cluster with label '%s'...\n" % label

        # actually destroy HOD cluster by deleting job and removing cluster info dir and local work dir
        if job_state is not None:
            # if job was not successfully deleted, pbs.remove will print an error message
            if pbs.remove(jobid):
                print "Job with ID %s deleted." % jobid

        rm_cluster_localworkdir(label)

        if cluster_info_exists(label):
            rm_cluster_info(label)

        print "\nHOD cluster with label '%s' (job ID: %s) destroyed." % (label, jobid)
    except StandardError as err:
        # make the failure visible on screen before re-raising
        fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
        fancylogger.logToScreen(enable=True)
        _log.raiseException(err)
def main():
    """Main"""
    # option spec: name -> (help, type, action, default, short flag)
    options = {
        'nagios': ('Report in nagios format', None, 'store_true', False, 'n'),
        'regex': ('Filter on regexp, data for first match', None, 'regex', None, 'r'),
        'allregex': ('Combined with --regex/-r, return all data', None, 'store_true', False, 'A'),
        'anystate': ('Matches any state (eg down_on_error node will also list as error)',
                     None, 'store_true', False, 'a'),
        'down': ('Down nodes', None, 'store_true', False, 'D'),
        'downonerror': ('Down on error nodes', None, 'store_true', False, 'E'),
        'offline': ('Offline nodes', None, 'store_true', False, 'o'),
        'partial': ('Partial nodes (one or more running job(s), jobslot(s) available)',
                    None, 'store_true', False, 'p'),
        'job-exclusive': ('Job-exclusive nodes (no jobslots available)', None, 'store_true', False, 'x'),
        'free': ('Free nodes (0 or more running jobs, jobslot(s) available)', None, 'store_true', False, 'f'),
        'unknown': ('State unknown nodes', None, 'store_true', False, 'u'),
        'bad': ('Bad nodes (broken jobregex)', None, 'store_true', False, 'b'),
        'error': ('Error nodes', None, 'store_true', False, 'e'),
        'idle': ('Idle nodes (No running jobs, jobslot(s) available)', None, 'store_true', False, 'i'),
        'singlenodeinfo': (('Single (most-frequent) node information in key=value format'
                            '(no combination with other options)'), None, 'store_true', False, 'I'),
        'reportnodeinfo': ('Report node information (no combination with other options)',
                           None, 'store_true', False, 'R'),
        'moab': ('Use moab information (mdiag -n)', None, 'store_true', False, 'm'),
        'moabxml': ('Use xml moab data from file (for testing)', None, 'store', None),
        'shorthost': ('Return (short) hostname', None, 'store_true', False, 's'),
        'invert': ('Return inverted selection', None, 'store_true', False, 'v'),
    }

    go = simple_option(options)

    # in nagios mode, keep the terminal clean and log to the system log instead
    if go.options.nagios and not go.options.debug:
        fancylogger.logToDevLog(enable=True)
        fancylogger.logToScreen(enable=False)
        fancylogger.setLogLevelInfo()

    all_states = ND_NAGIOS_CRITICAL + ND_NAGIOS_WARNING + ND_NAGIOS_OK

    # build the list of states to report on from the selected flags
    report_states = []
    if go.options.down:
        report_states.append(ND_down)
    if go.options.downonerror:
        report_states.append(ND_down_on_error)
    if go.options.offline:
        report_states.append(ND_offline)
    if go.options.free:
        report_states.append(ND_free)
    if go.options.partial:
        report_states.append(ND_free_and_job)
    if go.options.job_exclusive:
        report_states.append(ND_job_exclusive)
    if go.options.unknown:
        report_states.append(ND_state_unknown)
    if go.options.bad:
        report_states.append(ND_bad)
    if go.options.error:
        report_states.append(ND_error)
    if go.options.idle:
        report_states.append(ND_idle)

    # no explicit selection: report on everything
    if len(report_states) == 0:
        report_states = all_states

    if go.options.singlenodeinfo or go.options.reportnodeinfo:
        nodeinfo = collect_nodeinfo()[2]
        if len(nodeinfo) == 0:
            _log.error('No nodeinfo found')
            sys.exit(1)

        # most common node configuration first
        ordered = sorted(nodeinfo.items(), key=lambda x: len(x[1]), reverse=True)

        if go.options.singlenodeinfo:
            if len(nodeinfo) > 1:
                msg = "Not all nodes have same parameters. Using most frequent ones."
                if go.options.reportnodeinfo:
                    _log.warning(msg)
                else:
                    _log.error(msg)

            # usage: export `./show_nodes -I` ; env |grep SHOWNODES_
            most_freq = ordered[0][0]
            msg = []
            msg.append("SHOWNODES_PPN=%d" % most_freq[0])
            msg.append("SHOWNODES_PHYSMEMMB=%d" % (most_freq[1] * 1024))
        else:
            msg = []
            for info, nodes in ordered:
                txt = "%d nodes with %d cores, %s MB physmem, %s GB swap and %s GB local disk" % (
                    len(nodes), info[0], info[1] * 1024, info[2], info[3])
                msg.append(txt)
                # print and _log are dumped to stdout at different moment, repeat the txt in the debug log
                _log.debug("Found %s with matching nodes: %s" % (txt, nodes))

        print "\n".join(msg)
        sys.exit(0)

    # gather node data, either from moab (optionally from an xml file) or from pbs
    if go.options.moab:
        if go.options.moabxml:
            try:
                moabxml = open(go.options.moabxml).read()
            except (OSError, IOError):
                _log.error('Failed to read moab xml from %s' % go.options.moabxml)
        else:
            moabxml = None
        nodes_dict = moab_get_nodes_dict(xml=moabxml)
        nodes = get_nodes(nodes_dict)
    else:
        nodes = get_nodes()

    # nagios state -> exit helper
    nagiosexit = {
        NDNAG_WARNING: warning_exit,
        NDNAG_CRITICAL: critical_exit,
        NDNAG_OK: ok_exit,
    }

    nagios_res = {}
    detailed_res = {}
    nodes_found = []

    all_nodes = []

    for name, full_state in nodes:
        all_nodes.append(name)

        if go.options.regex and not go.options.regex.search(name):
            continue

        nagios_state = full_state['derived']['nagiosstate']
        if nagios_state not in nagios_res:
            nagios_res[nagios_state] = []

        state = full_state['derived']['state']
        states = full_state['derived']['states']

        if state == ND_free and ND_idle in states:
            state = ND_idle  # special case for idle
        if state not in detailed_res:
            detailed_res[state] = []

        if go.options.anystate:
            states_to_check = states
        else:
            states_to_check = [state]

        # filter the allowed states
        if any(x for x in states_to_check if x in report_states):
            nagios_res[nagios_state].append(states)
            detailed_res[state].append(states)
            nodes_found.append(name)

            # with --regex (and without --allregex), first match wins
            if go.options.regex and not go.options.allregex:
                break

    if go.options.invert:
        nodes_found = [x for x in all_nodes if x not in nodes_found]

    if go.options.regex and not go.options.allregex:
        # there should only be one node
        nagios_state, all_states = nagios_res.items()[0]
        states = all_states[0]
        if go.options.nagios:
            msg = "show_nodes - %s" % ",".join(states)
            nagiosexit[nagios_state](msg)
        else:
            txt = "%s %s" % (nagios_state, ",".join(states))
            print txt
    else:
        if go.options.nagios:
            # per-state counts in the nagios result
            msg = NagiosResult('show_nodes')
            txt = []
            total = 0
            for state in all_states:
                if state in detailed_res:
                    nr = len(detailed_res[state])
                else:
                    nr = 0
                total += nr
                setattr(msg, state, nr)
            msg.total = total

            reported_state = [str(NDNAG_OK), '']
            # any bad node makes the overall state critical
            if ND_bad in detailed_res:
                reported_state[0] = NDNAG_CRITICAL
                msg.message += ' - %s bad nodes' % (len(detailed_res[ND_bad]))
            nagiosexit[reported_state[0]](msg)
        else:
            # just print the nodes
            if go.options.shorthost:
                nodes_found = [x.split('.')[0] for x in nodes_found]
            print ' '.join(nodes_found)
# -*- encoding: utf-8 -*- import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) import test.managertests as m import unittest suite = unittest.TestSuite([x.suite() for x in (m,)]) try: import xmlrunner rs = xmlrunner.XMLTestRunner(output="test-reports").run(suite) except ImportError, err: rs = unittest.TextTestRunner().run(suite) if not rs.wasSuccessful(): sys.exit(1) if __name__ == '__main__': from vsc.utils import fancylogger fancylogger.logToScreen(enable=True)
dest="local", help="Use a local path, not on github.com (Default false)") options, args = parser.parse_args() # get and configure logger log = fancylogger.getLogger(__name__) if options.verbose == 1: fancylogger.setLogLevelWarning() elif options.verbose == 2: fancylogger.setLogLevelInfo() elif options.verbose >= 3: fancylogger.setLogLevelDebug() if options.quiet: fancylogger.logToScreen(False) else: fancylogger.logToScreen(True) # other options if not options.branch: options.branch = "develop" if not options.username: options.username = "******" if not options.repo: options.repo = "easybuild-easyconfigs" if not options.path: options.path = "easybuild/easyconfigs" if options.local: import os walk = os.walk
def _log_and_raise(self, err):
    """Surface *err* on screen in the test logging format, then raise it via the logger."""
    message = str(err)
    # switch fancylogger to screen output with the test-friendly format first,
    # so the raised exception is actually visible
    fancylogger.setLogFormat(fancylogger.TEST_LOGGING_FORMAT)
    fancylogger.logToScreen(enable=True)
    self.log.raiseException(message)
log.info("%s: nothing to fix" % ec_file) # MAIN try: init_build_options() options = { 'backup': ("Backup up easyconfigs before modifying them", None, 'store_true', True, 'b'), } go = FixBrokenEasyconfigsOption(options) log = go.log fancylogger.logToScreen(enable=True, stdout=True) fancylogger.setLogLevel('WARNING') try: import easybuild.easyblocks.generic.configuremake except ImportError, err: raise EasyBuildError( "easyblocks are not available in Python search path: %s", err) for path in go.args: if not os.path.exists(path): raise EasyBuildError("Non-existing path %s specified", path) ec_files = [ec for p in go.args for ec in find_easyconfigs(p)] if not ec_files: raise EasyBuildError("No easyconfig files specified")
This mimics the result of Moab's checkjob command, but without the risks of letting users see job information of jobs that are not theirs. """ import cPickle import os import pwd import time from vsc.utils import fancylogger from vsc.utils.generaloption import simple_option MAXIMAL_AGE = 60 * 30 # 30 minutes logger = fancylogger.getLogger("myshowq") fancylogger.logToScreen(True) fancylogger.setLogLevelWarning() def checkjob_data_location(user_name, location): """Retrieve the pickled data form the right file. @type user_name: string @type location: string @param user_name: VSC user name (vscxyzuv) @param location: string defining the location of the pickle file - home: user's home directory - scratch: user's personal fileset on muk @returns: absolute path to the pickle file
from vsc.utils import fancylogger from vsc.utils.generaloption import ExtOptionParser # add all keys to the keyring for which the comment field has this entry in it ADD_IF_COMMENT = "keyring" DEFAULT_FILE = "../../ugenthpc/documents/keys.kdb" # parse options parser = ExtOptionParser(usage="testusage\n%s" % __doc__) parser.add_option( "-f", "--file", dest="filename", help="use FILE as keepass database", metavar="FILE", default=DEFAULT_FILE ) (options, args) = parser.parse_args() # set up logging fancylogger.logToScreen() log = fancylogger.getLogger() fancylogger.setLogLevelInfo() log.info("using file %s", os.path.abspath(options.filename)) db = Database(options.filename, password=getpass.getpass("Please enter keepass file password: "******"adding %s", entry.title) keyring.set_password(entry.title, entry.username, entry.password) finally: db.close()
def main():
    """Delete EasyBuild-related gists from a github account.

    Depending on the options: all EasyBuild gists, only those attached to
    closed pull-requests, and/or 'orphan' gists without a pull-request.
    """
    # log to stdout from the start, at INFO level
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevelInfo()
    # generaloption-style option spec: (help, type, action, default, short flag)
    options = {
        'github-user': ('Your github username to use', None, 'store', None, 'g'),
        'closed-pr': ('Delete all gists from closed pull-requests', None, 'store_true', True, 'p'),
        'all': ('Delete all gists from Easybuild ', None, 'store_true', False, 'a'),
        'orphans': ('Delete all gists without a pull-request', None, 'store_true', False, 'o'),
    }
    go = simple_option(options)
    log = go.log
    # at least one action option is required
    # NOTE(review): this relies on log.error() aborting execution (as the
    # EasyBuild/vsc logger does); with a plain logger the code would fall
    # through — confirm which logger is in use
    if not (go.options.all or go.options.closed_pr or go.options.orphans):
        log.error("Please tell me what to do?")
    if go.options.github_user is None:
        # no username given on the command line: fall back to EasyBuild's
        # own configuration (environment variables with EASYBUILD_ prefix)
        eb_go = EasyBuildOptions(envvar_prefix='EASYBUILD', go_args=[])
        username = eb_go.options.github_user
        log.debug("Fetch github username from easybuild, found: %s", username)
    else:
        username = go.options.github_user
    if username is None:
        log.error("Could not find a github username")
    else:
        log.info("Using username = %s", username)
    token = fetch_github_token(username)
    gh = RestClient(GITHUB_API_URL, username=username, token=token)
    # ToDo: add support for pagination
    status, gists = gh.gists.get(per_page=100)
    if status != HTTP_STATUS_OK:
        log.error("Failed to get a lists of gists for user %s: error code %s, message = %s",
                  username, status, gists)
    else:
        log.info("Found %s gists", len(gists))
    # match gist descriptions written by EasyBuild; optional named group 'PR'
    # captures the pull-request number when present
    regex = re.compile(r"(EasyBuild test report|EasyBuild log for failed build).*?(?:PR #(?P<PR>[0-9]+))?\)?$")
    # cache PR state lookups so each PR is fetched at most once
    pr_cache = {}
    num_deleted = 0
    for gist in gists:
        # gists without a description can never match
        if not gist["description"]:
            continue
        re_pr_num = regex.search(gist["description"])
        delete_gist = False
        if re_pr_num:
            log.debug("Found a Easybuild gist (id=%s)", gist["id"])
            pr_num = re_pr_num.group("PR")  # None when no "PR #<n>" in the description
            if go.options.all:
                delete_gist = True
            elif pr_num and go.options.closed_pr:
                log.debug("Found Easybuild test report for PR #%s", pr_num)
                if pr_num not in pr_cache:
                    status, pr = gh.repos[GITHUB_EB_MAIN][GITHUB_EASYCONFIGS_REPO].pulls[pr_num].get()
                    if status != HTTP_STATUS_OK:
                        log.error("Failed to get pull-request #%s: error code %s, message = %s", pr_num, status, pr)
                    pr_cache[pr_num] = pr["state"]
                # only reports attached to *closed* PRs are deleted
                if pr_cache[pr_num] == "closed":
                    log.debug("Found report from closed PR #%s (id=%s)", pr_num, gist["id"])
                    delete_gist = True
            elif not pr_num and go.options.orphans:
                # EasyBuild gist without any PR reference
                log.debug("Found Easybuild test report without PR (id=%s)", gist["id"])
                delete_gist = True
        if delete_gist:
            status, del_gist = gh.gists[gist["id"]].delete()
            if status != HTTP_DELETE_OK:
                log.error("Unable to remove gist (id=%s): error code %s, message = %s",
                          gist["id"], status, del_gist)
            else:
                log.info("Delete gist with id=%s", gist["id"])
                num_deleted += 1
    log.info("Deleted %s gists", num_deleted)
def main(testing_data=(None, None, None)): """ Main function: @arg options: a tuple: (options, paths, logger, logfile, hn) as defined in parse_options This function will: - read easyconfig - build software """ # purposely session state very early, to avoid modules loaded by EasyBuild meddling in init_session_state = session_state() # disallow running EasyBuild as root if os.getuid() == 0: sys.stderr.write( "ERROR: You seem to be running EasyBuild with root privileges.\n" "That's not wise, so let's end this here.\n" "Exiting.\n") sys.exit(1) # steer behavior when testing main testing = testing_data[0] is not None args, logfile, do_build = testing_data # initialise options eb_go = eboptions.parse_options(args=args) options = eb_go.options orig_paths = eb_go.args eb_config = eb_go.generate_cmd_line(add_default=True) init_session_state.update({'easybuild_configuration': eb_config}) # set umask (as early as possible) if options.umask is not None: new_umask = int(options.umask, 8) old_umask = os.umask(new_umask) # set temporary directory to use eb_tmpdir = set_tmpdir(options.tmpdir) # initialise logging for main if options.logtostdout: fancylogger.logToScreen(enable=True, stdout=True) else: if logfile is None: # mkstemp returns (fd,filename), fd is from os.open, not regular open! fd, logfile = tempfile.mkstemp(suffix='.log', prefix='easybuild-') os.close(fd) fancylogger.logToFile(logfile) print_msg('temporary log file in case of crash %s' % (logfile), log=None, silent=testing) global _log _log = fancylogger.getLogger(fname=False) if options.umask is not None: _log.info("umask set to '%s' (used to be '%s')" % (oct(new_umask), oct(old_umask))) # hello world! _log.info(this_is_easybuild()) # how was EB called? 
eb_command_line = eb_go.generate_cmd_line() + eb_go.args _log.info("Command line: %s" % (" ".join(eb_command_line))) _log.info("Using %s as temporary directory" % eb_tmpdir) if not options.robot is None: if options.robot: _log.info("Using robot path(s): %s" % options.robot) else: _log.error( "No robot paths specified, and unable to determine easybuild-easyconfigs install path." ) # do not pass options.robot, it's not a list instance (and it shouldn't be modified) robot_path = None if options.robot: robot_path = list(options.robot) # determine easybuild-easyconfigs package install path easyconfigs_paths = get_paths_for("easyconfigs", robot_path=robot_path) # keep track of paths for install easyconfigs, so we can obtain find specified easyconfigs easyconfigs_pkg_full_paths = easyconfigs_paths[:] if not easyconfigs_paths: _log.warning( "Failed to determine install path for easybuild-easyconfigs package." ) # process software build specifications (if any), i.e. # software name/version, toolchain name/version, extra patches, ... (try_to_generate, build_specs) = process_software_build_specs(options) # specified robot paths are preferred over installed easyconfig files # --try-X and --dep-graph both require --robot, so enable it with path of installed easyconfigs if robot_path or try_to_generate or options.dep_graph: if robot_path is None: robot_path = [] robot_path.extend(easyconfigs_paths) easyconfigs_paths = robot_path[:] _log.info( "Extended list of robot paths with paths for installed easyconfigs: %s" % robot_path) # initialise the easybuild configuration config.init(options, eb_go.get_options_by_section('config')) # building a dependency graph implies force, so that all dependencies are retained # and also skips validation of easyconfigs (e.g. 
checking os dependencies) retain_all_deps = False if options.dep_graph: _log.info("Enabling force to generate dependency graph.") options.force = True retain_all_deps = True config.init_build_options({ 'aggregate_regtest': options.aggregate_regtest, 'allow_modules_tool_mismatch': options.allow_modules_tool_mismatch, 'check_osdeps': not options.ignore_osdeps, 'cleanup_builddir': options.cleanup_builddir, 'command_line': eb_command_line, 'debug': options.debug, 'dry_run': options.dry_run, 'easyblock': options.easyblock, 'experimental': options.experimental, 'force': options.force, 'github_user': options.github_user, 'group': options.group, 'ignore_dirs': options.ignore_dirs, 'modules_footer': options.modules_footer, 'only_blocks': options.only_blocks, 'recursive_mod_unload': options.recursive_module_unload, 'regtest_output_dir': options.regtest_output_dir, 'retain_all_deps': retain_all_deps, 'robot_path': robot_path, 'sequential': options.sequential, 'silent': testing, 'set_gid_bit': options.set_gid_bit, 'skip': options.skip, 'skip_test_cases': options.skip_test_cases, 'sticky_bit': options.sticky_bit, 'stop': options.stop, 'umask': options.umask, 'valid_module_classes': module_classes(), 'valid_stops': [x[0] for x in EasyBlock.get_steps()], 'validate': not options.force, }) # obtain list of loaded modules, build options must be initialized first modlist = session_module_list() init_session_state.update({'module_list': modlist}) _log.debug("Initial session state: %s" % init_session_state) # search for easyconfigs if options.search or options.search_short: search_path = [os.getcwd()] if easyconfigs_paths: search_path = easyconfigs_paths query = options.search or options.search_short ignore_dirs = config.build_option('ignore_dirs') silent = config.build_option('silent') search_file(search_path, query, short=not options.search, ignore_dirs=ignore_dirs, silent=silent) paths = [] if len(orig_paths) == 0: if options.from_pr: pr_path = os.path.join(eb_tmpdir, "files_pr%s" % 
options.from_pr) pr_files = fetch_easyconfigs_from_pr( options.from_pr, path=pr_path, github_user=options.github_user) paths = [(path, False) for path in pr_files if path.endswith('.eb')] elif 'name' in build_specs: paths = [ obtain_path(build_specs, easyconfigs_paths, try_to_generate=try_to_generate, exit_on_error=not testing) ] elif not any([ options.aggregate_regtest, options.search, options.search_short, options.regtest ]): print_error(( "Please provide one or multiple easyconfig files, or use software build " "options to make EasyBuild search for easyconfigs"), log=_log, opt_parser=eb_go.parser, exit_on_error=not testing) else: # look for easyconfigs with relative paths in easybuild-easyconfigs package, # unless they were found at the given relative paths if easyconfigs_pkg_full_paths: # determine which easyconfigs files need to be found, if any ecs_to_find = [] for idx, orig_path in enumerate(orig_paths): if orig_path == os.path.basename( orig_path) and not os.path.exists(orig_path): ecs_to_find.append((idx, orig_path)) _log.debug("List of easyconfig files to find: %s" % ecs_to_find) # find missing easyconfigs by walking paths with installed easyconfig files for path in easyconfigs_pkg_full_paths: _log.debug( "Looking for missing easyconfig files (%d left) in %s..." 
% (len(ecs_to_find), path)) for (subpath, dirnames, filenames) in os.walk(path, topdown=True): for idx, orig_path in ecs_to_find[:]: if orig_path in filenames: full_path = os.path.join(subpath, orig_path) _log.info("Found %s in %s: %s" % (orig_path, path, full_path)) orig_paths[idx] = full_path # if file was found, stop looking for it (first hit wins) ecs_to_find.remove((idx, orig_path)) # stop os.walk insanity as soon as we have all we need (os.walk loop) if len(ecs_to_find) == 0: break # ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk dirnames[:] = [ d for d in dirnames if not d in options.ignore_dirs ] # stop os.walk insanity as soon as we have all we need (paths loop) if len(ecs_to_find) == 0: break # indicate that specified paths do not contain generated easyconfig files paths = [(path, False) for path in orig_paths] _log.debug("Paths: %s" % paths) # run regtest if options.regtest or options.aggregate_regtest: _log.info("Running regression test") if paths: ec_paths = [path[0] for path in paths] else: # fallback: easybuild-easyconfigs install path ec_paths = easyconfigs_pkg_full_paths regtest_ok = regtest(ec_paths) if not regtest_ok: _log.info("Regression test failed (partially)!") sys.exit(31) # exit -> 3x1t -> 31 # read easyconfig files easyconfigs = [] for (path, generated) in paths: path = os.path.abspath(path) if not os.path.exists(path): print_error("Can't find path %s" % path) try: ec_files = find_easyconfigs(path, ignore_dirs=options.ignore_dirs) for ec_file in ec_files: # only pass build specs when not generating easyconfig files if try_to_generate: ecs = process_easyconfig(ec_file) else: ecs = process_easyconfig(ec_file, build_specs=build_specs) easyconfigs.extend(ecs) except IOError, err: _log.error("Processing easyconfigs in path %s failed: %s" % (path, err))
def main():
    """Report on PBS/Moab node states.

    Collects node state information, filters it according to the selected
    options, and either prints the matching nodes or exits with a
    nagios-style status/message.
    """
    # generaloption-style option spec: (help, type, action, default, short flag)
    options = {
        'nagios': ('Report in nagios format', None, 'store_true', False, 'n'),
        'regex': ('Filter on regexp, data for first match', None, 'regex', None, 'r'),
        'allregex': ('Combined with --regex/-r, return all data', None, 'store_true', False, 'A'),
        'anystate': ('Matches any state (eg down_on_error node will also list as error)',
                     None, 'store_true', False, 'a'),
        'down': ('Down nodes', None, 'store_true', False, 'D'),
        'downonerror': ('Down on error nodes', None, 'store_true', False, 'E'),
        'offline': ('Offline nodes', None, 'store_true', False, 'o'),
        'offline_idle': ('Offline idle nodes', None, 'store_true', False, 'O'),
        'partial': ('Partial nodes (one or more running job(s), jobslot(s) available)',
                    None, 'store_true', False, 'p'),
        'job-exclusive': ('Job-exclusive nodes (no jobslots available)', None, 'store_true', False, 'x'),
        'free': ('Free nodes (0 or more running jobs, jobslot(s) available)', None, 'store_true', False, 'f'),
        'unknown': ('State unknown nodes', None, 'store_true', False, 'u'),
        'bad': ('Bad nodes (broken jobregex)', None, 'store_true', False, 'b'),
        'error': ('Error nodes', None, 'store_true', False, 'e'),
        'idle': ('Idle nodes (No running jobs, jobslot(s) available)', None, 'store_true', False, 'i'),
        'singlenodeinfo': (('Single (most-frequent) node information in key=value format'
                            '(no combination with other options)'), None, 'store_true', False, 'I'),
        'reportnodeinfo': ('Report node information (no combination with other options)',
                           None, 'store_true', False, 'R'),
        'moab': ('Use moab information (mdiag -n)', None, 'store_true', False, 'm'),
        'moabxml': ('Use xml moab data from file (for testing)', None, 'store', None),
        'shorthost': ('Return (short) hostname', None, 'store_true', False, 's'),
        'invert': ('Return inverted selection', None, 'store_true', False, 'v'),
    }
    go = simple_option(options)

    # in nagios mode (unless debugging), keep the screen clean: log to devlog only
    if go.options.nagios and not go.options.debug:
        fancylogger.logToDevLog(enable=True)
        fancylogger.logToScreen(enable=False)
        fancylogger.setLogLevelInfo()

    all_states = ND_NAGIOS_CRITICAL + ND_NAGIOS_WARNING + ND_NAGIOS_OK
    # build the list of states to report on from the selected options;
    # no selection means: report on all states
    report_states = []
    if go.options.down:
        report_states.append(ND_down)
    if go.options.downonerror:
        report_states.append(ND_down_on_error)
    if go.options.offline:
        report_states.append(ND_offline)
    if go.options.free:
        report_states.append(ND_free)
    if go.options.partial:
        report_states.append(ND_free_and_job)
    if go.options.job_exclusive:
        report_states.append(ND_job_exclusive)
    if go.options.unknown:
        report_states.append(ND_state_unknown)
    if go.options.bad:
        report_states.append(ND_bad)
    if go.options.error:
        report_states.append(ND_error)
    if go.options.idle:
        report_states.append(ND_idle)
    if go.options.offline_idle:
        report_states.append(ND_offline_idle)
    if len(report_states) == 0:
        report_states = all_states

    # node-info reporting modes short-circuit the regular state reporting below
    if go.options.singlenodeinfo or go.options.reportnodeinfo:
        nodeinfo = collect_nodeinfo()[2]
        if len(nodeinfo) == 0:
            _log.error('No nodeinfo found')
            sys.exit(1)
        # most common node configuration first
        ordered = sorted(nodeinfo.items(), key=lambda x: len(x[1]), reverse=True)
        if go.options.singlenodeinfo:
            if len(nodeinfo) > 1:
                msg = "Not all nodes have same parameters. Using most frequent ones."
                if go.options.reportnodeinfo:
                    _log.warning(msg)
                else:
                    _log.error(msg)
            # usage: export `./show_nodes -I` ; env |grep SHOWNODES_
            most_freq = ordered[0][0]
            msg = []
            msg.append("SHOWNODES_PPN=%d" % most_freq[0])
            msg.append("SHOWNODES_PHYSMEMMB=%d" % (most_freq[1] * 1024))
        else:
            msg = []
            for info, nodes in ordered:
                txt = "%d nodes with %d cores, %s MB physmem, %s GB swap and %s GB local disk" % (
                    len(nodes), info[0], info[1] * 1024, info[2], info[3])
                msg.append(txt)
                # print and _log are dumped to stdout at different moment, repeat the txt in the debug log
                _log.debug("Found %s with matching nodes: %s" % (txt, nodes))
        print "\n".join(msg)
        sys.exit(0)

    # gather node data: either from moab (live or from an xml file, for testing)
    # or from the default source
    if go.options.moab:
        if go.options.moabxml:
            try:
                moabxml = open(go.options.moabxml).read()
            except (OSError, IOError):
                _log.error('Failed to read moab xml from %s' % go.options.moabxml)
        else:
            moabxml = None
        nodes_dict = moab_get_nodes_dict(xml=moabxml)
        nodes = get_nodes(nodes_dict)
    else:
        nodes = get_nodes()

    # map nagios state to the matching exit helper
    nagiosexit = {
        NDNAG_WARNING: warning_exit,
        NDNAG_CRITICAL: critical_exit,
        NDNAG_OK: ok_exit,
    }

    nagios_res = {}    # nagios state -> list of state-lists
    detailed_res = {}  # derived state -> list of state-lists
    nodes_found = []
    all_nodes = []
    for name, full_state in nodes:
        all_nodes.append(name)
        # with --regex, only consider matching node names
        if go.options.regex and not go.options.regex.search(name):
            continue
        nagios_state = full_state['derived']['nagiosstate']
        if nagios_state not in nagios_res:
            nagios_res[nagios_state] = []
        state = full_state['derived']['state']
        states = full_state['derived']['states']
        if state == ND_free and ND_idle in states:
            state = ND_idle  # special case for idle
        if state == ND_offline and ND_idle in states:
            state = ND_offline_idle
        if state not in detailed_res:
            detailed_res[state] = []
        # with --anystate, any of the node's states may match the selection;
        # otherwise only the single derived state is checked
        if go.options.anystate:
            states_to_check = states
        else:
            states_to_check = [state]
        # filter the allowed states
        if any(x for x in states_to_check if x in report_states):
            nagios_res[nagios_state].append(states)
            detailed_res[state].append(states)
            nodes_found.append(name)
        # without --allregex, stop after the first regex match
        if go.options.regex and not go.options.allregex:
            break

    if go.options.invert:
        nodes_found = [x for x in all_nodes if x not in nodes_found]

    if go.options.regex and not go.options.allregex:
        # there should only be one node
        nagios_state, all_states = nagios_res.items()[0]
        states = all_states[0]
        if go.options.nagios:
            msg = "show_nodes - %s" % ",".join(states)
            nagiosexit[nagios_state](msg)
        else:
            txt = "%s %s" % (nagios_state, ",".join(states))
            print txt
    else:
        if go.options.nagios:
            # per-state counts in the nagios result, worst overall state wins
            msg = NagiosResult('show_nodes')
            txt = []
            total = 0
            for state in all_states:
                if state in detailed_res:
                    nr = len(detailed_res[state])
                else:
                    nr = 0
                total += nr
                setattr(msg, state, nr)
            msg.total = total
            reported_state = [str(NDNAG_OK), '']
            # any bad node makes the overall result CRITICAL
            if ND_bad in detailed_res:
                reported_state[0] = NDNAG_CRITICAL
                msg.message += ' - %s bad nodes' % (len(detailed_res[ND_bad]))
            nagiosexit[reported_state[0]](msg)
        else:
            # just print the nodes
            if go.options.shorthost:
                nodes_found = [x.split('.')[0] for x in nodes_found]
            print ' '.join(nodes_found)
def main():
    """Delete EasyBuild-related gists from a github account.

    Depending on the options: all EasyBuild gists, only those attached to
    closed pull-requests, and/or 'orphan' gists without a pull-request.
    Fatal problems are reported by raising EasyBuildError.
    """
    # log to stdout from the start, at INFO level
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevelInfo()
    # generaloption-style option spec: (help, type, action, default, short flag)
    options = {
        'github-user': ('Your github username to use', None, 'store', None, 'g'),
        'closed-pr': ('Delete all gists from closed pull-requests', None, 'store_true', True, 'p'),
        'all': ('Delete all gists from Easybuild ', None, 'store_true', False, 'a'),
        'orphans': ('Delete all gists without a pull-request', None, 'store_true', False, 'o'),
    }
    go = simple_option(options)
    log = go.log
    # at least one action option is required
    if not (go.options.all or go.options.closed_pr or go.options.orphans):
        raise EasyBuildError("Please tell me what to do?")
    if go.options.github_user is None:
        # no username given on the command line: fall back to EasyBuild's
        # own configuration (environment variables with EASYBUILD_ prefix)
        eb_go = EasyBuildOptions(envvar_prefix='EASYBUILD', go_args=[])
        username = eb_go.options.github_user
        log.debug("Fetch github username from easybuild, found: %s", username)
    else:
        username = go.options.github_user
    if username is None:
        raise EasyBuildError("Could not find a github username")
    else:
        log.info("Using username = %s", username)
    token = fetch_github_token(username)
    gh = RestClient(GITHUB_API_URL, username=username, token=token)
    # ToDo: add support for pagination
    status, gists = gh.gists.get(per_page=100)
    if status != HTTP_STATUS_OK:
        raise EasyBuildError("Failed to get a lists of gists for user %s: error code %s, message = %s",
                             username, status, gists)
    else:
        log.info("Found %s gists", len(gists))
    # match gist descriptions written by EasyBuild; optional named group 'PR'
    # captures the pull-request number when present
    regex = re.compile(r"(EasyBuild test report|EasyBuild log for failed build).*?(?:PR #(?P<PR>[0-9]+))?\)?$")
    # cache PR state lookups so each PR is fetched at most once
    pr_cache = {}
    num_deleted = 0
    for gist in gists:
        # gists without a description can never match
        if not gist["description"]:
            continue
        re_pr_num = regex.search(gist["description"])
        delete_gist = False
        if re_pr_num:
            log.debug("Found a Easybuild gist (id=%s)", gist["id"])
            pr_num = re_pr_num.group("PR")  # None when no "PR #<n>" in the description
            if go.options.all:
                delete_gist = True
            elif pr_num and go.options.closed_pr:
                log.debug("Found Easybuild test report for PR #%s", pr_num)
                if pr_num not in pr_cache:
                    status, pr = gh.repos[GITHUB_EB_MAIN][GITHUB_EASYCONFIGS_REPO].pulls[pr_num].get()
                    if status != HTTP_STATUS_OK:
                        raise EasyBuildError("Failed to get pull-request #%s: error code %s, message = %s",
                                             pr_num, status, pr)
                    pr_cache[pr_num] = pr["state"]
                # only reports attached to *closed* PRs are deleted
                if pr_cache[pr_num] == "closed":
                    log.debug("Found report from closed PR #%s (id=%s)", pr_num, gist["id"])
                    delete_gist = True
            elif not pr_num and go.options.orphans:
                # EasyBuild gist without any PR reference
                log.debug("Found Easybuild test report without PR (id=%s)", gist["id"])
                delete_gist = True
        if delete_gist:
            status, del_gist = gh.gists[gist["id"]].delete()
            if status != HTTP_DELETE_OK:
                raise EasyBuildError("Unable to remove gist (id=%s): error code %s, message = %s",
                                     gist["id"], status, del_gist)
            else:
                log.info("Delete gist with id=%s", gist["id"])
                num_deleted += 1
    log.info("Deleted %s gists", num_deleted)
def main(testing_data=(None, None, None)): """ Main function: @arg options: a tuple: (options, paths, logger, logfile, hn) as defined in parse_options This function will: - read easyconfig - build software """ # purposely session state very early, to avoid modules loaded by EasyBuild meddling in init_session_state = session_state() # disallow running EasyBuild as root if os.getuid() == 0: sys.stderr.write("ERROR: You seem to be running EasyBuild with root privileges.\n" "That's not wise, so let's end this here.\n" "Exiting.\n") sys.exit(1) # steer behavior when testing main testing = testing_data[0] is not None args, logfile, do_build = testing_data # initialise options eb_go = eboptions.parse_options(args=args) options = eb_go.options orig_paths = eb_go.args eb_config = eb_go.generate_cmd_line(add_default=True) init_session_state.update({'easybuild_configuration': eb_config}) # set umask (as early as possible) if options.umask is not None: new_umask = int(options.umask, 8) old_umask = os.umask(new_umask) # set temporary directory to use eb_tmpdir = set_tmpdir(options.tmpdir) # initialise logging for main if options.logtostdout: fancylogger.logToScreen(enable=True, stdout=True) else: if logfile is None: # mkstemp returns (fd,filename), fd is from os.open, not regular open! fd, logfile = tempfile.mkstemp(suffix='.log', prefix='easybuild-') os.close(fd) fancylogger.logToFile(logfile) print_msg('temporary log file in case of crash %s' % (logfile), log=None, silent=testing) global _log _log = fancylogger.getLogger(fname=False) if options.umask is not None: _log.info("umask set to '%s' (used to be '%s')" % (oct(new_umask), oct(old_umask))) # hello world! _log.info(this_is_easybuild()) # how was EB called? 
eb_command_line = eb_go.generate_cmd_line() + eb_go.args _log.info("Command line: %s" % (" ".join(eb_command_line))) _log.info("Using %s as temporary directory" % eb_tmpdir) if not options.robot is None: if options.robot: _log.info("Using robot path(s): %s" % options.robot) else: _log.error("No robot paths specified, and unable to determine easybuild-easyconfigs install path.") # do not pass options.robot, it's not a list instance (and it shouldn't be modified) robot_path = [] if options.robot: robot_path = list(options.robot) # determine easybuild-easyconfigs package install path easyconfigs_paths = get_paths_for("easyconfigs", robot_path=robot_path) # keep track of paths for install easyconfigs, so we can obtain find specified easyconfigs easyconfigs_pkg_full_paths = easyconfigs_paths[:] if not easyconfigs_paths: _log.warning("Failed to determine install path for easybuild-easyconfigs package.") # process software build specifications (if any), i.e. # software name/version, toolchain name/version, extra patches, ... (try_to_generate, build_specs) = process_software_build_specs(options) # specified robot paths are preferred over installed easyconfig files # --try-X and --dep-graph both require --robot, so enable it with path of installed easyconfigs if robot_path or try_to_generate or options.dep_graph: robot_path.extend(easyconfigs_paths) easyconfigs_paths = robot_path[:] _log.info("Extended list of robot paths with paths for installed easyconfigs: %s" % robot_path) # prepend robot path with location where tweaked easyconfigs will be placed tweaked_ecs_path = None if try_to_generate and build_specs: tweaked_ecs_path = os.path.join(eb_tmpdir, 'tweaked_easyconfigs') robot_path.insert(0, tweaked_ecs_path) # initialise the easybuild configuration config.init(options, eb_go.get_options_by_section('config')) # building a dependency graph implies force, so that all dependencies are retained # and also skips validation of easyconfigs (e.g. 
checking os dependencies) retain_all_deps = False if options.dep_graph: _log.info("Enabling force to generate dependency graph.") options.force = True retain_all_deps = True if options.dep_graph or options.dry_run or options.dry_run_short: options.ignore_osdeps = True pr_path = None if options.from_pr: # extend robot search path with location where files touch in PR will be downloaded to pr_path = os.path.join(eb_tmpdir, "files_pr%s" % options.from_pr) robot_path.insert(0, pr_path) _log.info("Prepended list of robot search paths with %s: %s" % (pr_path, robot_path)) config.init_build_options({ 'aggregate_regtest': options.aggregate_regtest, 'allow_modules_tool_mismatch': options.allow_modules_tool_mismatch, 'check_osdeps': not options.ignore_osdeps, 'filter_deps': options.filter_deps, 'cleanup_builddir': options.cleanup_builddir, 'command_line': eb_command_line, 'debug': options.debug, 'dry_run': options.dry_run or options.dry_run_short, 'easyblock': options.easyblock, 'experimental': options.experimental, 'force': options.force, 'github_user': options.github_user, 'group': options.group, 'hidden': options.hidden, 'ignore_dirs': options.ignore_dirs, 'modules_footer': options.modules_footer, 'only_blocks': options.only_blocks, 'optarch': options.optarch, 'recursive_mod_unload': options.recursive_module_unload, 'regtest_output_dir': options.regtest_output_dir, 'retain_all_deps': retain_all_deps, 'robot_path': robot_path, 'sequential': options.sequential, 'silent': testing, 'set_gid_bit': options.set_gid_bit, 'skip': options.skip, 'skip_test_cases': options.skip_test_cases, 'sticky_bit': options.sticky_bit, 'stop': options.stop, 'suffix_modules_path': options.suffix_modules_path, 'test_report_env_filter': options.test_report_env_filter, 'umask': options.umask, 'valid_module_classes': module_classes(), 'valid_stops': [x[0] for x in EasyBlock.get_steps()], 'validate': not options.force, }) # obtain list of loaded modules, build options must be initialized first modlist 
= session_module_list(testing=testing) init_session_state.update({'module_list': modlist}) _log.debug("Initial session state: %s" % init_session_state) # search for easyconfigs if options.search or options.search_short: search_path = [os.getcwd()] if easyconfigs_paths: search_path = easyconfigs_paths query = options.search or options.search_short ignore_dirs = config.build_option('ignore_dirs') silent = config.build_option('silent') search_file(search_path, query, short=not options.search, ignore_dirs=ignore_dirs, silent=silent) paths = [] if len(orig_paths) == 0: if options.from_pr: pr_files = fetch_easyconfigs_from_pr(options.from_pr, path=pr_path, github_user=options.github_user) paths = [(path, False) for path in pr_files if path.endswith('.eb')] elif 'name' in build_specs: paths = [obtain_path(build_specs, easyconfigs_paths, try_to_generate=try_to_generate, exit_on_error=not testing)] elif not any([options.aggregate_regtest, options.search, options.search_short, options.regtest]): print_error(("Please provide one or multiple easyconfig files, or use software build " "options to make EasyBuild search for easyconfigs"), log=_log, opt_parser=eb_go.parser, exit_on_error=not testing) else: # look for easyconfigs with relative paths in easybuild-easyconfigs package, # unless they were found at the given relative paths if easyconfigs_pkg_full_paths: # determine which easyconfigs files need to be found, if any ecs_to_find = [] for idx, orig_path in enumerate(orig_paths): if orig_path == os.path.basename(orig_path) and not os.path.exists(orig_path): ecs_to_find.append((idx, orig_path)) _log.debug("List of easyconfig files to find: %s" % ecs_to_find) # find missing easyconfigs by walking paths with installed easyconfig files for path in easyconfigs_pkg_full_paths: _log.debug("Looking for missing easyconfig files (%d left) in %s..." 
% (len(ecs_to_find), path)) for (subpath, dirnames, filenames) in os.walk(path, topdown=True): for idx, orig_path in ecs_to_find[:]: if orig_path in filenames: full_path = os.path.join(subpath, orig_path) _log.info("Found %s in %s: %s" % (orig_path, path, full_path)) orig_paths[idx] = full_path # if file was found, stop looking for it (first hit wins) ecs_to_find.remove((idx, orig_path)) # stop os.walk insanity as soon as we have all we need (os.walk loop) if len(ecs_to_find) == 0: break # ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk dirnames[:] = [d for d in dirnames if not d in options.ignore_dirs] # stop os.walk insanity as soon as we have all we need (paths loop) if len(ecs_to_find) == 0: break # indicate that specified paths do not contain generated easyconfig files paths = [(path, False) for path in orig_paths] _log.debug("Paths: %s" % paths) # run regtest if options.regtest or options.aggregate_regtest: _log.info("Running regression test") if paths: ec_paths = [path[0] for path in paths] else: # fallback: easybuild-easyconfigs install path ec_paths = easyconfigs_pkg_full_paths regtest_ok = regtest(ec_paths) if not regtest_ok: _log.info("Regression test failed (partially)!") sys.exit(31) # exit -> 3x1t -> 31 # read easyconfig files easyconfigs = [] generated_ecs = False for (path, generated) in paths: path = os.path.abspath(path) # keep track of whether any files were generated generated_ecs |= generated if not os.path.exists(path): print_error("Can't find path %s" % path) try: ec_files = find_easyconfigs(path, ignore_dirs=options.ignore_dirs) for ec_file in ec_files: # only pass build specs when not generating easyconfig files if try_to_generate: ecs = process_easyconfig(ec_file) else: ecs = process_easyconfig(ec_file, build_specs=build_specs) easyconfigs.extend(ecs) except IOError, err: _log.error("Processing easyconfigs in path %s failed: %s" % (path, err))