def __createconfig(self):
    """Create config, add in options..."""
    if self.options.confFiles is not None:
        try:
            self.fcl = ConfigManager().getConfig(self.options.confFiles)
        except ConfigFailure:
            self.log.error('Failed to create FactoryConfigLoader')
            sys.exit(1)

        self.fcl.set("Factory", "cyclesToDo", str(self.options.cyclesToDo))
        self.fcl.set("Factory", "sleepTime", str(self.options.sleepTime))
        self.fcl.set("Factory", "confFiles", self.options.confFiles)
def getConfig(self):
    acf = self.fcl.get('Factory', 'authConf')
    self.log.debug('authmanager config file(s) = %s' % acf)
    acl = ConfigManager().getConfig(sources=acf)
    self.log.debug('successfully read config file(s) %s' % acf)
    return acl
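The snippets above hand ConfigManager().getConfig() either a single path or a comma-separated list of paths and treat the result like a ConfigParser. A minimal sketch of that pattern using only the standard library as a stand-in for AutoPyFactory's loader (the file and option names are hypothetical, not from the original code):

# Illustrative only: stdlib ConfigParser standing in for AutoPyFactory's ConfigManager.
try:
    from configparser import ConfigParser                       # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser   # Python 2

def load_config(sources):
    """Accept one path or a comma-separated list of paths and return a
    single parser with all of them merged (values in later files win)."""
    paths = [p.strip() for p in sources.split(',') if p.strip()]
    cp = ConfigParser()
    read_ok = cp.read(paths)   # read() silently skips files that do not exist
    if not read_ok:
        raise RuntimeError('none of the config sources could be read: %s' % sources)
    return cp

# Usage, analogous to self.fcl in the snippets above (hypothetical paths/keys):
# fcl = load_config('/etc/autopyfactory/autofactory.conf')
# acf = fcl.get('Factory', 'authConf')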
def _mappings(self):
    # Handle mappings configuration
    self.mappingscl = None   # mappings config loader object
    self.mappingscf = self.fcl.generic_get('Factory', 'mappingsConf')
    self.log.debug("mappings.conf file(s) = %s" % self.mappingscf)
    try:
        self.mappingscl = ConfigManager().getConfig(self.mappingscf)
    except ConfigFailure:
        self.log.error('Failed to create ConfigLoader object for mappings')
        sys.exit(0)
    self.log.debug("mappingscl is %s" % self.mappingscl)
def _queues_monitor_conf(self):
    # Handle monitor configuration
    self.mcl = None
    self.mcf = self.fcl.generic_get('Factory', 'monitorConf')
    self.log.debug("monitor.conf file(s) = %s" % self.mcf)
    try:
        self.mcl = ConfigManager().getConfig(self.mcf)
    except ConfigFailure:
        self.log.error('Failed to create MonitorConfigLoader')
        sys.exit(0)
    self.log.debug("mcl is %s" % self.mcl)
def _updateInfo(self):
    qcl = None

    # 1. we try to read the list of files in queueConf and create a config loader
    qcf = None
    try:
        qcf = self.fcl.get('Factory', 'queueConf')  # the configuration files for queues are a list of URIs
        self.log.debug("queues.conf file(s) = %s" % qcf)
        qcl_files = ConfigManager().getConfig(sources=qcf)
        self.log.debug("successfully read config file(s) %s" % qcf)
    except Exception as e:
        self.log.error("Exception: %s" % str(e))
        self.log.error(traceback.format_exc(None))
class File(ConfigInterface):

    def __init__(self, factory, config, section):
        self.log = logging.getLogger('autopyfactory.config')
        self.factory = factory
        self.fcl = config
        self.qcl = None
        self.log.info('ConfigPlugin: Object initialized.')

    def _updateInfo(self):
        qcl = None

        # 1. we try to read the list of files in queueConf and create a config loader
        qcf = None
        try:
            qcf = self.fcl.get('Factory', 'queueConf')  # the configuration files for queues are a list of URIs
            self.log.debug("queues.conf file(s) = %s" % qcf)
            qcl_files = ConfigManager().getConfig(sources=qcf)
            self.log.debug("successfully read config file(s) %s" % qcf)
        except Exception as e:
            self.log.error("Exception: %s" % str(e))
            self.log.error(traceback.format_exc(None))

        # 2. we try to read the directory in queueDirConf and create a config loader
        qcd = None
        try:
            qcd = self.fcl.get('Factory', 'queueDirConf')  # the configuration files for queues are in a directory
            if qcd == "None" or qcd == "":
                qcd = None
            if qcd:
                # FIXME : temporary solution.
                # The ConfigManager.getConfig() method should know how to handle properly empty directories
                if not os.path.isdir(qcd) or os.listdir(qcd) == []:
                    self.log.warning("queues.conf directory = %s does not exist or it is empty" % qcd)
                    qcd = None
                else:
                    self.log.debug("queues.conf directory = %s" % qcd)
                    qcl_dir = ConfigManager().getConfig(configdir=qcd)
        except Exception as e:
            self.log.error("Exception: %s" % str(e))
            self.log.error(traceback.format_exc(None))
if o == '--conf':
    conffile = a
if o == '--activated':
    activated = int(a)
if o == '--pending':
    pending = int(a)
if o == '--running':
    running = int(a)
if o == '--status':
    status = a

# ------------------------------------------------------------------
# OPEN THE CONF FILE
# ------------------------------------------------------------------

conf = ConfigManager().getConfig(conffile)
section_name = conf.sections()[0]

# ------------------------------------------------------------------
# MOCKS
# ------------------------------------------------------------------

class sitestatus(object):
    def __init__(self):
        self.status = status
        self.cloud = section_name

class wmsinfo(object):
    def __init__(self):
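The `o`/`a` loop above implies the options were parsed with getopt, although that part of the script is not shown. A hedged sketch of how such a loop is typically driven; only the option names come from the snippet, everything else is an assumption:

# Assumed argument-parsing preamble for the option loop above; not taken from the original script.
import getopt
import sys

try:
    opts, args = getopt.getopt(sys.argv[1:], '',
                               ['conf=', 'activated=', 'pending=', 'running=', 'status='])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)

conffile = None
activated = pending = running = 0
status = None

for o, a in opts:
    if o == '--conf':
        conffile = a
    if o == '--activated':
        activated = int(a)
    # ... remaining options handled exactly as in the snippet above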
class FactoryCLI(object):
    """class to handle the command line invocation of APF.
    parse the input options, setup everything, and run Factory class
    """

    def __init__(self):
        self.options = None
        self.args = None
        self.log = None
        self.fcl = None

        self.__presetups()
        self.__parseopts()
        self.__setuplogging()
        self.__platforminfo()
        self.__checkroot()
        self.__createconfig()

    def __presetups(self):
        """
        we put here some preliminary steps that for one reason or another
        must be done before anything else
        """

    def __parseopts(self):
        parser = OptionParser(usage="""%prog [OPTIONS]
autopyfactory is an ATLAS pilot factory.

This program is licenced under the GPL, as set out in LICENSE file.

Author(s):
Graeme A Stewart <*****@*****.**>
Peter Love <*****@*****.**>
John Hover <*****@*****.**>
Jose Caballero <*****@*****.**>
""", version="%prog $Id: factory.py 7680 2011-04-07 23:58:06Z jhover $")

        parser.add_option("-d", "--debug", dest="logLevel",
                          default=logging.WARNING, action="store_const", const=logging.DEBUG,
                          help="Set logging level to DEBUG [default WARNING]")
        parser.add_option("-v", "--info", dest="logLevel",
                          default=logging.WARNING, action="store_const", const=logging.INFO,
                          help="Set logging level to INFO [default WARNING]")
        parser.add_option("--console", dest="console",
                          default=False, action="store_true",
                          help="Forces debug and info messages to be sent to the console")
        parser.add_option("--quiet", dest="logLevel",
                          default=logging.WARNING, action="store_const", const=logging.WARNING,
                          help="Set logging level to WARNING [default]")
        parser.add_option("--oneshot", "--one-shot", dest="cyclesToDo",
                          default=0, action="store_const", const=1,
                          help="Run one cycle only")
        parser.add_option("--cycles", dest="cyclesToDo",
                          action="store", type="int", metavar="CYCLES",
                          help="Run CYCLES times, then exit [default infinite]")
        parser.add_option("--sleep", dest="sleepTime",
                          default=120, action="store", type="int", metavar="TIME",
                          help="Sleep TIME seconds between cycles [default %default]")
        parser.add_option("--conf", dest="confFiles",
                          default="/etc/autopyfactory/autofactory.conf",
                          action="store", metavar="FILE1[,FILE2,FILE3]",
                          help="Load configuration from FILEs (comma separated list)")
        parser.add_option("--log", dest="logfile",
                          metavar="LOGFILE", action="store", default="stdout",
                          help="Send logging output to LOGFILE or SYSLOG or stdout [default <syslog>]")
        parser.add_option("--runas", dest="runAs",
                          #
                          # By default
                          #
                          default=pwd.getpwuid(os.getuid())[0],
                          action="store", metavar="USERNAME",
                          help="If run as root, drop privileges to USER")
        (self.options, self.args) = parser.parse_args()

        #self.options.confFiles = self.options.confFiles.split(',')

    def __setuplogging(self):
        """
        Setup logging

        General principles we have tried to use for logging:

        -- Logging syntax and semantics should be uniform throughout the program,
           based on whatever organization scheme is appropriate.

        -- Have sufficient DEBUG messages to show domain problem calculations input and output.
           DEBUG messages should never span more than one line.

        -- A moderate number of INFO messages should be logged to mark major functional steps
           in the operation of the program, e.g. when a persistent object is instantiated and
           initialized, when a functional cycle/loop is complete. It would be good if these
           messages note summary statistics, e.g. "the last submit cycle submitted 90 jobs and
           10 jobs finished". A program being run with INFO log level should provide enough
           output that the user can watch the program function and quickly observe interesting
           events.

        -- Initially, all logging should be directed to a single file. But provision should be
           made for eventually directing logging output from different subsystems (submit, info,
           proxy management) to different files, and at different levels of verbosity
           (DEBUG, INFO, WARN), and with different formatters. Control of this distribution
           should use the standard Python "logging.conf" format file.

        -- All messages are always printed out in the log files, but also to stderr when the
           DEBUG or INFO levels are selected.

        -- We keep the original Python levels' meaning, including WARNING being the default level.

           DEBUG      Detailed domain problem information related to scheduling, calculations,
                      program state.
           INFO       High level confirmation that things are working as expected.
           WARNING    An indication that something unexpected happened, or indicative of some
                      problem in the near future (e.g. 'disk space low'). The software is still
                      working as expected.
           ERROR      Due to a more serious problem, the software has not been able to perform
                      some function.
           CRITICAL   A serious error, indicating that the program itself may be unable to
                      continue running.
        """
        self.log = logging.getLogger()
        self.options.logfile = os.path.expanduser(self.options.logfile)
        if self.options.logfile == 'syslog':
            logStream = logging.handlers.SysLogHandler('/dev/log')
        elif self.options.logfile == 'stdout':
            logStream = logging.StreamHandler()
        else:
            lf = os.path.expanduser(self.options.logfile)
            logdir = os.path.dirname(lf)
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            runuid = pwd.getpwnam(self.options.runAs).pw_uid
            rungid = pwd.getpwnam(self.options.runAs).pw_gid
            os.chown(logdir, runuid, rungid)
            logStream = logging.FileHandler(filename=lf)

        # 'major' and 'minor' are the Python version numbers, set elsewhere in the module
        if major == 2 and minor == 4:
            FORMAT = '%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d : %(message)s'
        else:
            FORMAT = '%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d %(funcName)s(): %(message)s'
        formatter = logging.Formatter(FORMAT)
        formatter.converter = time.gmtime  # to convert timestamps to UTC
        logStream.setFormatter(formatter)
        self.log.addHandler(logStream)

        # adding a new Handler for the console,
        # to be used only for DEBUG and INFO modes.
        if self.options.logLevel in [logging.DEBUG, logging.INFO]:
            if self.options.console:
                console = logging.StreamHandler(sys.stdout)
                console.setFormatter(formatter)
                console.setLevel(self.options.logLevel)
                self.log.addHandler(console)
        self.log.setLevel(self.options.logLevel)
        self.log.info('Logging initialized.')

    def _printenv(self):
        envmsg = ''
        for k in sorted(os.environ.keys()):
            envmsg += '\n%s=%s' % (k, os.environ[k])
        self.log.debug('Environment : %s' % envmsg)

    def __platforminfo(self):
        """
        display basic info about the platform, for debugging purposes
        """
        self.log.info('platform: uname = %s %s %s %s %s %s' % platform.uname())
        self.log.info('platform: platform = %s' % platform.platform())
        self.log.info('platform: python version = %s' % platform.python_version())
        self._printenv()

    def __checkroot(self):
        """
        If running as root, drop privileges to --runas' account.
        """
        starting_uid = os.getuid()
        starting_gid = os.getgid()
        starting_uid_name = pwd.getpwuid(starting_uid)[0]
        hostname = socket.gethostname()

        if os.getuid() != 0:
            self.log.info("Already running as unprivileged user %s at %s" % (starting_uid_name, hostname))

        if os.getuid() == 0:
            try:
                runuid = pwd.getpwnam(self.options.runAs).pw_uid
                rungid = pwd.getpwnam(self.options.runAs).pw_gid
                os.chown(self.options.logfile, runuid, rungid)

                os.setgid(rungid)
                os.setuid(runuid)
                os.seteuid(runuid)
                os.setegid(rungid)

                self._changehome()
                self._changewd()

                self.log.info("Now running as user %d:%d at %s..." % (runuid, rungid, hostname))
                self._printenv()
            except KeyError as e:
                self.log.error('No such user %s, unable to run properly. Error: %s' % (self.options.runAs, e))
                sys.exit(1)
            except OSError as e:
                self.log.error('Could not set user or group id to %s:%s. Error: %s' % (runuid, rungid, e))
                sys.exit(1)

    def _changehome(self):
        """
        at some point, proxyManager will make use of method os.path.expanduser()
        to find out the absolute path of the usercert and userkey files
        in order to renew the proxy.
        The thing is that expanduser() uses the value of $HOME as it is stored
        in os.environ, and that value still is /root/.
        Ergo, if we want the path to be expanded to a different user,
        i.e. autopyfactory, we need to change by hand the value of $HOME
        in the environment.
        """
        runAs_home = pwd.getpwnam(self.options.runAs).pw_dir
        os.environ['HOME'] = runAs_home
        self.log.debug('Setting up environment variable HOME to %s' % runAs_home)

    def _changewd(self):
        """
        changing working directory to the HOME directory of the new user,
        typically "autopyfactory".
        When APF starts as a daemon, the working directory may be "/".
        If APF was called from the command line as root, the working directory is "/root".
        It is better if the current working directory is just the HOME of the running user,
        so it is easier to debug in case of failures.
        """
        runAs_home = pwd.getpwnam(self.options.runAs).pw_dir
        os.chdir(runAs_home)
        self.log.debug('Switching working directory to %s' % runAs_home)

    def __createconfig(self):
        """Create config, add in options..."""
        if self.options.confFiles is not None:
            try:
                self.fcl = ConfigManager().getConfig(self.options.confFiles)
            except ConfigFailure:
                self.log.error('Failed to create FactoryConfigLoader')
                sys.exit(1)

            self.fcl.set("Factory", "cyclesToDo", str(self.options.cyclesToDo))
            self.fcl.set("Factory", "sleepTime", str(self.options.sleepTime))
            self.fcl.set("Factory", "confFiles", self.options.confFiles)

    def run(self):
        """Create Factory and enter main loop
        """
        from autopyfactory.factory import Factory
        try:
            self.log.info('Creating Factory and entering main loop...')
            f = Factory(self.fcl)
            f.run()
        except KeyboardInterrupt:
            self.log.info('Caught keyboard interrupt - exiting')
            f.stop()
            sys.exit(0)
        except FactoryConfigurationFailure as e:
            self.log.error('Factory configuration failure: %s', e)
            sys.exit(1)
        except ImportError as e:
            self.log.error('Failed to import necessary python module: %s' % e)
            sys.exit(1)
        except:
            # TODO - make this a logger.exception() call
            self.log.error("""Please report to Jose <*****@*****.**> and John <*****@*****.**>.""")
            # The following line prints the exception to the logging module
            self.log.error(traceback.format_exc(None))
            print(traceback.format_exc(None))
            sys.exit(1)
def _updateInfo(self):
    qcl = None

    # 1. we try to read the list of files in queueConf and create a config loader
    qcf = None
    try:
        qcf = self.fcl.get('Factory', 'queueConf')  # the configuration files for queues are a list of URIs
        self.log.debug("queues.conf file(s) = %s" % qcf)
        qcl_files = ConfigManager().getConfig(sources=qcf)
        self.log.debug("successfully read config file(s) %s" % qcf)
    except Exception as e:
        self.log.error("Exception: %s" % str(e))
        self.log.error(traceback.format_exc(None))

    # 2. we try to read the directory in queueDirConf and create a config loader
    qcd = None
    try:
        qcd = self.fcl.get('Factory', 'queueDirConf')  # the configuration files for queues are in a directory
        if qcd == "None" or qcd == "":
            qcd = None
        if qcd:
            # FIXME : temporary solution.
            # The ConfigManager.getConfig() method should know how to handle properly empty directories
            if not os.path.isdir(qcd) or os.listdir(qcd) == []:
                self.log.warning("queues.conf directory = %s does not exist or it is empty" % qcd)
                qcd = None
            else:
                self.log.debug("queues.conf directory = %s" % qcd)
                qcl_dir = ConfigManager().getConfig(configdir=qcd)
    except Exception as e:
        self.log.error("Exception: %s" % str(e))
        self.log.error(traceback.format_exc(None))

    # 3. we merge both loader objects
    try:
        if qcf and qcd:
            self.log.debug("both queues file(s) and dir")
            qcl = qcl_files
            qcl.merge(qcl_dir)
        elif qcf and not qcd:
            self.log.debug("queues file(s) only")
            qcl = qcl_files
        elif not qcf and qcd:
            self.log.debug("queues dir only")
            qcl = qcl_dir
        else:
            self.log.error('no files or directory with queues configuration specified')
            raise ConfigFailure('no files or directory with queues configuration specified')
    except Exception as err:
        self.log.error('Failed to create queues ConfigLoader object')
        raise ConfigFailure('Failed to create queues ConfigLoader: %s' % err)

    self.log.info('queues ConfigLoader object created')
    self.qcl = qcl
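The merge step above relies on AutoPyFactory's own ConfigLoader.merge(); its exact semantics are not shown here. As a rough stand-in, the same "explicit file list plus optional directory" behaviour can be sketched with the standard library, where files read later override values from earlier ones (paths and the .conf suffix filter are assumptions, not the APF API):

# Illustrative stand-in for the file-list + directory merge above; not the APF ConfigLoader API.
import os
try:
    from configparser import ConfigParser
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser

def load_queues_config(qcf=None, qcd=None):
    """qcf: comma-separated list of config files; qcd: optional directory of config files."""
    paths = []
    if qcf:
        paths.extend(p.strip() for p in qcf.split(',') if p.strip())
    if qcd and os.path.isdir(qcd) and os.listdir(qcd):
        paths.extend(sorted(os.path.join(qcd, f) for f in os.listdir(qcd)
                            if f.endswith('.conf')))
    if not paths:
        raise ValueError('no files or directory with queues configuration specified')
    qcl = ConfigParser()
    qcl.read(paths)   # values from later paths override earlier ones
    return qcl

# e.g. load_queues_config(qcf='/etc/autopyfactory/queues.conf',
#                         qcd='/etc/autopyfactory/queues.d')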