Example #1
def multi_process_log(message, logname, cfgdir):
    """
  <Purpose>
    Logs the given message to a log.  Does some trickery to make sure there
    no more than 10 logs are ever there.   If init hasn't been called, this 
    will perform the same actions.
    
  <Arguments>
    message - The message that should be written to the log.
    logname - The name to be used for the logfile.
    cfgdir - The directory that contains the vesseldict
  
  <Exceptions>
    Exception if there is a problem reading from cfgdir/nodeman.cfg or writing
    to the circular log.
      
  <Side Effects>
    The given message might be written to the log.
    
  <Returns>
    True if the message is logged.   False if the message isn't written because
    there are too many logs.
  """
    global servicevessel
    global logfile

    # If we've initialized, then log and continue...
    if logfile != None and servicevessel != None:
        log(message)
        return True

    if servicevessel == None:
        servicevessel = get_servicevessel(cfgdir)

    logcount = 0

    servicefiles = os.listdir(cfgdir + '/' + servicevessel)
    for servicefile in servicefiles:
        # Count all the log files.  There is always either a .old or .new for
        # every log
        if servicefile.endswith('.old'):
            logcount = logcount + 1
        elif servicefile.endswith('.new'):
            if (servicefile[:-4] + ".old") not in servicefiles:
                # If there is a new file but no old file, we will count it
                logcount = logcount + 1

    if logcount >= 10:
        # If there are 10 or more logfiles already present, we don't want to create
        # another.  We'll ignore any race conditions with this because this should
        # be a rare case.
        return False
    else:
        # set up the circular logger, log the message, and return
        logfile = loggingrepy_core.circular_logger_core(cfgdir + '/' +
                                                        servicevessel + '/' +
                                                        logname)
        log(message)

        return True
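
A minimal usage sketch for the function above, assuming it is called from within the same module. The worker function, log name, and error text are hypothetical placeholders, not part of the original code:

import traceback

def do_subprocess_work():
    # Hypothetical stand-in for work done by a process that can't assume
    # init() has been called.
    raise RuntimeError("something went wrong in the subprocess")

try:
    do_subprocess_work()
except Exception:
    # multi_process_log returns True if the message was written, and False
    # if the 10-log cap in the service vessel has already been reached.
    if not multi_process_log(traceback.format_exc(), 'subprocess.log', '.'):
        pass  # too many logs already exist; the message is dropped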
Example #3
def init(logname, cfgdir='.', maxbuffersize=1024 * 1024):
    """
  <Purpose>
    Sets up the service logger to use the given logname, and the nodeman.cfg
    is in the given directory.
    
  <Arguments>
    logname - The name of the log file, as well as the name of the process lock
              to be used in the event of multi process locking
    cfgdir - The directory containing nodeman.cfg, by default it is the current
             directory
    maxbuffersize - The size of the circular logging buffer.
             
  <Exceptions>
    Exception if there is a problem reading from cfgdir/nodeman.cfg
    
  <Side Effects>
    All future calls to log will log to the given logfile.
    
  <Returns>
    None
  """

    global logfile
    global servicevessel

    servicevessel = get_servicevessel(cfgdir)

    logfile = loggingrepy_core.circular_logger_core(servicevessel + '/' +
                                                    logname,
                                                    mbs=maxbuffersize)
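
A minimal sketch of the intended call pattern, assuming init and log are used from the same module and that nodeman.cfg sits in the chosen directory. The log name and buffer size are illustrative, not taken from the original code:

# One-time setup: all subsequent log() calls go to a circular logger stored
# inside the service vessel, capped at 4 MB instead of the 1 MB default.
init('nodemanager.log', cfgdir='.', maxbuffersize=4 * 1024 * 1024)

# After init(), log() appends to the circular buffer, keeping the on-disk
# log bounded by maxbuffersize.
log('node manager started')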