Example #1
1
 def create_sitemap_index(self, sitemaps, cur_time):
     """
     Creates a sitemap index file from the sitemaps passed in
     @param sitemaps: a list of strings of sitemap names
     @param cur_time: timestamp string written to each <lastmod> element
     """
     if os.path.isfile(self.path):
         logging.getLogger("everything").info("%s already exists" % self.path)
     else:
         root = etree.Element("sitemapindex", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
         if sitemaps:
             for i in sitemaps:
                 sm = etree.SubElement(root, "sitemap")
                 loc = etree.SubElement(sm, "loc")
                 loc.text = "/".join(["http:/", self.host, i])
                 lastmod = etree.SubElement(sm, "lastmod")
                 lastmod.text = cur_time
             try:
                 with open(self.path, "w") as f:
                     f.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
                     f.write(etree.tostring(root))
             except IOError:
                 logging.getLogger("error").error("Error creating %s" % self.path)
Example #2
0
def _init_python():
    if PY2 or is_release():
        MinVersions.PYTHON2.check(sys.version_info)
    else:
        # for non release builds we allow Python3
        MinVersions.PYTHON3.check(sys.version_info)

    if is_osx():
        # We build our own openssl on OSX and need to make sure that
        # our own ca file is used in all cases as the non-system openssl
        # doesn't use the system certs
        install_urllib2_ca_file()

    if is_windows():
        # Not really needed on Windows as pygi-aio seems to work fine, but
        # wine doesn't have certs which we use for testing.
        install_urllib2_ca_file()

    if is_windows() and os.sep != "\\":
        # In the MSYS2 console MSYSTEM is set, which breaks os.sep/os.path.sep
        # If you hit this do a "setup.py clean -all" to get rid of the
        # bytecode cache then start things with "MSYSTEM= ..."
        raise AssertionError("MSYSTEM is set (%r)" % environ.get("MSYSTEM"))

    if is_windows():
        # gdbm is broken under msys2, this makes shelve use another backend
        sys.modules["gdbm"] = None
        sys.modules["_gdbm"] = None

    logging.getLogger().addHandler(PrintHandler())
Example #3
0
def set_logger_level(logger_name, log_level='error'):
    '''
    Tweak a specific logger's logging level
    '''
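    # LOG_LEVELS is assumed here to be a module-level dict mapping level names
    # ('debug', 'info', ...) to the corresponding logging constants; unknown
    # names fall back to logging.ERROR via dict.get below.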
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )
Example #4
0
    def do_bugout(self, args):
        """bugout [ <logger> ]  - remove a console logging handler from a logger"""
        args = args.split()
        if _debug: ConsoleCmd._debug("do_bugout %r", args)

        # get the logger name and logger
        if args:
            loggerName = args[0]
            if loggerName in logging.Logger.manager.loggerDict:
                logger = logging.getLogger(loggerName)
            else:
                logger = None
        else:
            loggerName = '__root__'
            logger = logging.getLogger()

        # remove the logging handler
        if not logger:
            self.stdout.write("not a valid logger name\n")
        elif not loggerName in self.handlers:
            self.stdout.write("no handler for %s\n" % loggerName)
        else:
            handler = self.handlers[loggerName]
            del self.handlers[loggerName]

            # see if this (or its parent) is a module level logger
            if hasattr(logger, 'globs'):
                logger.globs['_debug'] -= 1
            elif hasattr(logger.parent, 'globs'):
                logger.parent.globs['_debug'] -= 1

            # remove it from the logger
            logger.removeHandler(handler)
            self.stdout.write("handler to %s removed\n" % loggerName)
        self.stdout.write("\n")
Example #5
0
def main():
    # Init the logger
    init_logging(config_get(('logging', 'path'), 'log.txt'),
                 config_get(('logging', 'verbose'), False),
                 config_get(('logging', 'format'), None))

    # Set the datasource and init it
    wpmlangs = config_get(('wpm', 'languages'))
    settings = config_get(('settings'), {})
    init_datasource(wpmlangs, settings)

    # Start the server
    try:
        start_server(config_get(('wpm', 'languages')).keys(),
                     config_get(('server', 'host'), '0.0.0.0'),
                     config_get(('server', 'port'), 5000),
                     config_get(('server', 'use_reloader'), False),
                     config_get(('logging', 'verbose'), False),
                     config_get(('logging', 'format'), None),
                     config_get(('linkprocs', 'includefeatures'), False),
                     config_get(('server', 'debug'), False))
    except ValueError as e:
        logging.getLogger().fatal("Error running Semanticizer server: %s" \
                                  % e.message)
        raise
Example #6
0
def configure_logging():
    formatter = logging.Formatter('%(asctime)s :: PID %(process)d :: %(name)s (%(levelname)s) :: %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.DEBUG)
Example #7
0
    def __call__(self, *args, **kwargs):

        self._reply_arrived_event.clear()
        self.result = None

        wait = kwargs.get('wait', False)

        if self.door is None:
            self.init_device()

        logging.getLogger('HWR').debug("Executing sardana macro: %s" % self.macro_format)
        
        try:
            fullcmd = self.macro_format % args
        except TypeError:
            logging.getLogger('HWR').info("  - Wrong format for macro arguments. Macro is %s / args are (%s)" % (self.macro_format, str(args)))
            return
   
        try:
            import time
            self.t0 = time.time()
            if (self.doorstate in ["ON","ALARM"]):
                self.door.runMacro( (fullcmd).split()  )
                self.macrostate = SardanaMacro.STARTED
                self.emit('commandBeginWaitReply', (str(self.name()), ))
            else:
                logging.getLogger('HWR').error("%s. Cannot execute. Door is not READY", str(self.name()) )
                self.emit('commandFailed', (-1, self.name()))
        except TypeError:
            logging.getLogger('HWR').error("%s. Cannot properly format macro code. Format is: %s, args are %s", str(self.name()), self.macro_format, str(args)) 
            self.emit('commandFailed', (-1, self.name()))
        except DevFailed as error_dict:
            logging.getLogger('HWR').error("%s: Cannot run macro. %s", str(self.name()), error_dict) 
            self.emit('commandFailed', (-1, self.name()))
Example #8
0
def init_logger(location, config):
    """ Initialize the logger with settings from config. """

    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

    if get_conf(config, 'Logging.enabled', False) == False:
        handler = NullHandler()
        logging.getLogger("dagobah").addHandler(handler)
        return

    if get_conf(config, 'Logging.logfile', 'default') == 'default':
        path = os.path.join(location, 'dagobah.log')
    else:
        path = config['Logging']['logfile']

    level_string = get_conf(config, 'Logging.loglevel', 'info').upper()
    numeric_level = getattr(logging, level_string, None)

    logging.basicConfig(filename=path, level=numeric_level)

    root = logging.getLogger()
    stdout_logger = logging.StreamHandler(sys.stdout)
    stdout_logger.setLevel(logging.INFO)
    root.addHandler(stdout_logger)

    print('Logging output to %s' % path)
    logging.info('Logger initialized at level %s' % level_string)
Example #9
 def setUp(self):
     yield super(TestServerScan, self).setUp()
     yield self.get_client()
     yield self.do_create_lots_of_files('_pre')
     self.handler = handler = MementoHandler()
     handler.setLevel(logging.DEBUG)
     logging.getLogger('fsyncsrvr.SyncDaemon').addHandler(handler)
Example #10
0
 def isLocalProxy(self):
     # I'm a server if:
     # order_in_proxy is set (ie I have chance to become a server)
     # fk_use_as_proxy is equal to my id (ie the proxy server is me)
     result = (self.order_in_proxy != None)
     logging.getLogger().debug("isLocalProxy(#%s): %s" % (self.getId(), result))
     return result
Example #11
0
 def isProxyClient(self):
     # I'm a client if:
     # fk_use_as_proxy is set (ie I found a proxy server)
     # fk_use_as_proxy is not equal to my id (ie the proxy server is not me)
     result = (self.fk_use_as_proxy != None and self.fk_use_as_proxy != self.id)
     logging.getLogger().debug("isProxyClient(#%s): %s" % (self.getId(), result))
     return result
Example #12
0
 def isStateStopped(self):
     if self.getCommandStatut() == 'stop': # 'stop' deprecated a while ago, but may still be present, so we take the opportunity to fix it here
         logging.getLogger().warn("Detected command #%s in deprecated state 'stop', setting it to 'stopped'")
         self.setStateStopped()
     result = (self.getCommandStatut() == 'stop' or self.getCommandStatut() == 'stopped')
     logging.getLogger().debug("isStateStopped(#%s): %s" % (self.getId(), result))
     return result
Example #13
0
    def __init__(self):
        from django.conf import settings
        from django.core.exceptions import ImproperlyConfigured
        import logging

        if hasattr(settings, "LOGGING"):
            for module, properties in settings.LOGGING.items():
                logger = logging.getLogger(module)

                if "level" in properties:
                    logger.setLevel(properties["level"])
                elif hasattr(settings, "GLOBAL_LOG_LEVEL") and "handlers" not in properties:
                    logger.setLevel(settings.GLOBAL_LOG_LEVEL)
                else:
                    raise ImproperlyConfigured(
                        "A logger in settings.LOGGING doesn't have its log level set. "
                        + "Either set a level on that logger, or set GLOBAL_LOG_LEVEL."
                    )

                handlers = []
                if "handler" in properties:
                    handlers = [properties["handler"]]
                elif "handlers" in properties:
                    handlers = properties["handlers"]
                elif hasattr(settings, "GLOBAL_LOG_HANDLERS"):
                    handlers = settings.GLOBAL_LOG_HANDLERS

                self.add_handlers(logger, handlers)

        elif hasattr(settings, "GLOBAL_LOG_LEVEL") and hasattr(settings, "GLOBAL_LOG_HANDLERS"):
            logger = logging.getLogger("")
            logger.setLevel(settings.GLOBAL_LOG_LEVEL)
            handlers = settings.GLOBAL_LOG_HANDLERS

            self.add_handlers(logger, handlers)
Example #14
0
    def test_notifier(self):
        self.config(notification_driver=['log'])

        transport = _FakeTransport(self.conf)

        notifier = messaging.Notifier(transport, 'test.localhost')

        message_id = uuid.uuid4()
        self.mox.StubOutWithMock(uuid, 'uuid4')
        uuid.uuid4().AndReturn(message_id)

        timeutils.set_time_override()

        message = {
            'message_id': str(message_id),
            'publisher_id': 'test.localhost',
            'event_type': 'test.notify',
            'priority': 'INFO',
            'payload': 'bar',
            'timestamp': str(timeutils.utcnow.override_time),
        }

        logger = self.mox.CreateMockAnything()

        self.mox.StubOutWithMock(logging, 'getLogger')
        logging.getLogger('oslo.messaging.notification.test.notify').\
            AndReturn(logger)

        logger.info(jsonutils.dumps(message))

        self.mox.ReplayAll()

        notifier.info({}, 'test.notify', 'bar')
def configureBasicLogger(logDir,logName=""):
    # start logger:
    fileLogPath = "sim_" + strftime("%H-%M", gmtime()) + ".log" if len(logName) == 0 else logName
    fileLogPath = os.path.join(logDir, fileLogPath)
    if not os.path.exists(logDir):
        os.makedirs(logDir)
    #     flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    #     os.open(fileLogPath, flags)
    #     os.close(fileLogPath)
    # set up logging to file - see previous section for more details
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(processName)-12.12s] [%(levelname)-5.5s]  %(message)s",
                        datefmt='%m-%d %H:%M:%S',
                        filename=fileLogPath,
                        filemode='w')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(asctime)s [%(processName)-12.12s] [%(levelname)-5.5s] %(message)s',
                                  datefmt='%m-%d %H:%M:%S')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger().addHandler(console)
Example #16
0
    def propertyChanged(self, property_name, old_value, new_value):
        """
        Overriding BaseComponents.BlissWidget (propertyChanged object) 
        run method.
        """
        if property_name == 'beamline_setup':
            self.beamline_setup_hwobj = self.getHardwareObject(new_value)

            if self.beamline_setup_hwobj:
                self.diffractometer_hwobj = self.beamline_setup_hwobj.diffractometer_hwobj
                
                if self.diffractometer_hwobj:
                    self.diffractometer_hwobj.connect("minidiffStateChanged",
                                                      self.diffractometer_changed)
                    
                self.shape_history = self.beamline_setup_hwobj.shape_history_hwobj

                if self.queue_model_hwobj:
                    self.beamline_setup_hwobj.queue_model_hwobj = self.queue_model_hwobj
                    self.task_tool_box_widget.set_beamline_setup(self.beamline_setup_hwobj)
            else:
                logging.getLogger('user_level_log').error('Could not load beamline setup, '
                                                          'check configuration!')
        elif property_name == 'queue_model':
            self.queue_model_hwobj = self.getHardwareObject(new_value)

            if self.beamline_setup_hwobj:
                self.beamline_setup_hwobj.queue_model_hwobj = self.queue_model_hwobj
                self.task_tool_box_widget.set_beamline_setup(self.beamline_setup_hwobj)
Example #17
0
def IMDb(accessSystem=None, *arguments, **keywords):
    """Return an instance of the appropriate class.
    The accessSystem parameter is used to specify the kind of
    the preferred access system."""
    if accessSystem is None or accessSystem in ('auto', 'config'):
        try:
            cfg_file = ConfigParserWithCase(*arguments, **keywords)
            # Parameters set by the code take precedence.
            kwds = cfg_file.getDict('imdbpy')
            if 'accessSystem' in kwds:
                accessSystem = kwds['accessSystem']
                del kwds['accessSystem']
            else:
                accessSystem = 'http'
            kwds.update(keywords)
            keywords = kwds
        except Exception as e:
            import logging
            logging.getLogger('imdbpy').warn('Unable to read configuration' \
                                            ' file; complete error: %s' % e)
            # It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something is gone horribly
            # wrong: ignore everything and pretend we were called with
            # the 'http' accessSystem.
            accessSystem = 'http'
Example #18
0
def main(argv):
  """Runs the development application server."""
  args, option_dict = ParseArguments(argv)

  if len(args) != 1:
    print('Invalid arguments', file=sys.stderr)
    PrintUsageExit(1)

  root_path = args[0]

  if '_DEFAULT_ENV_AUTH_DOMAIN' in option_dict:
    auth_domain = option_dict['_DEFAULT_ENV_AUTH_DOMAIN']
    dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = auth_domain
  if '_ENABLE_LOGGING' in option_dict:
    enable_logging = option_dict['_ENABLE_LOGGING']
    dev_appserver.HardenedModulesHook.ENABLE_LOGGING = enable_logging

  log_level = option_dict[ARG_LOG_LEVEL]

  option_dict['root_path'] = os.path.realpath(root_path)

  logging.getLogger().setLevel(log_level)

  default_partition = option_dict[ARG_DEFAULT_PARTITION]
  appinfo = None
  try:
    appinfo, _, _ = dev_appserver.LoadAppConfig(
        root_path, {}, default_partition=default_partition)
  except yaml_errors.EventListenerError as e:
    logging.error('Fatal error when loading application configuration:\n%s', e)
    return 1
Example #19
0
def _create_lastmodified(path, recursive):
    if path.endswith("/files"):
        # all storages involved
        lms = [0]
        for storage in fileManager.registered_storages:
            try:
                lms.append(fileManager.last_modified(storage, recursive=recursive))
            except Exception:
                logging.getLogger(__name__).exception("There was an error retrieving the last modified data from storage {}".format(storage))
                lms.append(None)

        if any(x is None for x in lms):
            # we return None if ANY of the involved storages returned None
            return None

        # if we reach this point, we return the maximum of all dates
        return max(lms)

    elif path.endswith("/files/local"):
        # only local storage involved
        try:
            return fileManager.last_modified(FileDestinations.LOCAL, recursive=recursive)
        except Exception:
            logging.getLogger(__name__).exception("There was an error retrieving the last modified data from storage {}".format(FileDestinations.LOCAL))
            return None

    else:
        return None
Example #20
0
 def setLogBase(self, logBase):
     self.logBase = logBase
     if logBase:
         self.stdoutLog = logging.getLogger(logBase + '.stdout')
         self.stderrLog = logging.getLogger(logBase + '.stderr')
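         # The dotted names make these children of the '<logBase>' logger, so
         # their records propagate up to whatever handlers that logger has.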
     else:
         self.stdoutLog = self.stderrLog = None
Example #21
0
 def __init__(self):
     self.log = logging.getLogger("pyzord")
     self.usage_log = logging.getLogger("pyzord-usage")
     self.log.addHandler(logging.NullHandler())
     self.usage_log.addHandler(logging.NullHandler())
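     # NullHandler swallows records, so these loggers do not emit "no handlers
     # could be found" warnings when the application configures no handlers.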
     self.forwarder = None
     self.one_step = False
Example #22
0
    def new_html(self, html_path, image_prefix, run_number):
        logging.getLogger().debug('got a new html page: %s, prefix: %r, run number: %s', html_path, image_prefix, run_number)

        # prepend the time and date to the path we just got so
        # the history is more readable
        time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        index = (time_string, str(image_prefix), str(run_number))
        self.history_map[index] = html_path
        # synchronize the history prop
        if self.current_user is not None:
            whole_history = pickle.loads(self.getProperty('history').getValue())
            whole_history[self.current_user] = self.history_map
            self.getProperty('history').setValue(pickle.dumps(whole_history))
                
        self.history.insertRows(self.history.numRows())
        logging.debug('numRows() is %d', self.history.numRows())
        rows = self.history.numRows() - 1

        self.history.setText(rows, 0, QString(time_string))
        self.history.setText(rows, 1, QString(str(image_prefix)))
        self.history.setText(rows, 2, QString(str(run_number)))

        logging.debug('numRows() is %d', self.history.numRows())

        self.load_file(html_path)
Example #23
0
def start_server(langcodes,
                 host,
                 port,
                 use_reloader,
                 verbose=False,
                 logformat='[%(asctime)-15s][%(levelname)s][%(module)s][%(pathname)s:%(lineno)d]: %(message)s',
                 use_features=False,
                 debug=False):
    """
    Start a SemanticizerFlaskServer with all processors loaded into the
    pipeline.

    @param verbose: Set whether the Flask server should be verbose
    @param logformat: The logformat used by the Flask server
    """
    # Initialize the pipeline
    pipeline = procpipeline.build(langcodes, use_features, debug=debug)
    # Create the FlaskServer
    logging.getLogger().info("Setting up server")
    server = Server()
    server.set_debug(verbose, logformat)
    # Setup all available routes / namespaces for the HTTP server
    server.setup_all_routes(pipeline, langcodes)
    logging.getLogger().info("Done setting up server, now starting...")
    # And finally, start the thing
    server.start(host, port, use_reloader)
Example #24
def addFeaturesAuthorFreqInReview( ctx, outFeaturesMaps):
    logging.getLogger("Features").info( "author frequency" )
    multiCommentKey="M-C"
    isAuthorKey="I-A"
    reviewStarsKey="R-S"
    reviewStarsDeviationKey = "R-SD"
    for itrComment, (reviewId,author) in enumerate( ctx.mAuthorReviewPerComment ):
        if(ctx.mAuthorFreqPerReview[reviewId][author]>5):
            outFeaturesMaps[ itrComment ][multiCommentKey]=2
        elif(ctx.mAuthorFreqPerReview[reviewId][author]>1):
            outFeaturesMaps[ itrComment ][multiCommentKey]=1
        else:
            outFeaturesMaps[ itrComment ][multiCommentKey]=0
        if(ctx.mReviewAuthorMap[reviewId]==author):
            outFeaturesMaps[ itrComment ][isAuthorKey]=1
        else:
            outFeaturesMaps[ itrComment ][isAuthorKey]=0
            
        outFeaturesMaps[ itrComment ][reviewStarsKey]=float(ctx.mReviewStarMap[reviewId])
        
        if(ctx.mReviewStarMap[reviewId]>ctx.productAvgStars):
            outFeaturesMaps[ itrComment ][reviewStarsDeviationKey]=-1
        elif(ctx.mReviewStarMap[reviewId]<ctx.productAvgStars):
            outFeaturesMaps[ itrComment ][reviewStarsDeviationKey]=1
        else:
            outFeaturesMaps[ itrComment ][reviewStarsDeviationKey]=0
Example #25
0
    def do_bugin(self, args):
        """bugin [ <logger> ]  - add a console logging handler to a logger"""
        args = args.split()
        if _debug: ConsoleCmd._debug("do_bugin %r", args)

        # get the logger name and logger
        if args:
            loggerName = args[0]
            if loggerName in logging.Logger.manager.loggerDict:
                logger = logging.getLogger(loggerName)
            else:
                logger = None
        else:
            loggerName = '__root__'
            logger = logging.getLogger()

        # add a logging handler
        if not logger:
            self.stdout.write("not a valid logger name\n")
        elif loggerName in self.handlers:
            self.stdout.write("%s already has a handler\n" % loggerName)
        else:
            handler = ConsoleLogHandler(logger)
            self.handlers[loggerName] = handler
            self.stdout.write("handler to %s added\n" % loggerName)
        self.stdout.write("\n")
Example #26
0
def main():
  logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
  logging.getLogger().addFilter(ColoredLoggingFilter())

  if len(sys.argv) < 2:
    ShowHelpAndExit()

  # Move to the Mozc root source directory only once since os.chdir
  # affects functions in os.path and that causes troublesome errors.
  os.chdir(MOZC_ROOT)

  command = sys.argv[1]
  args = sys.argv[2:]

  if command == 'gyp':
    (cmd_opts, cmd_args) = ParseGypOptions(args)
    GypMain(cmd_opts, cmd_args)
  elif command == 'build':
    (cmd_opts, cmd_args) = ParseBuildOptions(args)
    BuildMain(cmd_opts, cmd_args)
  elif command == 'runtests':
    (cmd_opts, cmd_args) = ParseRunTestsOptions(args)
    RunTestsMain(cmd_opts, cmd_args)
  elif command == 'clean':
    (cmd_opts, cmd_args) = ParseCleanOptions(args)
    CleanMain(cmd_opts, cmd_args)
  else:
    logging.error('Unknown command: %s', command)
    ShowHelpAndExit()
Example #27
0
 def init_logger(self, pth):
     handler = logging.FileHandler(os.path.join(pth, "log.txt"), 'a')
     formatter = logging.Formatter(
         "%(levelname)-8s | %(asctime)s | %(name)-10s | %(message)s")
     handler.setFormatter(formatter)
     handler.setLevel(logging.DEBUG)
     logging.getLogger("").addHandler(handler)
Example #28
0
    def setup(self, verbose_level, error_level, logdir):
        self.presetup()
        logger_dnf = logging.getLogger("dnf")

        # setup file logger
        logfile = os.path.join(logdir, dnf.const.LOG)
        handler = _create_filehandler(logfile)
        logger_dnf.addHandler(handler)
        # temporarily turn off stdout/stderr handlers:
        self.stdout_handler.setLevel(SUPERCRITICAL)
        self.stderr_handler.setLevel(SUPERCRITICAL)
        # put the marker in the file now:
        _paint_mark(logger_dnf)
        # bring std handlers to the preferred level
        self.stdout_handler.setLevel(verbose_level)
        self.stderr_handler.setLevel(error_level)

        # setup Python warnings
        logging.captureWarnings(True)
        logger_warnings = logging.getLogger("py.warnings")
        logger_warnings.addHandler(self.stderr_handler)
        logger_warnings.addHandler(handler)

        # setup RPM callbacks logger
        logger_rpm = logging.getLogger("dnf.rpm")
        logger_rpm.propagate = False
        logger_rpm.setLevel(SUBDEBUG)
        logfile = os.path.join(logdir, dnf.const.LOG_RPM)
        handler = _create_filehandler(logfile)
        logger_rpm.addHandler(handler)
        _paint_mark(logger_rpm)
Example #29
0
    def readfp(self, fp, platform):
        """The readfp method reads configuration data from a file or file-like
        object for a specific platform.
        """
        parser = ConfigParser.SafeConfigParser(self._config_file_defaults)
        parser.readfp(fp)

        if not parser.has_section(platform):
            logger = logging.getLogger(burton.logger_name)
            logger.error("Unable to parse config file")
            logger.error("Platform " + str(platform) + " does not exist")
            return False

        sections = [ parser.defaults(), dict(parser.items(platform)) ]

        for section in sections:
            for key in section:
                if key not in self._config_file_defaults:
                    logger = logging.getLogger(burton.logger_name)
                    logger.error("Unable to parse config file")
                    logger.error(key + " is not a valid option")
                    return False
                else:
                    value = section[key]
                    if value is not None and value != "None" and len(value) > 0:
                        self.set(key, self._parse_value(value))

        return self._validate_config_file(self._config_file_defaults)
Example #30
0
def getLogger(name, goodLooking=True, logLevel=logging.INFO):
 
  logger = logging.getLogger(name)
  logger.setLevel(logLevel)
  
  ch = logging.StreamHandler()
  ch.setLevel(logLevel)

  if goodLooking:
    formatter = PsiopicFormatter()
  else:
    formatter = logging.Formatter()

  ch.setFormatter(formatter)

  logger.addHandler(ch)

  # setup requests lib logger if debug is on
  if logLevel == logging.DEBUG: 
    requests_logger = logging.getLogger('requests.packages.urllib3')
    requests_logger.setLevel(logging.DEBUG)
    requests_logger.propagate = True
    requests_logger.addHandler(ch)


  return logger