Example #1
1
 def isLocalProxy(self):
     """Return True when this machine is a local-proxy candidate.

     A machine can become a proxy server as soon as its
     ``order_in_proxy`` attribute is set (non-None).
     """
     # order_in_proxy is set (ie I have a chance to become a server)
     # NOTE(review): the original comment also mentioned checking
     # fk_use_as_proxy == my id, but the code never did — confirm intent.
     result = self.order_in_proxy is not None
     # Lazy %-args: the message is only formatted when DEBUG is enabled.
     logging.getLogger().debug("isLocalProxy(#%s): %s", self.getId(), result)
     return result
def main():
    """Drive dtrace processing: read program points, then executions twice.

    Command line: argv[1] = highest trace-file index (inclusive),
    argv[2] = base name of the trace files ("<name>.<i>").
    """
    program_name = sys.argv[2]
    # Full DEBUG log goes to a file; the console handler below shows INFO+.
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s [%(levelname)s] %(message)s",
        filename="process-dtrace.log",
        filemode="w",
    )
    # Lazy %-args: avoid formatting when the level is disabled.
    logging.debug("Starting work with %s", program_name)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter("%(message)s"))
    logging.getLogger("").addHandler(console)

    program = Program()
    program.debug = True
    # Hoisted: the file count was re-derived from sys.argv three times.
    file_count = int(sys.argv[1]) + 1
    for i in range(file_count):
        program.read_program_points("%s.%d" % (program_name, i))

    # program.read_program_invariants("%s.invariants" % program_name)
    # First pass over the executions (flag True), then a second plain pass.
    for i in range(file_count):
        program.read_program_executions("%s.%d" % (program_name, i), True)

    for i in range(file_count):
        program.read_program_executions("%s.%d" % (program_name, i))

    logging.debug("Finished work with %s", program_name)
Example #3
1
 def create_sitemap_index(self, sitemaps, cur_time):
     """
     Creates a sitemap index file from the sitemaps passed in.

     @param sitemaps: a list of strings of sitemap names
     @param cur_time: timestamp string used as <lastmod> for every entry

     Does nothing when the index file already exists, and writes nothing
     when ``sitemaps`` is empty (preserving the original behavior).
     """
     if os.path.isfile(self.path):
         logging.getLogger("everything").info("%s already exists" % self.path)
     else:
         root = etree.Element("sitemapindex", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
         # Inverted the original "if not sitemaps: pass / else:" — same behavior.
         if sitemaps:
             for name in sitemaps:
                 sm = etree.SubElement(root, "sitemap")
                 loc = etree.SubElement(sm, "loc")
                 # "http:/" plus the join separator yields "http://host/name".
                 loc.text = "/".join(["http:/", self.host, name])
                 lastmod = etree.SubElement(sm, "lastmod")
                 lastmod.text = cur_time
             try:
                 # The with-block closes the file; the original also called
                 # f.close() inside it, which was redundant.
                 # NOTE(review): etree.tostring may return bytes on Python 3 —
                 # confirm text-mode write is intended.
                 with open(self.path, "w") as f:
                     f.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
                     f.write(etree.tostring(root))
             except IOError:
                 logging.getLogger("error").error("Error creating %s" % self.path)
Example #4
1
 def unlink(self, cr, uid, ids, context=None):
     """Delete attachments: update the DB rows first, then the backing files.

     @param cr: database cursor
     @param uid: current user id
     @param ids: ids of the attachments to delete
     @param context: optional context dict
     @return: result of the superclass unlink
     """
     stor = self.pool.get("document.storage")
     unres = []
     # We have to do the unlink in 2 stages: prepare a list of actual
     # files to be unlinked, update the db (safer to do first, can be
     # rolled back) and then unlink the files. The list wouldn't exist
     # after we discard the objects
     # Re-search to keep only ids that still exist.
     ids = self.search(cr, uid, [("id", "in", ids)])
     for f in self.browse(cr, uid, ids, context=context):
         # TODO: update the node cache
         # Walk up the directory tree until a parent declares a storage.
         par = f.parent_id
         storage_id = None
         while par:
             if par.storage_id:
                 storage_id = par.storage_id
                 break
             par = par.parent_id
         # assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
         if storage_id:
             r = stor.prepare_unlink(cr, uid, storage_id, f)
             if r:
                 unres.append(r)
         else:
             # Best-effort: a file without storage is logged, not fatal.
             logging.getLogger("document").warning("Unlinking attachment #%s %s that has no storage", f.id, f.name)
     res = super(document_file, self).unlink(cr, uid, ids, context)
     # Only after the DB unlink succeeded do we remove the actual files.
     stor.do_unlink(cr, uid, unres)
     return res
 def test_config_8_ok(self):
     """Applying config8 on top of config1 must keep existing loggers enabled.

     First verifies that under config1 the compiler.parser handler receives
     both records, then applies config8 and checks that compiler.parser is
     not disabled and that records from compiler.parser and compiler.lexer
     both reach the top-level 'compiler' logger's handler.
     """
     self.apply_config(self.config1)
     logger = logging.getLogger("compiler.parser")
     # Both will output a message
     logger.info(self.next_message())
     logger.error(self.next_message())
     h = logger.handlers[0]
     # Messages 1 and 2 were captured directly on compiler.parser.
     self.assertTrue(h.matchall([dict(levelname="INFO", message="1"), dict(levelname="ERROR", message="2")]))
     self.apply_config(self.config8)
     logger = logging.getLogger("compiler.parser")
     # Re-applying config must not leave the existing logger disabled.
     self.assertFalse(logger.disabled)
     toplogger = logging.getLogger("compiler")
     # Both will output a message
     logger.info(self.next_message())
     logger.error(self.next_message())
     logger = logging.getLogger("compiler.lexer")
     # Both will output a message
     logger.info(self.next_message())
     logger.error(self.next_message())
     # Messages 3-6 should all have propagated up to the 'compiler' handler.
     h = toplogger.handlers[0]
     self.assertTrue(
         h.matchall(
             [
                 dict(levelname="INFO", message="3"),
                 dict(levelname="ERROR", message="4"),
                 dict(levelname="INFO", message="5"),
                 dict(levelname="ERROR", message="6"),
             ]
         )
     )
Example #6
1
def main():
    """CLI entry point: configure logging, dispatch the command, map
    known failures to exit status 1 with a readable message."""
    stderr_handler = logging.StreamHandler(stream=sys.stderr)
    stderr_handler.setFormatter(logging.Formatter())
    stderr_handler.setLevel(logging.INFO)

    root = logging.getLogger()
    root.addHandler(stderr_handler)
    root.setLevel(logging.DEBUG)

    # Keep the 'requests' library from propagating records to our handlers.
    logging.getLogger("requests").propagate = False

    try:
        TopLevelCommand().sys_dispatch()
    except KeyboardInterrupt:
        log.error("\nAborting.")
        sys.exit(1)
    except (UserError, NoSuchService, DependencyError) as e:
        log.error(e.msg)
        sys.exit(1)
    except NoSuchCommand as e:
        log.error("No such command: %s", e.command)
        log.error("")
        # Show the "commands:" section of the supercommand's docstring.
        log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
        sys.exit(1)
    except APIError as e:
        log.error(e.explanation)
        sys.exit(1)
Example #7
0
def run_appcfg(argv):
    """Deploy to App Engine via appcfg, then optionally run a remote syncdb.

    ``argv`` mirrors sys.argv: argv[1] is overwritten with the "update"
    action and the project directory is appended.  Pass --nosyncdb to skip
    the remote database sync after deployment.
    """
    # We don't really want to use that one though, it just executes this one
    from google.appengine.tools import appcfg

    # Reset the logging level to WARN as appcfg will spew tons of logs on INFO
    logging.getLogger().setLevel(logging.WARN)

    new_args = argv[:]
    new_args[1] = "update"
    new_args.append(PROJECT_DIR)
    syncdb = True
    if "--nosyncdb" in new_args:
        syncdb = False
        new_args.remove("--nosyncdb")
    appcfg.main(new_args)

    if syncdb:
        print "Running syncdb."
        # Wait a little bit for deployment to finish
        for countdown in range(9, 0, -1):
            sys.stdout.write("%s\r" % countdown)
            time.sleep(1)
        from django.db import connections

        # Remote connections need explicit setup before syncdb can use them.
        for connection in connections.all():
            if hasattr(connection, "setup_remote"):
                connection.setup_remote()
        call_command("syncdb", remote=True, interactive=True)

    if getattr(settings, "ENABLE_PROFILER", False):
        print "--------------------------\n" "WARNING: PROFILER ENABLED!\n" "--------------------------"
Example #8
0
def main():
    """Parse command-line options and run the requested ADC mode
    (raw debug, calibration, or the typewriter loop)."""
    parser = OptionParser()
    parser.add_option("-d", "--debugraw", action="store_true", default=False, help="print raw adc values")
    parser.add_option("-c", "--cal", action="store_true", default=False, help="perform calibration")
    parser.add_option("-f", "--calfile", action="store", default="cal.dat", help="calibration file to use")
    parser.add_option("-S", "--session", action="store", default=None, help="screen session to receive keypresses")
    parser.add_option(
        "-v", "--verbose", action="count", default=0, dest="verbosity", help="debugging verbosity v:info vv:debug"
    )
    opts, args = parser.parse_args()

    # -v => INFO, -vv (or more) => DEBUG
    if opts.verbosity > 0:
        logging.getLogger().setLevel(logging.DEBUG if opts.verbosity > 1 else logging.INFO)

    # ADC channel to read
    ADC_CHANNEL = 0
    # RPI expansion bus GPIO pins to use for SPI bus
    SPICLK = 18
    SPIMISO = 23
    SPIMOSI = 24
    SPICS = 25

    with ADC_SPI(SPICLK, SPIMOSI, SPIMISO, SPICS) as adc:
        if opts.debugraw:
            debug_raw(adc, ADC_CHANNEL)
        elif opts.cal:
            calibrate(adc, ADC_CHANNEL, opts.calfile)
        else:
            typewriter(adc, ADC_CHANNEL, opts.calfile, opts.session)
Example #9
0
    def put_formatted(
        self,
        text,
        fg_color=None,
        bg_color=None,
        font=None,
        size=None,
        bold=False,
        italic=False,
        underline=False,
        strike=False,
    ):
        """Insert *text* at the current position, applying the style markup
        embedded in the text itself; the keyword arguments supply defaults.
        Silently returns if the text is not well-formed XML."""
        wrapped = "<span>" + text + "</span>"
        try:
            parsed = e3.common.XmlParser.XmlParser(wrapped).result
        except xml.parsers.expat.ExpatError:
            logging.getLogger("gtkui.RichWidget").debug("cant parse '%s'" % (text,))
            return

        style_tree = e3.common.XmlParser.DictObj(parsed)
        self._put_formatted(style_tree, fg_color, bg_color, font, size, bold, italic, underline, strike)
def _sigterm_handler(signum, frame):  # pragma no cover
    """ Try to go down gracefully. """
    # Announce on both the log and stdout so the shutdown is visible
    # even if logging is misconfigured.
    logging.getLogger().info("sigterm_handler invoked")
    print "sigterm_handler invoked"
    sys.stdout.flush()
    _cleanup()
    # Non-zero exit status marks a signal-triggered shutdown.
    sys.exit(1)
Example #11
0
def main():
    """Build the WSGI route table and serve the application; DEBUG
    logging is enabled only on the development server."""
    if is_dev_env():
        logging.getLogger().setLevel(logging.DEBUG)

    # Route table: pattern -> handler; the catch-all must stay last.
    routes = [
        (r"/", MainHandler),
        (r"/new", NewHandler),
        (r"/more", MoreHandler),
        (r"/create", CreateHandler),
        (r"/add-comment", CommentHandler),
        (r"/fetch-comments", FetchCommentsHandler),
        (r"/note/([^/]+)", NoteHandler),
        (r"/permlink/([^/]+)", PermLinkHandler),
        (r"/edit/([\d]+)", EditHandler),
        (r"/delete", DeleteHandler),
        (r"/feed", FeedHandler),
        (r"/faq", FaqHandler),
        (r"/.*", NotFoundPageHandler),
    ]
    application = webapp.WSGIApplication(routes, debug=is_dev_env())

    run_wsgi_app(application)
Example #12
0
File: cli.py Project: feld/SickRage
def subliminal(ctx, addic7ed, legendastv, opensubtitles, cache_dir, debug):
    """Subtitles, faster than your thoughts."""
    # create cache directory
    try:
        os.makedirs(cache_dir)
    except OSError:
        # Only swallow the error when the directory already exists.
        if not os.path.isdir(cache_dir):
            raise

    # configure cache
    region.configure(
        "dogpile.cache.dbm",
        expiration_time=timedelta(days=30),
        arguments={"filename": os.path.join(cache_dir, cache_file), "lock_factory": MutexLock},
    )

    # configure logging
    if debug:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        sub_logger = logging.getLogger("subliminal")
        sub_logger.addHandler(handler)
        sub_logger.setLevel(logging.DEBUG)

    # provider configs: keep only providers whose (username, password)
    # credentials were supplied on the command line
    provider_configs = {}
    for provider, creds in (("addic7ed", addic7ed), ("legendastv", legendastv), ("opensubtitles", opensubtitles)):
        if creds:
            provider_configs[provider] = {"username": creds[0], "password": creds[1]}
    ctx.obj = {"provider_configs": provider_configs}
Example #13
0
    def test_unknown_error(self):
        """An unparseable song must log an 'Unknown error' line to unknown_error.log."""
        music_map_log = logging.getLogger("music_map")
        music_map_handler = logging.FileHandler("music_map.log", mode="w")
        music_map_log.addHandler(music_map_handler)

        unknown_error_log = logging.getLogger("unknown_error")
        unknown_error_handler = logging.FileHandler("unknown_error.log", mode="w")
        unknown_error_log.addHandler(unknown_error_handler)

        song_path = "./Elliott_Smith/Elliott_Smith/06_Elliott Smith_Coming up Roses.mp3"
        self.assertRaises(UnparseableSongError, Song, song_path, ["."])

        # Context manager: the original left the file handle open if an
        # assertion inside the loop failed.
        with open("unknown_error.log") as log:
            for line in log:
                # It shoves a bunch of stacktrace info. We're just interested in the unknown error part.
                if line.startswith("Unknown"):
                    # assertEqual: assertEquals is a deprecated alias.
                    self.assertEqual(
                        "Unknown error on './Elliott_Smith/Elliott_Smith/06_Elliott Smith_Coming up Roses.mp3'. Continuing.",
                        line.strip(),
                    )

        test_utils.close_all_handlers("music_map")
        test_utils.close_all_handlers("unknown_error")
        os.remove("music_map.log")
        os.remove("unknown_error.log")
Example #14
0
def main():
    """CGI entry point for mobwrite sync requests.

    Dispatches on whichever form field is present:
      q     -> sync request, plain-text response
      p     -> sync request, JavaScript-wrapped response
      clean -> cron hook to clean the database
    """
    # Choose from: CRITICAL, ERROR, WARNING, INFO, DEBUG
    logging.getLogger().setLevel(logging.DEBUG)

    form = cgi.FieldStorage()
    if form.has_key("q"):
        # Client sending a sync.  Requesting text return.
        print "Content-Type: text/plain"
        print ""
        print parseRequest(form["q"].value)
    elif form.has_key("p"):
        # Client sending a sync.  Requesting JS return.
        print "Content-Type: text/javascript"
        print ""
        value = parseRequest(form["p"].value)
        # Escape backslashes, quotes and newlines so the payload survives
        # inside a JS double-quoted string literal.
        value = value.replace("\\", "\\\\").replace('"', '\\"')
        value = value.replace("\n", "\\n").replace("\r", "\\r")
        print 'mobwrite.syncRun2_("%s");' % value
    elif form.has_key("clean"):
        # External cron job to clean the database.
        print "Content-Type: text/plain"
        print ""
        cleanup()
    else:
        # Unknown request.
        print "Content-Type: text/plain"
        print ""

    logging.debug("Disconnecting.")
Example #15
0
    def __init__(self):
        """Configure Django loggers from settings.

        Reads ``settings.LOGGING`` ({module: {level, handler(s)}}) when
        present; otherwise falls back to GLOBAL_LOG_LEVEL and
        GLOBAL_LOG_HANDLERS applied to the root logger.  Raises
        ImproperlyConfigured when a logger ends up with no usable level.
        """
        from django.conf import settings
        from django.core.exceptions import ImproperlyConfigured
        import logging

        if hasattr(settings, "LOGGING"):
            for module, properties in settings.LOGGING.items():
                logger = logging.getLogger(module)

                # Per-logger "level" wins; else the global level, but only
                # when the logger does not declare its own handlers.
                if "level" in properties:
                    logger.setLevel(properties["level"])
                elif hasattr(settings, "GLOBAL_LOG_LEVEL") and "handlers" not in properties:
                    logger.setLevel(settings.GLOBAL_LOG_LEVEL)
                else:
                    raise ImproperlyConfigured(
                        "A logger in settings.LOGGING doesn't have its log level set. "
                        + "Either set a level on that logger, or set GLOBAL_LOG_LEVEL."
                    )

                # Handler precedence: single "handler" > "handlers" list >
                # global handler list > none.
                handlers = []
                if "handler" in properties:
                    handlers = [properties["handler"]]
                elif "handlers" in properties:
                    handlers = properties["handlers"]
                elif hasattr(settings, "GLOBAL_LOG_HANDLERS"):
                    handlers = settings.GLOBAL_LOG_HANDLERS

                self.add_handlers(logger, handlers)

        elif hasattr(settings, "GLOBAL_LOG_LEVEL") and hasattr(settings, "GLOBAL_LOG_HANDLERS"):
            # No LOGGING dict: configure only the root logger globally.
            logger = logging.getLogger("")
            logger.setLevel(settings.GLOBAL_LOG_LEVEL)
            handlers = settings.GLOBAL_LOG_HANDLERS

            self.add_handlers(logger, handlers)
Example #16
0
def init_logging(app="tyggbot"):
    """Create and return the application logger.

    The app logger is set to DEBUG.  On the root logger, records below
    WARNING go to stdout and records at WARNING+ go to stderr, so errors
    are not interleaved with normal output.
    """

    class LogFilter(logging.Filter):
        """Pass only records strictly below *level* (splits stdout/stderr)."""

        def __init__(self, level):
            # Fix: the original never called the base initializer, leaving
            # logging.Filter's own state (self.name/self.nlen) unset.
            super(LogFilter, self).__init__()
            self.level = level

        def filter(self, record):
            return record.levelno < self.level

    # Uncomment the line below if you want full debug messages from everything!
    # This includes all debug messages from the IRC libraries, which can be useful for debugging.
    # logging.basicConfig(level=logging.DEBUG)

    log = logging.getLogger(app)
    log.setLevel(logging.DEBUG)

    colored_formatter = ColoredFormatter("[%(asctime)s] [%(levelname)-20s] %(message)s")
    log_filter = LogFilter(logging.WARNING)

    # stdout: DEBUG..INFO only (the filter drops WARNING and above).
    logger_stdout = logging.StreamHandler(sys.stdout)
    logger_stdout.setFormatter(colored_formatter)
    logger_stdout.addFilter(log_filter)
    logger_stdout.setLevel(logging.DEBUG)

    # stderr: WARNING and above.
    logger_stderr = logging.StreamHandler(sys.stderr)
    logger_stderr.setFormatter(colored_formatter)
    logger_stderr.setLevel(logging.WARNING)

    logging.getLogger().addHandler(logger_stdout)
    logging.getLogger().addHandler(logger_stderr)

    return log
Example #17
0
 def set_all_publisher_uris(self, uris_dict):
     """
     @type uris_dict: C{dict} of C{str} : C{str}
     @param uris_dict: A Dictionary of uris for the published data
     for *all* Publishers in {name : uri}
     """
     # Lazy %-args: the dict repr is only built when DEBUG is enabled.
     logging.getLogger(__name__).debug("Urls from all publishers: %s", uris_dict)
def init_logging(file_enabled=False, stdout_enabled=True):
    """Configure root logging for the Usergrid iterator.

    The root logger is set to INFO and the noisy urllib3 connection-pool
    loggers are capped at WARN.  Optionally attaches a rotating file
    handler and/or a stdout stream handler, both at INFO and sharing one
    timestamped format.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # Silence per-connection chatter from both urllib3 import paths.
    for noisy in ("urllib3.connectionpool", "requests.packages.urllib3.connectionpool"):
        logging.getLogger(noisy).setLevel(logging.WARN)

    fmt = logging.Formatter(
        fmt="%(asctime)s | %(name)s | %(processName)s | %(levelname)s | %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p"
    )

    if file_enabled:
        file_handler = logging.handlers.RotatingFileHandler(
            filename="./UsergridIterator.log", mode="a", maxBytes=204857600, backupCount=10
        )
        file_handler.setFormatter(fmt)
        file_handler.setLevel(logging.INFO)
        root.addHandler(file_handler)

    if stdout_enabled:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(fmt)
        stream_handler.setLevel(logging.INFO)
        root.addHandler(stream_handler)
Example #19
0
    def user_group_changed(self, value):
        """React to an edit of the user-group field: while an unapplied
        saved group exists, warn and highlight the field; always clear
        the saved-group flag."""
        if self.saved_group:
            logging.getLogger("user_level_log").warning("User group changed, press set to apply change")
            self.user_group_ledit.setPaletteBackgroundColor(widget_colors.LIGHT_RED)

        self.saved_group = False
Example #20
0
def sub_account_create(loginName, name, password, email):
    """Create a keystone user and a matching t_account row.

    @param loginName: parent account login name
    @param name: new sub-account user name
    @param password: new sub-account password (stored as MD5(MD5(...)))
    @param email: new sub-account email
    @return: True on success, False on failure (keystone user is deleted
             and the transaction rolled back on DB failure)
    """
    cursor = get_cursor()
    uuid = ""
    try:
        uuid = keystone.user_create(name, password, email)
        if uuid == "ERROR":
            return False
        # Parameterized query: values are bound by the driver instead of
        # being interpolated into the SQL string (prevents SQL injection).
        sql = (
            "insert into t_account values "
            "(null,%s,%s,MD5(MD5(%s)),%s,0,%s,null,%s,null)"
        )
        cursor.execute(
            sql,
            (uuid, name, password, email, loginName, time.strftime("%Y-%m-%d %X", time.localtime())),
        )
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate.  The password is deliberately no longer logged.
        logging.getLogger("operate").log(
            30, "create sub-account failed %s %s %s ." % (loginName, name, email)
        )
        keystone.user_delete(uuid)
        transaction.rollback()
        flag = False
    else:
        logging.getLogger("operate").log(
            20, "create sub-account successed %s %s %s." % (loginName, name, email)
        )
        transaction.commit()
        flag = True
    return flag
Example #21
0
 def dispatch(self, controller, method):
     """Invoke *method* on *controller* with the merged HTTP parameters.

     Merges query args, form fields and uploaded files into the request
     params, logs a truncated view of the query args, and converts any
     exception raised by the handler into an InternalServerError carrying
     a JSON-encoded traceback for the web client.
     @return: the handler's response, or the error response
     """
     params = dict(self.httprequest.args)
     params.update(self.httprequest.form)
     params.update(self.httprequest.files)
     self.init(params)
     # Log short string values verbatim; anything long or non-string is
     # logged by type to keep the debug line readable.
     akw = {}
     for key, value in self.httprequest.args.iteritems():
         if isinstance(value, basestring) and len(value) < 1024:
             akw[key] = value
         else:
             akw[key] = type(value)
     _logger.debug("%s --> %s.%s %r", self.httprequest.method, controller.__class__.__name__, method.__name__, akw)
     try:
         r = method(controller, self, **self.params)
     except Exception:
         logging.getLogger(__name__ + ".HttpRequest.dispatch").exception(
             "An error occurred while handling a json request"
         )
         # The traceback is escaped and shipped to the client for display.
         r = werkzeug.exceptions.InternalServerError(
             cgi.escape(
                 simplejson.dumps(
                     {
                         "code": 300,
                         "message": "OpenERP WebClient Error",
                         "data": {"type": "client_exception", "debug": "Client %s" % traceback.format_exc()},
                     }
                 )
             )
         )
     # NOTE(review): "or 1" makes response logging unconditional — looks
     # like leftover debugging; confirm whether self.debug should gate this.
     if self.debug or 1:
         if isinstance(r, (werkzeug.wrappers.BaseResponse, werkzeug.exceptions.HTTPException)):
             _logger.debug("<-- %s", r)
         else:
             _logger.debug("<-- size: %s", len(r))
     return r
Example #22
0
def data_processing_launch(
    dp_name, tenant, user, algoFile, dataFile, output, serverName, serverCount, serverConfig, serverMeta, hadoopMeta
):
    """Insert a t_dataProcessing row in 'building' state.

    @return: True when exactly one row was inserted, else False;
             the outcome is logged either way.
    """
    # Parameterized query: values are bound by the driver instead of being
    # interpolated into the SQL string (prevents SQL injection).
    sql = """INSERT INTO t_dataProcessing VALUES (NULL,%s,
        (SELECT t_tenant.id FROM t_tenant where t_tenant.name = %s),
        (SELECT t_account.id FROM t_account where t_account.loginName = %s),
        %s,%s,%s,%s,%s,%s,%s,%s,
        'building',%s,NULL)
        """
    params = (
        dp_name,
        tenant,
        user,
        algoFile,
        dataFile,
        output,
        serverName,
        int(serverCount),  # the original formatted this with %d
        serverConfig,
        serverMeta,
        hadoopMeta,
        time.strftime("%Y-%m-%d %X", time.localtime()),
    )
    cursor = get_cursor()
    if cursor.execute(sql, params) == 1:
        logging.getLogger("operate").log(20, "dataprocessing launch %s successed." % (dp_name))
        flag = True
    else:
        logging.getLogger("operate").log(30, "dataprocessing launch %s failed." % (dp_name))
        flag = False
    return flag
Example #23
0
def _main():
    """Parse CLI arguments and run a stock-data download for one ticker.

    Accepts either one positional arg (ticker; dates default to
    2000-01-01 through today) or three (ticker, start_date, end_date).
    """
    parser = optparse.OptionParser(__doc__)
    parser.add_option(
        "-p", "--prc_filepath", dest="prc_filepath", help="Download filepath for price volume data", default=None
    )
    parser.add_option(
        "-d", "--dvd_filepath", dest="dvd_filepath", help="Download filepath for dividend data", default=None
    )
    parser.add_option(
        "-s", "--splt_filepath", dest="splt_filepath", help="Download filepath for split data", default=None
    )
    parser.add_option("-q", "--quiet", dest="quiet", help="Not verbose", action="store_true", default=False)
    options, args = parser.parse_args()
    num_args = len(args)
    if num_args == 1:
        ticker = args[0]
        # start_date = '19000101'
        start_date = "20000101"
        end_date = datetime.datetime.now().strftime("%Y%m%d")
    elif num_args == 3:
        ticker, start_date, end_date = args
    else:
        parser.error("Failed to parse args; see usage: " "num_args={0}".format(num_args))
    if options.quiet:
        # Suppress everything below CRITICAL on the root logger.
        logging.getLogger("").setLevel(logging.CRITICAL)
    filepaths = (options.prc_filepath, options.dvd_filepath, options.splt_filepath)
    if all(fp is None for fp in filepaths):
        # Default: all of the following are None, so download all data.
        stk = StkDataDownload()
    else:
        stk = StkDataDownload(*filepaths)
    stk.run(ticker, start_date, end_date)
Example #24
0
    def _init(self, force=False):
        """Initialize target connection, CPU topology and platform descriptor.

        @param force: forwarded to _init_target to force re-initialization
        """

        # Verbose logging when the "debug" feature flag is enabled.
        if self._feature("debug"):
            logging.getLogger().setLevel(logging.DEBUG)

        # Initialize target
        self._init_target(force)

        # Initialize target Topology for behavior analysis
        CLUSTERS = []

        # Build topology for a big.LITTLE systems
        if self.target.big_core and (self.target.abi == "arm64" or self.target.abi == "armeabi"):
            # Populate cluster for a big.LITTLE platform
            if self.target.big_core:
                # Load cluster of LITTLE cores
                CLUSTERS.append([i for i, t in enumerate(self.target.core_names) if t == self.target.little_core])
                # Load cluster of big cores
                CLUSTERS.append([i for i, t in enumerate(self.target.core_names) if t == self.target.big_core])
        # Build topology for an SMP systems
        elif not self.target.big_core or self.target.abi == "x86_64":
            # One cluster per distinct cluster id reported by the target.
            for c in set(self.target.core_clusters):
                CLUSTERS.append([i for i, v in enumerate(self.target.core_clusters) if v == c])
        self.topology = Topology(clusters=CLUSTERS)
        logging.info(r"%14s - Topology:", "Target")
        logging.info(r"%14s -    %s", "Target", CLUSTERS)

        # Initialize the platform descriptor
        self._init_platform()
Example #25
0
    def prepare_detector_header(self, take_dark, start, osc_range, exptime, npass, number_of_images, comment):
        """Push MXSETTINGS (wavelength, beam position, angles, ...) to the
        Pilatus server so they end up in the cbf image headers.

        @param start: start angle of the oscillation
        @param osc_range: angle increment per image
        (take_dark, exptime, npass, number_of_images and comment are part of
        the interface but not used in this method)
        """

        # Setting MXSETTINGS for the cbf image headers
        ax, bx = self.bl_config.beam_ax, self.bl_config.beam_bx
        ay, by = self.bl_config.beam_ay, self.bl_config.beam_by

        dist = self.bl_control.detector_distance.getPosition()
        wavlen = self.bl_control.energy.getCurrentWavelength()
        kappa_angle = self.kappa_hwo.getPosition()

        # (header keyword format string, value) pairs; the beam position is a
        # linear function of detector distance: a*dist + b.
        _settings = [
            ["Wavelength %.5f", wavlen],
            ["Detector_distance %.4f", dist / 1000.0],
            ["Beam_x %.2f", ax * dist + bx],
            ["Beam_y %.2f", ay * dist + by],
            ["Alpha %.2f", 49.64],
            ["Start_angle %.4f", start],
            ["Angle_increment %.4f", osc_range],
            ["Oscillation_axis %s", self.oscaxis],
            ["Detector_2theta %.4f", 0.0],
            ["Polarization %.3f", 0.990],
            ["Kappa %.4f", kappa_angle],
        ]

        # The scanned axis reports the start angle; the other axis reports its
        # current hardware position.
        # NOTE(review): when oscaxis == "Phi" the Chi value is read from
        # omega_hwo — confirm the axis/hardware-object naming is intended.
        if self.oscaxis == "Phi":
            _settings.append(["Chi %.4f", self.omega_hwo.getPosition()])
            _settings.append(["Phi %.4f", start])
        elif self.oscaxis == "Omega":
            _settings.append(["Phi %.4f", self.phi_hwo.getPosition()])
            _settings.append(["Chi %.4f", start])

        for _setting in _settings:
            _str_set = _setting[0] % _setting[1]
            logging.getLogger().info("MxSettings: " + _str_set)
            self.pilatusServer.SetMxSettings(_str_set)
Example #26
0
    def __init__(self, parent=None):
        """Set up file logging, the MySQL connection and the replay TCP server."""

        super(start, self).__init__(parent)

        # Root logger writes to a rotating file shared by the whole process.
        self.rootlogger = logging.getLogger("")
        self.logHandler = handlers.RotatingFileHandler(
            config["global"]["logpath"] + "replayServer.log", backupCount=15, maxBytes=524288
        )
        self.logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(name)-20s %(message)s")
        self.logHandler.setFormatter(self.logFormatter)
        self.rootlogger.addHandler(self.logHandler)
        # getattr instead of eval: same lookup of e.g. logging.INFO without
        # executing arbitrary text from the config file.
        self.rootlogger.setLevel(getattr(logging, config["replayServer"]["loglevel"]))
        self.logger = logging.getLogger(__name__)

        self.db = QtSql.QSqlDatabase.addDatabase("QMYSQL")
        self.db.setHostName(DB_SERVER)
        self.db.setPort(DB_PORT)

        self.db.setDatabaseName(DB_TABLE)
        self.db.setUserName(DB_LOGIN)
        self.db.setPassword(DB_PASSWORD)

        if not self.db.open():
            self.logger.error(self.db.lastError().text())

        self.updater = replayServer(self)
        if not self.updater.listen(QtNetwork.QHostAddress.Any, 11002):
            # Could not bind the port; the server is left non-functional.
            return
        else:
            self.logger.info(
                "starting the replay server on  %s:%i"
                % (self.updater.serverAddress().toString(), self.updater.serverPort())
            )
Example #27
0
def add_console_logger(level=logging.INFO, logger=""):
    """Attach a stderr StreamHandler at *level* to the named logger.

    Mirrors that logger's output to the console in addition to whatever
    handlers it already has (root logger by default).
    """
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s"))
    logging.getLogger(logger).addHandler(handler)
Example #28
0
    def handle(self, record):
        """Drop records until a logkey is configured; otherwise delegate.

        @param record: the LogRecord being emitted
        @return: False when the record is ignored, else the superclass result
        """
        if not self.logkey:
            # Warn via the root logger — this handler itself cannot deliver.
            clsname = self.__class__.__name__
            logging.getLogger().warn("{} called without a logkey.  Messages are ignored.".format(clsname))
            return False

        return super(SinklogHandler, self).handle(record)
Example #29
0
def logSetup(filename, log_size, daemon):
    """Build and return the 'Golden NativeWeb' logger.

    The main logger gets a console handler and a rotating file handler;
    tornado's access/general/application loggers share the same file
    handler.  ``daemon`` is accepted but not used here.
    """
    logger = logging.getLogger("Golden NativeWeb")
    logger.setLevel(-1000)  # far below DEBUG: let every record through

    file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=(log_size * (1 << 16)), backupCount=5)
    file_handler.setFormatter(
        logging.Formatter("[%(asctime)-12s.%(msecs)03d] " "%(levelname)-8s" " %(message)s", "%Y-%m-%d %H:%M:%S")
    )

    # Console handler has no formatter: plain message text.
    console = logging.StreamHandler()
    console.setLevel(-1000)
    logger.addHandler(console)

    logger.addHandler(file_handler)

    # Route tornado's own loggers into the same rotating file.
    for tornado_logger in ("tornado.access", "tornado.general", "tornado.application"):
        logging.getLogger(tornado_logger).addHandler(file_handler)

    return logger
Example #30
0
 def isProxyClient(self):
     """Return True when this machine uses another machine as its proxy.

     A machine is a proxy client when ``fk_use_as_proxy`` points at some
     machine other than itself.
     """
     # I'm a client if:
     # fk_use_as_proxy is set (ie I found a proxy server)
     # fk_use_as_proxy is not equal to my id (ie the proxy server is not me)
     result = self.fk_use_as_proxy is not None and self.fk_use_as_proxy != self.id
     # Lazy %-args: the message is only formatted when DEBUG is enabled.
     logging.getLogger().debug("isProxyClient(#%s): %s", self.getId(), result)
     return result