Example #1
    def setup(self, cfg):
        self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
        self.error_log.setLevel(self.loglevel)
        self.access_log.setLevel(logging.INFO)

        # set gunicorn.error handler
        if self.cfg.capture_output and cfg.errorlog != "-":
            for stream in sys.stdout, sys.stderr:
                stream.flush()

            self.logfile = open(cfg.errorlog, "a+")
            os.dup2(self.logfile.fileno(), sys.stdout.fileno())
            os.dup2(self.logfile.fileno(), sys.stderr.fileno())

        self._set_handler(self.error_log, cfg.errorlog, logging.Formatter(self.error_fmt, self.datefmt))

        # set gunicorn.access handler
        if cfg.accesslog is not None:
            self._set_handler(self.access_log, cfg.accesslog, fmt=logging.Formatter(self.access_fmt))

        # set syslog handler
        if cfg.syslog:
            self._set_syslog_handler(self.error_log, cfg, self.syslog_fmt, "error")
            self._set_syslog_handler(self.access_log, cfg, self.syslog_fmt, "access")

        if cfg.logconfig:
            if os.path.exists(cfg.logconfig):
                defaults = CONFIG_DEFAULTS.copy()
                defaults["__file__"] = cfg.logconfig
                defaults["here"] = os.path.dirname(cfg.logconfig)
                fileConfig(cfg.logconfig, defaults=defaults, disable_existing_loggers=False)
            else:
                msg = "Error: log config '%s' not found"
                raise RuntimeError(msg % cfg.logconfig)
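The capture_output branch above uses os.dup2() rather than rebinding sys.stdout, so output written by C extensions and child processes is captured as well. A minimal standalone sketch of that redirection technique (the log path is hypothetical):

import os
import sys

# duplicate the log file's descriptor onto fds 1 and 2; everything the
# process (or a child inheriting them) writes to stdout/stderr lands
# in the file
logfile = open("/tmp/error.log", "a+")
for stream in (sys.stdout, sys.stderr):
    stream.flush()
os.dup2(logfile.fileno(), sys.stdout.fileno())
os.dup2(logfile.fileno(), sys.stderr.fileno())

print("this line ends up in /tmp/error.log")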
Example #2
File: app.py Project: sequoiar/tproxy
    def configure_logging(self):
        """\
        Set the log level and choose the destination for log output.
        """
        self.logger = logging.getLogger('tproxy')

        fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
        datefmt = r"%Y-%m-%d %H:%M:%S"
        if not self.cfg.logconfig:
            handlers = []
            if self.cfg.logfile != "-":
                handlers.append(logging.FileHandler(self.cfg.logfile))
            else:
                handlers.append(logging.StreamHandler())

            loglevel = self.LOG_LEVELS.get(self.cfg.loglevel.lower(), logging.INFO)
            self.logger.setLevel(loglevel)
            for h in handlers:
                h.setFormatter(logging.Formatter(fmt, datefmt))
                self.logger.addHandler(h)
        else:
            if os.path.exists(self.cfg.logconfig):
                fileConfig(self.cfg.logconfig)
            else:
                raise RuntimeError("Error: logfile '%s' not found." %
                        self.cfg.logconfig)
Example #3
    def setup(self, cfg):
        if not cfg.logconfig:
            loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
            self.error_log.setLevel(loglevel)
            self.access_log.setLevel(logging.INFO)

            if cfg.errorlog != "-":
                # if an error log file is set redirect stdout & stderr to
                # this log file.
                sys.stdout = sys.stderr = LazyWriter(cfg.errorlog, 'a')

            # set gunicorn.error handler
            self._set_handler(self.error_log, cfg.errorlog,
                    logging.Formatter(self.error_fmt, self.datefmt))

            # set gunicorn.access handler
            if cfg.accesslog is not None:
                self._set_handler(self.access_log, cfg.accesslog,
                    fmt=logging.Formatter(self.access_fmt))

            # set syslog handler
            if cfg.syslog:
                self._set_syslog_handler(self.error_log, cfg, self.syslog_fmt)

        else:
            if os.path.exists(cfg.logconfig):
                fileConfig(cfg.logconfig, defaults=CONFIG_DEFAULTS,
                        disable_existing_loggers=False)
            else:
                raise RuntimeError("Error: log config '%s' not found" % cfg.logconfig)
Example #4
def run():
    lconfig.fileConfig('ut/' + loggerConf)
    log = logging.getLogger('root')
    socketFile = '/tmp/temp.socket'
    try:
        os.unlink(socketFile)
    except OSError:
        pass
    server = guestServer(socketFile)
    toGuest = guestIF(socketFile, log)
    toGuest.start()
    server.waitConnection()
    print repr(server.getMessage())
    #send Powerup
    channel = 1
    mlen = wordSize * 4
    powerupMessage = [channel, protocolMtype.forward, mlen, guestMType.powerup]
    powerdownMessage = [channel, protocolMtype.forward, mlen, guestMType.powerdown]
    heartbeatMessage = [channel, protocolMtype.forward, mlen, guestMType.heartbeat]
    server.channel.send(packMessage(powerupMessage))
    for i in range(0, 10):
        server.channel.send(packMessage(heartbeatMessage))
        time.sleep(5)
    time.sleep(15)
    for i in range(0, 3):
        server.channel.send(packMessage(heartbeatMessage))
        time.sleep(5)
    server.channel.send(packMessage(powerdownMessage))
    toGuest.stop()
    toGuest.join()
Example #5
    def setup_logging(self, level=None, configfile=None, **kwargs):
        """
        Support simple setup of logging subsystem.
        Ensures that the logging level is set.
        But note that the logging setup can only occur once.

        SETUP MODES:
          * :func:`logging.config.fileConfig()`, if ``configfile`` is provided.
          * :func:`logging.basicConfig()`, otherwise.

        .. code-block:: python
            # -- FILE: features/environment.py
            def before_all(context):
                context.config.setup_logging()

        :param level:       Logging level of root logger.
                            If None, use :attr:`logging_level` value.
        :param configfile:  Configuration filename for fileConfig() setup.
        :param kwargs:      Passed to :func:`logging.basicConfig()`
        """
        if level is None:
            level = self.logging_level      # pylint: disable=no-member

        if configfile:
            from logging.config import fileConfig
            fileConfig(configfile)
        else:
            # pylint: disable=no-member
            format_ = kwargs.pop("format", self.logging_format)
            datefmt = kwargs.pop("datefmt", self.logging_datefmt)
            logging.basicConfig(format=format_, datefmt=datefmt, **kwargs)
        # -- ENSURE: Default log level is set
        #    (even if logging subsystem is already configured).
        logging.getLogger().setLevel(level)
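The trailing setLevel() call is what makes repeated calls work: logging.basicConfig() is a no-op once the root logger already has handlers, so a second setup_logging() would silently skip the level change without it. A minimal reproduction of that behavior:

import logging

logging.basicConfig(level=logging.WARNING)
logging.basicConfig(level=logging.DEBUG)     # no-op: root already has handlers
assert logging.getLogger().level == logging.WARNING

logging.getLogger().setLevel(logging.DEBUG)  # applies regardless
assert logging.getLogger().level == logging.DEBUG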
Example #6
File: command.py Project: iwillau/Windmill
def command():
    if len(sys.argv) != 3:
        print 'Invalid arguments'
        print usage
        sys.exit(1)

    command = sys.argv[1]
    config_file = sys.argv[2]
    if not os.path.isfile(config_file):
        print 'Invalid Config File'
        print usage
        sys.exit(1)

    config_file = os.path.abspath(config_file)
    parser = SafeConfigParser()
    parser.read([config_file])
    fileConfig([config_file]) # TODO: This should check for logging config
                              #       and if not present fall back to sane defaults
    if parser.has_option('generate:main', 'base'):
        base = parser.get('generate:main', 'base')
    else:
        base = os.path.dirname(config_file)

    if command == 'generate':
        generate(parser, base)
    elif command == 'serve':
        serve(parser, base)
    else:
        print 'Invalid Command'
        print usage
        sys.exit(1)
Example #7
    def setup(self, cfg):
        self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
        self.error_log.setLevel(self.loglevel)
        self.access_log.setLevel(logging.INFO)

        # set gunicorn.error handler
        self._set_handler(self.error_log, cfg.errorlog,
                logging.Formatter(self.error_fmt, self.datefmt))

        # set gunicorn.access handler
        if cfg.accesslog is not None:
            self._set_handler(self.access_log, cfg.accesslog,
                fmt=logging.Formatter(self.access_fmt))

        # set syslog handler
        if cfg.syslog:
            self._set_syslog_handler(
                self.error_log, cfg, self.syslog_fmt, "error"
            )
            self._set_syslog_handler(
                self.access_log, cfg, self.syslog_fmt, "access"
            )

        if cfg.logconfig:
            if os.path.exists(cfg.logconfig):
                defaults = CONFIG_DEFAULTS.copy()
                defaults['__file__'] = cfg.logconfig
                defaults['here'] = os.path.dirname(cfg.logconfig)
                fileConfig(cfg.logconfig, defaults=defaults,
                           disable_existing_loggers=False)
            else:
                msg = "Error: log config '%s' not found"
                raise RuntimeError(msg % cfg.logconfig)
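The defaults dict above is handed to the ConfigParser that fileConfig() uses internally, so the INI file can interpolate %(here)s or %(__file__)s in any section, e.g. to open a log file relative to the config's own directory. A minimal sketch of the mechanism (the config path and handler section are hypothetical):

import os
from logging.config import fileConfig

config_path = "gunicorn_logging.conf"  # hypothetical path
fileConfig(config_path,
           defaults={"__file__": config_path,
                     "here": os.path.dirname(os.path.abspath(config_path))},
           disable_existing_loggers=False)

# inside the INI, a handler section could then read:
#   [handler_file]
#   class=FileHandler
#   args=('%(here)s/error.log', 'a')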
Example #8
def setup_environment():
    parser = ArgumentParser(description="Frontera strategy worker.")
    parser.add_argument('--config', type=str, required=True,
                        help='Settings module name, should be accessible by import')
    parser.add_argument('--log-level', '-L', type=str, default='INFO',
                        help="Log level, for ex. DEBUG, INFO, WARN, ERROR, FATAL")
    parser.add_argument('--strategy', type=str,
                        help='Crawling strategy class path')
    parser.add_argument('--partition-id', type=int,
                        help="Instance partition id.")
    args = parser.parse_args()
    settings = Settings(module=args.config)
    strategy_classpath = args.strategy if args.strategy else settings.get('CRAWLING_STRATEGY')
    if not strategy_classpath:
        raise ValueError("Couldn't locate strategy class path. Please supply it either using command line option or "
                         "settings file.")
    strategy_class = load_object(strategy_classpath)

    partition_id = args.partition_id if args.partition_id is not None else settings.get('SCORING_PARTITION_ID')
    if partition_id >= settings.get('SPIDER_LOG_PARTITIONS') or partition_id < 0:
        raise ValueError("Partition id (%d) cannot be less than zero or more than SPIDER_LOG_PARTITIONS." %
                         partition_id)
    settings.set('SCORING_PARTITION_ID', partition_id)

    logging_config_path = settings.get("LOGGING_CONFIG")
    if logging_config_path and exists(logging_config_path):
        fileConfig(logging_config_path)
    else:
        logging.basicConfig(level=args.log_level)
        logger.setLevel(args.log_level)
        logger.addHandler(CONSOLE)
    return settings, strategy_class
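The same prefer-the-INI-file, fall-back-to-basicConfig pattern reappears in example #49; factored out, it reduces to roughly this (names are illustrative):

import logging
from logging.config import fileConfig
from os.path import exists

def init_logging(config_path, level="INFO"):
    """Use the INI file when it exists, else a bare basicConfig()."""
    if config_path and exists(config_path):
        fileConfig(config_path)
    else:
        logging.basicConfig(level=level)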
Example #9
File: log.py Project: anukat2015/lingpy
def get_logger(config_dir=None, force_default_config=False, test=False):
    """Get a logger configured according to the lingpy log config file.

    Note: If no logging configuration file exists, it will be created.

    :param config_dir: Directory in which to look for/create the log config file.
    :param force_default_config: Configure the logger using the default config.
    :param test: Force reconfiguration of the logger.
    :return: A logger.
    """
    global _logger
    if _logger is None or force_default_config or test:
        _logger = logging.getLogger('lingpy')
        _logger.addFilter(CustomFilter())
        testing = len(sys.argv) and sys.argv[0].endswith('nosetests')
        if not (force_default_config or test) and testing:
            _logger.setLevel(logging.CRITICAL)
        else:
            cfg = Config('logging', default=LOGGING, config_dir=config_dir)
            remove = False
            if cfg.path.exists() and not force_default_config:
                fname = text_type(cfg.path)
            else:
                with NamedTemporaryFile(delete=False) as fp:
                    fp.write(LOGGING.encode('utf8'))
                    fname = fp.name
                    remove = True
            fileConfig(fname, disable_existing_loggers=False)
            if remove:
                os.remove(fname)
    return _logger
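The NamedTemporaryFile round-trip above can often be avoided, because fileConfig() also accepts a readable file-like object (example #10 passes an in-memory file the same way). A runnable sketch with an inline config standing in for the default LOGGING string (the contents here are an assumption, not lingpy's actual defaults):

from io import StringIO
from logging.config import fileConfig

DEFAULT_LOGGING = """\
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=simple

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=simple
args=(sys.stderr,)

[formatter_simple]
format=%(asctime)s [%(levelname)s] %(name)s: %(message)s
"""

fileConfig(StringIO(DEFAULT_LOGGING), disable_existing_loggers=False)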
Example #10
    def make_logging_handlers_and_tools(self, multiproc=False):
        """Creates logging handlers and redirects stdout."""

        log_stdout = self.log_stdout
        if sys.stdout is self._stdout_to_logger:
            # If we already redirected stdout we don't need to redo it
            log_stdout = False

        if self.log_config:
            if multiproc:
                proc_log_config = self._mp_config
            else:
                proc_log_config = self._sp_config

            if proc_log_config:
                if isinstance(proc_log_config, dict):
                    new_dict = self._handle_dict_config(proc_log_config)
                    dictConfig(new_dict)
                else:
                    parser = self._handle_config_parsing(proc_log_config)
                    memory_file = self._parser_to_string_io(parser)
                    fileConfig(memory_file, disable_existing_loggers=False)

        if log_stdout:
            #  Create a logging mock for stdout
            std_name, std_level = self.log_stdout

            stdout = StdoutToLogger(std_name, log_level=std_level)
            stdout.start()
            self._tools.append(stdout)
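A minimal sketch of the _parser_to_string_io() handoff used above: a ConfigParser that was assembled or patched in memory is serialized into an in-memory file, which fileConfig() then reads like any other config file (the helper name and path are illustrative):

from configparser import ConfigParser
from io import StringIO
from logging.config import fileConfig

def parser_to_string_io(parser):
    memory_file = StringIO()
    parser.write(memory_file)    # serialize the parser as INI text
    memory_file.seek(0)          # rewind so fileConfig() reads from the top
    return memory_file

parser = ConfigParser()
parser.read("logging.conf")      # hypothetical file
fileConfig(parser_to_string_io(parser), disable_existing_loggers=False)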
Example #11
File: log.py Project: pmcilwaine/hermes
def setup_logging(logfile=None):
    if not logfile:
        logfile = 'logging.ini'

    logging_dir = resource_filename('hermes_cms', 'data')
    _config = os.path.join(logging_dir, logfile)
    config.fileConfig(fname=_config)
Example #12
File: ivdsm.py Project: openSUSE/vdsm
def runVdsm(baseDir="/usr/share/vdsm/", configFilePath="/etc/vdsm/vdsm.conf", loggerConfigurationPath='/etc/vdsm/logger.conf'):
    """
    Starts a VDSM instance in a new thread and returns a tuple ``(ClientIF, Thread Running VDSM)``
    """
    if pwd.getpwuid(os.geteuid())[0] != "vdsm":
        raise Exception("You can't run vdsm with any user other then 'vdsm'.")

    sys.path.append(baseDir)

    from config import config
    from logging import config as lconfig
    import clientIF

    loggerConfFile = loggerConfigurationPath
    lconfig.fileConfig(loggerConfFile)
    log = logging.getLogger('vds')

    config.read(configFilePath)

    cif = clientIF.clientIF(log)

    t = threading.Thread(target = cif.serve)
    t.setDaemon(True)
    t.start()

    return (cif, t)
Example #13
def init_logging(conf):
    try:
        fileConfig(conf)
    except Exception as e:
        click.echo('FATAL. Unable to load logging configuration from config. error={0}'.format(e))
        sys.exit(1)
    return logging.getLogger(__name__)
Example #14
File: catchall.py Project: pumazi/anomaly
def main(argv=None):
    """Main logic hit by the commandline invocation."""
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument('config', help="path to the configuration file")
    args = parser.parse_args(argv)
    if args.config is not None:
        fileConfig(args.config)
        logger.info("Logging initialized")

    config = ConfigParser()
    config.read(args.config)
    # Grab the database uri setting from the config.
    Session = create_database_session(config.get('anomaly', 'database-uri'))

    # Queue initialization
    connection = BlockingConnection()
    channel = connection.channel()
    # Declare the exchange and an unnamed queue.
    channel.exchange_declare(exchange=EXCHANGE, type='topic')
    declared_queue = channel.queue_declare(queue=QUEUE, durable=True,
                                           exclusive=False)
    channel.queue_bind(exchange=EXCHANGE, queue=QUEUE,
                       routing_key=BINDING_KEY)

    # Setup up our consumer callback
    channel.basic_consume(consumer, queue=QUEUE)

    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    connection.close()
Example #15
    def __init__(self):
        log_folder = "./logs/gerrit"
        if not os.path.exists(log_folder):
            os.makedirs(log_folder)

        fileConfig('gerrit/log_configs/comment_added.ini', disable_existing_loggers=False)
        self.logger = logging.getLogger('gerrit.comment.added')
Example #16
File: base.py Project: anarang/rhi-pool
    def _configure_logging(self):
        """Configure logging for Insights.

        It will load the configuration from logging.conf if present
        in the project root directory; otherwise a custom logging
        format is used by default.

        """

        if self.configured:
            LOGGER.info("Already configured")
            return

        # All output should be made by the logging module, including warnings
        logging.captureWarnings(True)

        # Allow overriding logging config based on the presence of logging.conf
        # file on Insights's project root
        logging_conf_path = os.path.join(get_project_root(), 'logging.conf')
        if os.path.isfile(logging_conf_path):
            config.fileConfig(logging_conf_path)
        else:
            logging.basicConfig(
                format='%(levelname)s %(module)s:%(lineno)d: %(message)s'
            )
Example #17
    def load_env(self, env):
        self.env = env

        if self.env == 'dev':
            self.base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            self.conf_dir = os.path.join(self.base_dir, 'conf')
            self.log_conf = os.path.join(self.conf_dir, 'logging.conf')
        else:
            os_setting = None
            if 'linux' in sys.platform:
                os_setting = unix
            elif 'darwin' in sys.platform:
                os_setting = unix
            elif 'win32' in sys.platform:
                logging.error('windows unsupported')
                raise NotImplementedError
            else:
                logging.error('unknown system')
                raise NotImplementedError

            self.base_dir = os_setting['base_dir']
            self.conf_dir = os_setting['conf_dir']
            self.log_conf = os_setting['log_conf']

        logging_config.fileConfig(self.log_conf)
Example #18
def main():
    start = time.time()
    argv = sys.argv[1].split()
    modelDir = argv[0]
    inDir = argv[1]
    outDir = argv[2]

    fileConfig(BIN_DIR + "/logging_config.ini")
    logger = logging.getLogger()

    logger.info("getting models from: {}".format(modelDir))

    for f in os.listdir(inDir):
        inFile = os.path.join(inDir, f)
        outf = f.split(".")[0] + ".txt"
        outFile = os.path.join(outDir, outf)
        models = []
        for model in os.listdir(modelDir):
            modelName = model.split(".")[0]
            models.append((modelName, os.path.join(modelDir, model)))

        logger.info("predicting lines of text for: {}".format(inFile))
        predict.lm_predict_xml(models, inFile, outFile, top_labels=2)
        logger.info("PREDICTION COMPLETE.")
        logger.info("TIME TAKEN: {}".format(time.time() - start))
Example #19
File: logger.py Project: vamdt/spider
    def getLogger():
        if Logger.logger is None:
            directory = os.path.dirname(os.path.realpath(__file__))
            conf_file = os.path.join(directory, "logging.conf")
            fileConfig(conf_file)
            Logger.logger = logging.getLogger("file")
        return Logger.logger
Example #20
def command():
    if len(sys.argv) != 3:
        print 'Invalid arguments'
        print usage
        sys.exit(1)

    command = sys.argv[1]
    config_file = sys.argv[2]
    if not os.path.isfile(config_file):
        print 'Invalid Config File'
        print usage
        sys.exit(1)

    config_file = os.path.abspath(config_file)
    parser = SafeConfigParser({'source': 'source',
                               'static': 'static',
                               'data': 'data',
                               })
    parser.read([config_file])
    fileConfig([config_file]) # TODO: This should check for logging config
                              #       and if not present fall back to sane defaults
    if not parser.has_option('pagepress:main', 'base'):
        parser.set('pagepress:main', 'base', os.path.dirname(config_file))

    g = Generator(parser)

    if command == 'generate':
        g.update()
    elif command == 'serve':
        serve(g)
    else:
        print 'Invalid Command'
        print usage
        sys.exit(1)
Example #21
    def create_logger(name, configuration_file='example_logger.conf'):
        try:
            fileConfig(configuration_file)
            return getLogger(name)
        except Exception as e:
            print("Couldn't create logger using %s" % configuration_file, e)
            return LoggerFactory.__create_console_logger(name)
Example #22
    def main(self, argv=None, setup_logging=False):
        self.parser.add_argument('--paste-global', action='append',
                                 help="Define a variable passed as "
                                 "global_conf, in the form 'var=value'")
        options = self.parser.parse_args(argv)
        if not hasattr(options, "cmd"):
            self.parser.error("too few arguments")
        else:
            if options.paste_global:
                config_args = {
                    pair[0]: pair[1] if len(pair) > 1 else ''
                    for pair in (
                        [p.strip().replace('\\=', '=') for p in re.split(r'(?<!\\)=', arg, 1)]
                        for arg in options.paste_global
                        )
                    }
            else:
                config_args = {}
            cfg = PasteSupportedConfig(
                file_=options.config,
                ini_section=options.name,
                cmd_opts=options,
                config_args=config_args,
                )
            if setup_logging:
                logging_config.fileConfig(
                    cfg.config_file_name,
                    cfg.config_defaults
                    )
            self.run_cmd(cfg, options)
Example #23
def initializeLogging(configFile):
    from logging.config import fileConfig
    from twisted.python.log import PythonLoggingObserver

    fileConfig(configFile)
    observer = PythonLoggingObserver()
    observer.start()
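A short usage note on the observer wiring above: once observer.start() has run, events emitted through twisted's own log API are forwarded into the stdlib loggers that fileConfig() just configured, e.g.:

from twisted.python import log

log.msg("forwarded into the stdlib 'twisted' logger")  # logged at INFO
log.err(RuntimeError("boom"), "while handling X")      # logged at ERROR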
Example #24
    def configure( self, xmlDom ):
        """
        called with an XmlDom that contains the configuration for the task runner.
        It looks for its configuration in the provided Xml DOM and then creates the
        required python object using the provided module and class name.

        This new object is then configured as well, the task should not start being active
        until start is called.
        """
        myDom = getNamedElem( xmlDom, "TaskRunner" )
        logCfg = getAttrText( myDom, "logCfg" )
        if logCfg:
            fileConfig( logCfg )
        else:
            doDebug = getAttrText( myDom, "debug" )
            if doDebug == 'debug':
                logging.basicConfig(level=logging.DEBUG)
                _log.debug( 'debugging' )
            else:
                logging.basicConfig(level=logging.INFO)

        tasks = myDom.getElementsByTagName('Task')
        for task in tasks:
            # create and configure.
            moduleName = getAttrText( task, "module" )
            name = getAttrText( task, "name" )
            newTask = self.importExtension( moduleName, name )
            if newTask:
                newTask.configure( task )
                self._tasks[name] = newTask
                _log.info('loaded %s:%s' % (moduleName, name) )
Example #25
    def setup(self, cfg):
        loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
        self.error_log.setLevel(loglevel)
        self.access_log.setLevel(logging.INFO)

        # set gunicorn.error handler
        self._set_handler(
            self.error_log, cfg.errorlog,
            logging.Formatter(self.error_fmt, self.datefmt), "error")

        # set gunicorn.access handler
        if cfg.accesslog is not None:
            self._set_handler(
                self.access_log, cfg.accesslog,
                logging.Formatter(self.access_fmt), "access")

        # set syslog handler
        if cfg.syslog:
            self._set_syslog_handler(
                self.error_log, cfg, self.syslog_fmt, "error"
            )
            self._set_syslog_handler(
                self.access_log, cfg, self.syslog_fmt, "access"
            )

        if cfg.logconfig:
            if os.path.exists(cfg.logconfig):
                fileConfig(
                    cfg.logconfig, defaults=CONFIG_DEFAULTS,
                    disable_existing_loggers=False)
            else:
                raise RuntimeError(
                    "Error: log config '%s' not found" % cfg.logconfig
                )
Example #26
    def __init__(self, base):
        syncdir = os.path.dirname(base)
        self.topdir = os.path.split(syncdir)[0]

        if 'WEAVE_TESTFILE' in os.environ:
            test_filename = 'tests_%s.ini' % os.environ['WEAVE_TESTFILE']
        else:
            test_filename = 'tests.ini'

        while True:

            ini_file = os.path.join(self.topdir, test_filename)
            if os.path.exists(ini_file):
                break

            if ini_file == ("/%s" % test_filename) \
                or ini_file == test_filename:
                raise IOError("cannot locate %s" % test_filename)

            self.topdir = os.path.split(self.topdir)[0]

        cfg = RawConfigParser()
        cfg.read(ini_file)

        # loading loggers
        if cfg.has_section('loggers'):
            fileConfig(ini_file)

        here = {'here': os.path.dirname(os.path.realpath(ini_file))}
        config = dict([(key, value % here) for key, value in
                      cfg.items('DEFAULT') + cfg.items('app:main')])
        self.config = convert_config(config)
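The has_section('loggers') guard above (repeated in examples #28 and #29) keeps fileConfig() from raising when an application INI carries no logging sections at all; reduced to its core (the path is hypothetical):

from configparser import RawConfigParser
from logging.config import fileConfig

ini_file = "tests.ini"  # hypothetical path
cfg = RawConfigParser()
cfg.read(ini_file)
if cfg.has_section("loggers"):  # fileConfig() requires this section
    fileConfig(ini_file, disable_existing_loggers=False)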
Example #27
    def SvcDoRun(self):
        key=winreg.OpenKey(rootkey, subkey, 0, winreg.KEY_READ)
        port_to_bind=int(winreg.QueryValueEx(key, 'Port')[0])
        data_dir=str(winreg.QueryValueEx(key, 'DataDir')[0])
        app_config=data_dir + r'\mapproxy.yaml'
        log_conf=data_dir + r'\log.ini'

        cherrypy.config.update({
            'global':{
            'log.screen': False,
            'tools.log_tracebacks.on': True,
            'engine.autoreload.on': False,
            'engine.SIGHUP': None,
            'engine.SIGTERM': None
            }
        })
        
        fileConfig(log_conf, {'here': data_dir})
        application=make_wsgi_app(app_config)
        d=wsgiserver.WSGIPathInfoDispatcher({'/mapproxy': application})
        self.server=wsgiserver.CherryPyWSGIServer( (server_ip, port_to_bind), d, numthreads=10, server_name=None, max=-1, request_queue_size=2048, timeout=10, shutdown_timeout=5)
        # Infinite loop serving requests
        try:
            self.server.start()
        except Exception as e:
            # Log an error event
            servicemanager.LogErrorMsg("MapProxy failed to start:\n%s" % e)
Example #28
def main(argv=sys.argv):
    if len(argv) == 1:
        config_filename = os.path.join(os.getcwd(), 'hutt.ini')
    else:
        config_filename = argv[1]

    parser = configparser.ConfigParser()    
    parser.read(config_filename)

    if parser.has_section('loggers'):
        fileConfig(
            config_filename,
            dict(__file__=config_filename,
                 here=os.path.dirname(config_filename)),
            )

    config = DEFAULTS.copy()
    config.update(dict(parser.items(parser.sections()[0])))

    config.setdefault('nick', 'jabba')
    if 'password' not in config:
        config['password'] = getpass.getpass(
            "Password for {}: ".format(config['jid']))

    config['handlers'] = [resolve(name) for name in config['handlers'].split()]

    xmpp = Bot(**config)
    xmpp.connect()
    xmpp.process(block=True)
Example #29
    def __init__(self, ini_path=None, ini_dir=None, load_sections=None):
        """
        :param ini_dir: Directory path in which to start looking for the ini
        file.  Will climb the file tree from here looking for 'tests.ini' file,
        unless 'WEAVE_TESTFILE' env var is set, in which case it will climb the
        file tree from here looking for 'tests_${WEAVE_TESTFILE}.ini'.

        :param ini_path: Full path to configuration file.  Takes precedence
        over ini_dir, if both are provided.  Raises IOError if file doesn't
        exist.

        One or the other of `ini_dir` or `ini_path` arguments MUST be provided.

        :param load_sections: A sequence of strings that name the configuration
        sections that should be dynamically loaded.  Any entry in this sequence
        could alternately be a 2-tuple containing the name of the section and
        the corresponding class parameter value to use.
        """
        self.start_dir = ini_dir
        if ini_path:
            if not os.path.isfile(ini_path):
                raise IOError("invalid config file: %s" % ini_path)
            ini_dir = os.path.dirname(ini_path)
        elif ini_dir:
            if 'WEAVE_TESTFILE' in os.environ:
                test_filename = 'tests_%s.ini' % os.environ['WEAVE_TESTFILE']
            else:
                test_filename = 'tests.ini'

            while True:
                ini_path = os.path.join(ini_dir, test_filename)
                if os.path.exists(ini_path):
                    break

                if ini_path == ("/%s" % test_filename) \
                    or ini_path == test_filename:
                    raise IOError("cannot locate %s" % test_filename)

                ini_dir = os.path.split(ini_dir)[0]
        else:
            raise ValueError('No ini_path or ini_dir specified.')

        self.ini_dir = ini_dir
        self.ini_path = ini_path

        ini_cfg = RawConfigParser()
        ini_cfg.read(ini_path)

        # loading loggers
        if ini_cfg.has_section('loggers'):
            fileConfig(ini_path)

        self.config = self.convert_config(ini_cfg, ini_path)

        if load_sections is not None:
            for section in load_sections:
                if isinstance(section, tuple):
                    self.add_class(section[0], cls_param=section[1])
                else:
                    self.add_class(section)
Example #30
    def run(self):
        if 'logging_config' in self.config_mgr:
            fileConfig(self.config_mgr['logging_config'])
        else:
            logging.basicConfig()
            if 'log_level' in self.config_mgr:
                level = self.config_mgr['log_level']
                level = getattr(logging, level.upper())
                logging.getLogger('winchester').setLevel(level)

        self.pm_lock = threading.Lock()
        self.pipe = PipelineManager(self.config_mgr)

        #  TODO add trigger defs from the DB at startup

        # start threads
        self.stream_def_thread = threading.Thread(name='stream_defs_pipe',
                                                  target=pipe_stream_definition_consumer,
                                                  args=(self.kafka_config, self.pm_lock, self.pipe,))

        self.pipeline_ready_thread = threading.Thread(name='pipeline',
                                                      target=self.pipeline_ready_processor,
                                                      args=(self.pm_lock, self.pipe,))

        self.stream_def_thread.start()
        self.pipeline_ready_thread.start()

        self.stream_def_thread.join()
        self.pipeline_ready_thread.join()
        log.debug('Exiting')
Example #31
# create the logs folder if it does not already exist
try:
    os.mkdir(CARRIERETHERNET_PARSER_LOG_FOLDER)
except OSError:
    pass

# set the logging files
logging.CARRIERETHERNET_PARSER_LOGFILE_MAIN = "{}/{}.log".format(
    CARRIERETHERNET_PARSER_LOG_FOLDER, CARRIERETHERNET_PARSER_LOGFILE)
logging.CARRIERETHERNET_PARSER_LOGFILE_MOD = "{}/{}-mod.log".format(
    CARRIERETHERNET_PARSER_LOG_FOLDER, CARRIERETHERNET_PARSER_LOGFILE)

# load the logging configuration
fileConfig('{}/bin/logging.conf'.format(CARRIERETHERNET_PARSER_DIR))


class Config:

    main_logger = logging.getLogger("carrierethernet-parser")
    mod_logger = logging.getLogger("nmsnetlib")

    GLOBAL_KEY = "some value"

    APP_DIR = CARRIERETHERNET_PARSER_DIR
    CARRIERETHERNET_PARSER_CONFIG_FOLDER = os.environ.get(
        "CARRIERETHERNET_PARSER_CONFIG_FOLDER",
        os.path.join(SCRIPTDIR, "configs"))
    CARRIERETHERNET_PARSER_DB_FOLDER = os.environ.get(
        "CARRIERETHERNET_PARSER_DB_FOLDER", os.path.join(SCRIPTDIR, "db"))
Example #32
File: env.py Project: senou/portal
# @Desc    :

from __future__ import with_statement

from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.

config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata = None

import os
import sys

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# print(f"current path: {BASE_DIR}")
# /Users/xxxx/python_code/project
Example #33
        # ready db
        db = create_db(params.biplane_vicon_db_dir,
                       BiplaneViconSubject,
                       include_anthro=True)
        db['age_group'] = db['Age'].map(lambda age: '<35'
                                        if age < 40 else '>45')
        if params.excluded_trials:
            db = db[~db['Trial_Name'].str.contains('|'.join(params.
                                                            excluded_trials))]
        db['Trial'].apply(pre_fetch)

    # relevant parameters
    output_path = Path(params.output_dir)

    # logging
    fileConfig(config_dir / 'logging.ini', disable_existing_loggers=False)
    log = logging.getLogger(params.logger_name)

    # prepare db
    db_elev = db.loc[db['Trial_Name'].str.contains('_CA_|_SA_|_FE_')].copy()
    prepare_db(db_elev, params.torso_def, params.scap_lateral,
               params.dtheta_fine, params.dtheta_coarse,
               [params.min_elev, params.max_elev])
    db_elev['traj_interp'].apply(add_st_gh_contrib)

    #%%
    plot_utils.init_graphing(params.backend)
    plt.close('all')
    for activity, activity_df in db_elev.groupby('Activity', observed=True):
        # overall
        pdf_file_path = output_path / ('ind_elev_' + activity + '_' +
Example #34
# type: ignore
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context
from database.archived_databases.stories import stories

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name, disable_existing_loggers=False)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = stories.Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.
Example #35
# @Date     : 03/23/2019 21:25:07
# @Project  : GraphOpt
# @Author   : FEI, [email protected]
# @Desc     : implementation of graph-structured iterative hard thresholding algorithm, Graph-IHT

from __future__ import print_function

import os
import sys

sys.path.append(os.path.abspath(''))

import logging
from logging.config import fileConfig

fileConfig('../logging.conf')
# note: logger is not thread-safe, pls be careful when running multi-threads with logging
logger = logging.getLogger('fei')

import time
import pickle

import numpy as np

from objs import EMS
from utils import evaluate, normalize_gradient, normalize
from data.utils import visual_grid_graph, visual_grid_graph_feature

from sparse_learning.proj_algo import head_proj
from sparse_learning.proj_algo import tail_proj
Example #36
import os
import sys

from tornado.options import define  # assumed source of define() below
from tool.settings import get_data_conf_setting
import threading
import time
from elasticsearch import helpers
from old.models import upsert_course, get_course_info_specify_fields
from tool.models import es_instance
import traceback
from tool.models import noun_set, manual_core_word_set
from logging import config, getLogger
from tool.settings import DEF_SORT_TYPE_STATUS, DEF_SORT_TYPE_SCORE, DEF_QUERY_TYPE_MIX_CK
reload(sys)
sys.setdefaultencoding('utf-8')
DEF_CUR_FILE_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(DEF_CUR_FILE_PATH)

config.fileConfig('{0}/tool/logging.conf'.format(DEF_CUR_FILE_PATH))
search_logger = getLogger('search')
update_logger = getLogger('update')
define("port", default=9999, help="run on the given port", type=int)


class MyThread(threading.Thread):
    def __init__(self, func, args, name=''):
        threading.Thread.__init__(self)
        self.name = name
        self.func = func
        self.args = args
        self.res = None

    def getResult(self):
        return self.res
Example #37
StartTime = time.time()


flag = """
\033[37m┌─────────────────────────────────────────────┐\033[0m\n\033[37m│\033[44m\033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[0m\033[91;101m#########################\033[0m\033[37m│\n\033[37m│\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m  \033[0m\033[97;107m:::::::::::::::::::::::::\033[0m\033[37m│\n\033[37m│\033[44m\033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[0m\033[91;101m#########################\033[0m\033[37m│\n\033[37m│\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m  \033[0m\033[97;107m:::::::::::::::::::::::::\033[0m\033[37m│\n\033[37m│\033[44m\033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[0m\033[91;101m#########################\033[0m\033[37m│\n\033[37m│\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m  \033[0m\033[97;107m:::::::::::::::::::::::::\033[0m\033[37m│\n\033[37m│\033[44m\033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[97m★\033[0m\033[44m \033[0m\033[91;101m#########################\033[0m\033[37m│      \033[1mUnited we stand, Divided we fall\033[0m\n\033[37m│\033[97;107m:::::::::::::::::::::::::::::::::::::::::::::\033[0m\033[37m│ \033[1mKigyo Project, a tribute to USS Enterprise.\033[0m\n\033[37m│\033[91;101m#############################################\033[0m\033[37m│\n\033[37m│\033[97;107m:::::::::::::::::::::::::::::::::::::::::::::\033[0m\033[37m│\n\033[37m│\033[91;101m#############################################\033[0m\033[37m│\n\033[37m│\033[97;107m:::::::::::::::::::::::::::::::::::::::::::::\033[0m\033[37m│\n\033[37m│\033[91;101m#############################################\033[0m\033[37m│\n\033[37m└─────────────────────────────────────────────┘\033[0m\n
"""

def get_user_list(key):
    # Import here to evade a circular import
    from tg_bot.modules.sql import nation_sql
    royals = nation_sql.get_royals(key)
    return [a.user_id for a in royals]

# enable logging

fileConfig('logging.ini')

#print(flag)
log = logging.getLogger('[Enterprise]')
logging.getLogger('ptbcontrib.postgres_persistence.postgrespersistence').setLevel(logging.WARNING)
log.info("[KIGYO] Kigyo is starting. | An Eagle Union Project. | Licensed under GPLv3.")
log.info("[KIGYO] Not affiliated to Azur Lane or Yostar in any way whatsoever.")
log.info("[KIGYO] Project maintained by: github.com/Dank-del (t.me/dank_as_fuck)")

# if version < 3.7, stop bot.
if sys.version_info < (3, 7):
    log.error(
        "[KIGYO] You MUST have a python version of at least 3.7! Multiple features depend on this. Bot quitting."
    )
    quit(1)
Example #38
        sound = sound.set_channels(channels)
    if frame_rate:
        sound = sound.set_frame_rate(frame_rate)
    try:
        sound.export(output_audio, format=output_format)
    except IOError as e:
        logger.error(f'Error while converting {input_audio} to {output_audio}: {e}')
    return sound.channels, sound.frame_rate


_to_16k_mono_wav = partial(audio_converter, output_format='wav',
                          frame_rate=16000, channels=1)


def convert_to_16k_mono_wav(input, output, input_format='mp3'):
    _to_16k_mono_wav(input, output, input_format)


def convert_to_mono_wav_original_frame_rate(input, output, input_format='mp3'):
    audio_converter(input, output, input_format, output_format='wav', channels=1)


if __name__ == '__main__':
    from logging.config import fileConfig
    fileConfig('logging.conf')

    fire.Fire({
              'audio': audio_converter,
              'to_16k_mono_wav': convert_to_16k_mono_wav,
              })
Example #39
from flask_jwt import jwt_required
from flask import request
from . import rest
from app import utils
from app.model import Teacher as TeacherModel
import logging, datetime
from logging.config import fileConfig
fileConfig('conf/log-app.conf')
logger = logging.getLogger(__name__)

# import faker data
#from app.mocks import teacher as fakerTeacher


#query all teachers
@rest.route('teachers/', methods=['GET'])
@jwt_required()
def get_teachers():
    print("recevie get all teachers requests")
    limit = int(request.args.get('limit'))
    page = int(request.args.get('page'))
    name = request.args.get('name')
    if name:
        total, teachers = TeacherModel.SearchTeacherByName(page, limit, name)
    else:
        total, teachers = TeacherModel.GetTeachers(page, limit)
    return utils.jsonresp(jsonobj={
        'total': total,
        'limit': limit,
        'teachers': teachers
    })
Example #40
import numpy as np
import torch
from os import path
from torch import optim
from torch.nn import functional

from ctgan.conditional import ConditionalGenerator
from ctgan.models import Discriminator, Generator
from ctgan.sampler import Sampler
from ctgan.transformer import DataTransformer

import logging
from logging.config import fileConfig
dirname = path.dirname(__file__)
logconfig = path.join(dirname, '../logging_config.ini')
fileConfig(logconfig)
logger = logging.getLogger(__name__)


class CTGANSynthesizer(object):
    """Conditional Table GAN Synthesizer.

    This is the core class of the CTGAN project, where the different components
    are orchestrated together.

    For more details about the process, please check the [Modeling Tabular data using
    Conditional GAN](https://arxiv.org/abs/1907.00503) paper.

    Args:
        embedding_dim (int):
            Size of the random sample passed to the Generator. Defaults to 128.
Example #41
File: env.py Project: NexMirror/Kallithea
# [alembic] sqlalchemy.url.
database_url = (config.get_main_option('sqlalchemy.url')
                or config.get_section_option('app:main', 'sqlalchemy.url'))

# Configure default logging for Alembic. (This can be overridden by the
# config file, but usually isn't.)
logging.getLogger('alembic').setLevel(logging.INFO)

# Setup Python loggers based on the config file provided to the alembic
# command. If we're being invoked via the Alembic API (presumably for
# stamping during "kallithea-cli db-create"), config_file_name is not available,
# and loggers are assumed to already have been configured.
if config.config_file_name:
    fileConfig(config.config_file_name, {
        '__file__': config.config_file_name,
        'here': os.path.dirname(config.config_file_name)
    },
               disable_existing_loggers=False)


def include_in_autogeneration(object, name, type, reflected, compare_to):
    """Filter changes subject to autogeneration of migrations. """

    # Don't include changes to sqlite_sequence.
    if type == 'table' and name == 'sqlite_sequence':
        return False

    return True


def run_migrations_offline():
Example #42
import logging
import os
import random
import string
import unittest

from logging.config import fileConfig
from src.lambda_function import lambda_handler as handler
from StringIO import StringIO

## CONST
BODYSIZE = 10
STRINGLEN = 10

# create logger assuming running from ./run script
fileConfig('tests/logging_config.ini')
logger = logging.getLogger(__name__)


def _random_string_builder():
    s = string.lowercase + string.digits
    return ''.join(random.sample(s, STRINGLEN))


class TestLambdaFunction(unittest.TestCase):
    """ Unit testing logzio lambda function """

    def setUp(self):
        # Set os.environ for tests
        os.environ['URL'] = "https://listener.logz.io:8071"
        os.environ['TOKEN'] = "123456789"
Example #43
    def setUp(self):
        fileConfig(os.path.join(os.path.dirname(__file__), 'logger.ini'))
Example #44
        default=1
    )  #for single experiments just include one value, for combinations a list [1, 2]
    parser.add_argument('--competing_ccalg',
                        '-a',
                        choices=['cubic', 'bbr', 'reno', 'none'],
                        dest='competing_ccalgs',
                        action='append',
                        default=['reno', 'reno'])
    parser.add_argument('--duration', '-d', type=int, default=60)
    parser.add_argument('--chrome',
                        '-s',
                        action='store_true',
                        help='Run website traffic with headless chrome')
    parser.add_argument('--repeat', '-r', type=int, default=1)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # configure logging
    log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 '../logging_config.ini')
    fileConfig(log_file_path)
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    args = parse_args()
    logging.info('Arguments: {}'.format(args))
    start.main(args)

# Tests that can be run as separate experiments at one go
# iperf(ccalg), video(ccalg), web video(website)
Example #45
import argparse
from logging.config import fileConfig

fileConfig("logger.ini")

args_parser = argparse.ArgumentParser(
    description='Sort images and videos by oldest timestamp')
args_parser.add_argument('destination',
                         type=str,
                         help='Full path to destination directory')
args_parser.add_argument('source',
                         type=str,
                         nargs='+',
                         help='Input directory(s) to process')

if __name__ == "__main__":
    args = args_parser.parse_args()

    from pathlib import Path
    from ImageSorter import ImageSorter

    try:
        image_sorter = ImageSorter(Path(args.destination))
        for source_path_str in args.source:
            image_sorter.sort_dir(Path(source_path_str))
    except Exception as e:
        ImageSorter.logger.exception(f"Sort failed: {str(e)}")
Example #46
# coding=utf-8
import logging
import os

from logging.config import fileConfig

# load the logger configuration
cur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
logger_conf_path = os.path.join(cur_dir, './conf/logger.conf')
fileConfig(logger_conf_path)

# global logger
logger = logging.getLogger("doctorLogger")
Example #47
def dallinger():
    """Dallinger command-line utility."""
    from logging.config import fileConfig
    fileConfig(os.path.join(os.path.dirname(__file__), 'logging.ini'),
               disable_existing_loggers=False)
Example #48
import logging
from logging.config import fileConfig
from os import path

#logpath = path.join(path.dirname(path.abspath(__file__)), 'logging.conf')
logpath = 'logging.conf'
#logpath='logging_config.ini'
print(logpath)
fileConfig(logpath)

# create logger
logger = logging.getLogger('simpleExample')

# 'application' code
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
logger.error('error message')
logger.critical('critical message')
Example #49
File: db.py Project: ShT3ch/frontera
    parser.add_argument('--no-batches', action='store_true',
                        help='Disables generation of new batches.')
    parser.add_argument('--no-incoming', action='store_true',
                        help='Disables spider log processing.')
    parser.add_argument('--no-scoring', action='store_true',
                        help='Disables scoring log processing.')
    parser.add_argument('--config', type=str, required=True,
                        help='Settings module name, should be accessible by import.')
    parser.add_argument('--log-level', '-L', type=str, default='INFO',
                        help="Log level, for ex. DEBUG, INFO, WARN, ERROR, FATAL.")
    parser.add_argument('--port', type=int, help="Json Rpc service port to listen.")
    args = parser.parse_args()

    settings = Settings(module=args.config)
    if args.port:
        settings.set("JSONRPC_PORT", [args.port])

    logging_config_path = settings.get("LOGGING_CONFIG")
    if logging_config_path and exists(logging_config_path):
        fileConfig(logging_config_path)
    else:
        logging.basicConfig(level=args.log_level)
        logger.setLevel(args.log_level)
        # logger.addHandler(CONSOLE)

    worker = DBWorker(settings, args.no_batches, args.no_incoming, args.no_scoring)
    server = WorkerJsonRpcService(worker, settings)
    server.start_listening()
    worker.run()

Example #50
File: __init__.py Project: saulm/zato
def setup_logging():
    logging.addLevelName(TRACE1, 'TRACE1')
    from logging import config
    config.fileConfig(
        os.path.join(os.environ['ZATO_REPO_LOCATION'], 'logging.conf'))
Example #51
File: logger.py Project: malirod/pylua
def init_logging():
    from logging.config import fileConfig

    fileConfig(LOG_CONFIG)
Example #52
from rumster.db import get_session
from rumster.db import base
from rumster.db import models

from alembic import context

from logging import config as log_config

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
log_config.fileConfig(config.config_file_name)

# set the target for 'autogenerate' support
target_metadata = base.BASE.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
Example #53
File: run.py Project: jasonthomas/trunion
import os
import site
import sys

from ConfigParser import NoSectionError
from logging.config import fileConfig

ROOT = os.path.dirname(os.path.abspath(__file__))
if os.path.splitext(os.path.basename(__file__))[0] == 'cProfile':
    if os.environ.get('TRUNION_PATH'):
        ROOT = os.environ['TRUNION_PATH']
    else:
        print 'When using cProfile you must set $TRUNION_PATH'
        sys.exit(2)

path = lambda *a: os.path.join(ROOT, *a)

site.addsitedir(path('vendor'))
site.addsitedir(path('vendor/lib/python'))

# setting up the egg cache to a place where apache can write
os.environ['PYTHON_EGG_CACHE'] = '/tmp/python-eggs'

# setting up logging
ini_file = '/etc/mozilla-services/trunion/production.ini'
ini_file = os.environ.get('TRUNION_INI', ini_file)
try:
    fileConfig(ini_file)
except NoSectionError:
    pass

# running the app using Paste
from paste.deploy import loadapp

application = loadapp('config:%s' % ini_file)
Example #54
def main():
    start = time.time()
    # Arguments passed to the analysis
    parser = argparse.ArgumentParser(
        description='Telegram messages analyzer. Author: Matteo Minardi')
    parser.add_argument('-p',
                        '--path',
                        help='The path of the folder or the zip file',
                        required=True)
    parser.add_argument('-e',
                        '--export',
                        help='Flag to export',
                        required=False,
                        default=False)
    parser.add_argument('-t',
                        '--type',
                        help='The type of analysis',
                        required=False,
                        choices=["USER", "WORD", "DOW", "*"],
                        default="*")
    args = parser.parse_args()

    # Configuration file
    config = configparser.ConfigParser(allow_no_value=True)
    config.read('config.ini')

    CONFIG['result_filename'] = config.get('DEFAULT', 'result_filename')
    CONFIG['delimiter'] = config.get('csv', 'delimiter')
    CONFIG['ignore'] = config.get('data', 'ignore').split(',')

    # Logging setup
    fileConfig('logger.ini')
    logger = logging.getLogger(LOGGER_NAME)

    html_filenames = []
    html_dict = {}
    # Check whether the path is a zip file or a directory
    if (FileUtility.is_zipfile(args.path)):
        logger.info("Getting the files from the zip")
        z = zipfile.ZipFile(args.path)
        html_filenames = [
            name for name in z.namelist() if name.endswith('.html')
        ]
        if (len(html_filenames) > 0):
            logger.info("Found %i files.", len(html_filenames))
        else:
            logger.info("No files found.")
            sys.exit(OK)

        for html_filename in html_filenames:
            try:
                with z.open(html_filename) as html_file:
                    html_helper = HTMLUtility(html_file)
            except FileNotFoundError:
                logger.debug("File %s not found.", args.path)
                sys.exit(ERROR_CODE_FILENOTFOUND)

            if (html_helper is None):
                logger.debug("Error while creating the HTML parser.")
                sys.exit(ERROR_CODE_HTMLPARSER)
            else:
                html_dict[html_filename] = html_helper

    elif (FileUtility.is_directory(args.path)):
        # Opening and parsing a html files from args.path
        logger.info("Getting the files from the directory")
        html_filenames = glob.glob(args.path + "*.html")

        if (len(html_filenames) > 0):
            logger.info("Found %i files.", len(html_filenames))
        else:
            logger.info("No files found.")
            sys.exit(OK)

        for html_filename in html_filenames:
            try:
                with open(html_filename, encoding="utf8") as html_file:
                    html_helper = HTMLUtility(html_file)
            except FileNotFoundError:
                logger.debug("File %s not found.", args.path)
                sys.exit(ERROR_CODE_FILENOTFOUND)

            if (html_helper is None):
                logger.debug("Error while creating the HTML parser.")
                sys.exit(ERROR_CODE_HTMLPARSER)
            else:
                html_dict[html_filename] = html_helper
    else:
        logger.debug(
            "Path %s is not a valid '.zip' file or an existing directory.",
            args.path)
        sys.exit(ERROR_CODE_WRONG_PATH)

    messages = []
    error_counter = 0
    previous_user = ''
    for filename, html_helper in html_dict.items():
        logger.info("Extracting messages from %s", filename)
        messages_elements = html_helper.soup.select(ta.MESSAGE_SELECTOR)
        for message_el in messages_elements:
            try:
                current_classes = message_el.attrs['class']

                id = message_el.get('id').replace('message', '')
                ts = message_el.find(ta.TIMESTAMP_ELEMENT,
                                     class_=ta.TIMESTAMP_CLASS).get('title')
                ts = int(
                    DateUtility.string_to_timestamp(ts, ta.TIMESTAMP_FORMAT))
                if ta.MESSAGE_JOINED_CLASS in current_classes:
                    # Joined messages omit the user; reuse the previous one.
                    user = previous_user
                else:
                    user = message_el.find(ta.USER_ELEMENT,
                                           class_=ta.USER_CLASS).text.strip()
                    previous_user = user
                text = message_el.find(ta.TEXT_ELEMENT,
                                       class_=ta.TEXT_CLASS).text.strip()

                message = Message(id, ts, user, text)
                message.clean()

                messages.append(message)
            except Exception:
                # Malformed message markup; count it and keep going.
                error_counter += 1

    logger.info("Extracted: %i", len(messages))
    logger.info("Errors: %i", error_counter)

    # Sorting the messages by ts
    messages.sort(key=lambda m: m.ts)

    # Creating a result csv file with all messages
    if args.export:
        logger.info("Creating the result file.")
        setup_result(config)

        result_path = os.path.join(RESULT_PATH,
                                   CONFIG['result_filename'] + ".csv")
        with open(result_path, "w", encoding="utf8",
                  newline='') as messages_csv:
            csv_writer = csv.writer(messages_csv,
                                    delimiter=CONFIG['delimiter'])
            csv_writer.writerow(['id', 'timestamp', 'user', 'text'])
            for message in messages:
                csv_writer.writerow(
                    [message.id, message.ts, message.user, message.text])
        logger.info("Created result file.")

    # Executing the chosen test
    if args.type == '*':
        all_tests(messages, CONFIG['ignore'], args.export)
    elif args.type == 'WORD':
        top_word(messages, CONFIG['ignore'], args.export)
    elif args.type == 'USER':
        top_user(messages, args.export)
    elif args.type == 'DOW':
        top_day(messages, args.export)
    logger.info("Total execution time %i s", time.time() - start)
    sys.exit(OK)
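The fileConfig('logger.ini') call above expects an INI file with [loggers], [handlers], and [formatters] sections. The project's actual logger.ini is not shown, so the following is only a minimal sketch of one that would satisfy the call; every section value below is an assumption.

# Minimal sketch of a 'logger.ini' accepted by logging.config.fileConfig;
# the real file is not shown, so every value below is an assumption.
MINIMAL_LOGGER_INI = """\
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=plain

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
level=INFO
formatter=plain
args=(sys.stdout,)

[formatter_plain]
format=%(asctime)s [%(levelname)s] %(message)s
datefmt=%Y-%m-%d %H:%M:%S
"""

with open('logger.ini', 'w', encoding='utf8') as ini:
    ini.write(MINIMAL_LOGGER_INI)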
Example #55
import tweepy
import pprint
import json
import time

# import UDF
import utils

# import logging
import logging
from logging.config import fileConfig

fileConfig('./logging_config.ini')
logger = logging.getLogger()


class TweetStreamListener(tweepy.StreamListener):
    def __init__(self, session, DeliveryStreamName):
        super().__init__()  # let tweepy initialize its base listener state
        logger.info('Initializing stream listener')
        logger.info('Connecting to Kinesis Firehose')
        self.session = session
        self.firehose = self.session.client('firehose')
        self.DeliveryStreamName = DeliveryStreamName

    def on_connect(self):
        streams = self.firehose.list_delivery_streams()
        if self.DeliveryStreamName not in streams['DeliveryStreamNames']:
            raise ConnectionAbortedError('DeliveryStream not set up yet.')
        logger.info('Stream Listener connected.')
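The snippet stops before any tweets reach Firehose. Below is a minimal sketch of the missing callbacks, assuming raw tweet JSON is pushed straight to the delivery stream; put_record is the standard boto3 Firehose call, while the newline framing and the on_error policy are assumptions.

    def on_data(self, raw_data):
        # Forward each raw tweet JSON blob to the Firehose delivery stream;
        # the trailing newline keeps records line-delimited downstream.
        self.firehose.put_record(
            DeliveryStreamName=self.DeliveryStreamName,
            Record={'Data': (raw_data.rstrip('\n') + '\n').encode('utf-8')})
        return True

    def on_error(self, status_code):
        # Returning False on HTTP 420 disconnects instead of retrying
        # through Twitter's rate limiting.
        logger.error('Stream error: %s', status_code)
        return status_code != 420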
Example #56
from flask import Flask
from flask import abort
from flask import request
from linebot import LineBotApi
from linebot import WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.exceptions import LineBotApiError
from linebot.models import MessageEvent
from linebot.models import ImageMessage
from linebot.models import TextSendMessage

from logging import config
from logging import getLogger

from .functions import get_config
from .functions import get_logging_config
from .functions import is_webhook_confirmed
from .functions import save_img_tmp_file
from .classified_image import predict

config.fileConfig(get_logging_config())
logger = getLogger()

app = Flask(__name__)

app_config = get_config()
line_bot_api = LineBotApi(app_config["LineConfig"]["AccessToken"])
handler = WebhookHandler(app_config["LineConfig"]["ChannelSecret"])


@app.route("/", methods=['POST'])
def callback():
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']

    # get request body as text and hand it to the webhook handler
    body = request.get_data(as_text=True)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Signature mismatch: wrong channel secret or a forged request.
        abort(400)
    except LineBotApiError as e:
        logger.error("LINE API error: %s", e)
        abort(500)

    return 'OK'
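The imports above suggest the rest of the module classifies incoming images. A sketch of what that handler could look like; the exact signatures of save_img_tmp_file and predict are not shown, so their use here is an assumption.

@handler.add(MessageEvent, message=ImageMessage)
def handle_image(event):
    # Fetch the image bytes from LINE, stash them in a temporary file,
    # classify the file, and reply with the predicted label.
    content = line_bot_api.get_message_content(event.message.id)
    img_path = save_img_tmp_file(content)  # signature assumed
    label = predict(img_path)              # signature assumed
    line_bot_api.reply_message(event.reply_token,
                               TextSendMessage(text=label))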
Example #57
# Script that watches for door open/close events and reports them to Slack.


from time import sleep
import time
import pifacedigitalio
import slacker
from backports import configparser
import logging
from logging import config as logconfig

if __name__ == '__main__':
    config = configparser.ConfigParser()
    config.read('bouncer.ini')
    logconfig.fileConfig('bouncer.ini')

    slack = slacker.Slacker(config['slack.com']['token'])
    pfd = pifacedigitalio.PiFaceDigital()
    listener = pifacedigitalio.InputEventListener(chip=pfd)

    def doorclose(event):
        # We're picking up noise somewhere that causes false alerts, so read
        # the pin and check for the expected logical value every time a
        # rising/falling edge event occurs.
        if pfd.input_pins[2].value:
            logging.info("Door is closed")
            slack.chat.post_message('#door', 'The forge door is closed at '
                                    + time.strftime("%H:%M:%S") + ' CST')
        else:
            logging.info("FALSE ALARM door is not really closed")

    def dooropen(event):
        if not pfd.input_pins[2].value:
            logging.info("Door is open")
            slack.chat.post_message('#door', 'The forge door is open at '
                                    + time.strftime("%H:%M:%S") + ' CST')
        else:
            logging.info("FALSE ALARM door is not really open")
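The snippet ends before the callbacks are wired to the listener. A minimal sketch of the missing registration, using the documented pifacedigitalio API; mapping the IODIR_ON/IODIR_OFF edges of pin 2 to these handlers is an assumption about the wiring.

    # Wire both edges of input pin 2 to the handlers and start listening;
    # the edge-to-handler mapping is an assumption about the door switch.
    listener.register(2, pifacedigitalio.IODIR_ON, doorclose)
    listener.register(2, pifacedigitalio.IODIR_OFF, dooropen)
    listener.activate()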
Example #58
File: extract_all.py Project: yyztc/itop
from extract_excel import ExtractExcel
from extract_oem import ExtractOEM11G
from extract_ops import ExtractOPS
from extract_vcenter import ExtractVcenter
from extract_zabbix import ExtractZabbix
from pymongo import MongoClient
import logging
from logging.config import fileConfig
import configparser

fileConfig('logger_config.ini')
logger = logging.getLogger('infoLogger')

class ExtractAll():

    def __init__(self):
        self.cfg = configparser.ConfigParser()
        self.cfg.read("config.ini")
        cmdb_db = self.cfg.get("cmdb", "db")
        cmdb_str = self.cfg.get("cmdb", "conn_str")
        self.client = MongoClient(cmdb_str)
        self.db = self.client[cmdb_db]

    def truncate_cmdb(self, coll_name):
        coll = self.db[coll_name]
        result = coll.delete_many({})
        logger.info("%s deleted %s", coll_name, result.deleted_count)


    def main(self):
        coll_list = [
            'excel_server', 'excel_storage', 'excel_network',
            'oem_server', 'oem_database', 'ops_database',
            'vcenter_server', 'vcenter_virtualmachine',
            'vcenter_logicalvolume',
            'zabbix_weblogic', 'zabbix_oc4j', 'zabbix_solr', 'zabbix_bw',
            'zabbix_ems', 'zabbix_nginx', 'zabbix_ohs',
            'zabbix_spotfirewebplayer', 'zabbix_spotfire', 'zabbix_gfs',
            'zabbix_zookeeper', 'zabbix_others'
        ]
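The example cuts off right after the collection list. A plausible continuation, sketched here as an assumption rather than the project's actual code, would clear each collection before the extractors repopulate it:

        # Assumed continuation: wipe every collection up front. The Extract*
        # classes imported above presumably repopulate them, but their entry
        # points are not shown here, so those calls are omitted.
        for coll_name in coll_list:
            self.truncate_cmdb(coll_name)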
Example #59
def start_octobot(starting_args):
    if starting_args.pause_time is not None:
        sleep(starting_args.pause_time)

    fileConfig('config/logging_config.ini')

    logger = logging.getLogger("OctoBot Launcher")

    # Force new log file creation instead of appending to the previous
    # run's log; this assumes handler index 1 in logging_config.ini is the
    # rotating file handler (a safer lookup is sketched after this example).
    logger.parent.handlers[1].doRollover()

    sys.excepthook = _log_uncaught_exceptions

    # Version
    logger.info("Version : {0}".format(LONG_VERSION))

    # Test update
    if starting_args.update:
        Commands.update(logger)
    else:
        Commands.check_bot_update(logger)

        logger.info("Loading config files...")
        config = load_config()

        # Handle utility methods before bot initializing if possible
        if starting_args.packager:
            Commands.package_manager(config, starting_args.packager)

        elif starting_args.creator:
            Commands.tentacle_creator(config, starting_args.creator)

        else:
            # In those cases load OctoBot
            from octobot import OctoBot

            config[CONFIG_EVALUATOR] = load_config(CONFIG_EVALUATOR_FILE_PATH,
                                                   False)

            TelegramApp.enable(config, starting_args.telegram)

            WebService.enable(config, starting_args.web)

            bot = OctoBot(config)

            import interfaces

            interfaces.__init__(bot, config)

            if starting_args.data_collector:
                Commands.data_collector(config)

            # otherwise apply the trading options and start the bot
            else:
                if starting_args.backtesting:
                    import backtesting

                    backtesting.__init__(bot)

                    config[CONFIG_BACKTESTING][CONFIG_ENABLED_OPTION] = True
                    config[CONFIG_CATEGORY_NOTIFICATION][
                        CONFIG_ENABLED_OPTION] = False

                    config[CONFIG_TRADER][CONFIG_ENABLED_OPTION] = False
                    config[CONFIG_SIMULATOR][CONFIG_ENABLED_OPTION] = True

                if starting_args.simulate:
                    config[CONFIG_TRADER][CONFIG_ENABLED_OPTION] = False
                    config[CONFIG_SIMULATOR][CONFIG_ENABLED_OPTION] = True

                if starting_args.risk is not None and 0 < starting_args.risk <= 1:
                    config[CONFIG_TRADER][
                        CONFIG_TRADER_RISK] = starting_args.risk

                if starting_args.start:
                    Commands.start_bot(bot, logger)
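Indexing logger.parent.handlers[1] ties the rollover to the handler order in logging_config.ini. A defensive sketch that finds the rotating handler by type instead, assuming it is a RotatingFileHandler or TimedRotatingFileHandler:

import logging
import logging.handlers


def rollover_rotating_handlers(logger):
    # Roll over every rotating file handler attached to the parent logger
    # instead of hard-coding its position in the handler list.
    for handler in logger.parent.handlers:
        if isinstance(handler, (logging.handlers.RotatingFileHandler,
                                logging.handlers.TimedRotatingFileHandler)):
            handler.doRollover()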
Example #60
    for exp in exp_names:
        results_filename = RESULTS_FILENAME.format(exp)
        if os.path.isfile(results_filename):
            with open(results_filename) as f:
                results = json.load(f)
                if results['mark_invalid']:
                    invalid_exps.append(exp)

    return invalid_exps


def parse_args():
    parser = argparse.ArgumentParser(
        description='Runs the classification pipeline for each given '
                    'website to classify the congestion control algorithm '
                    'it uses')
    parser.add_argument('--website',
                        nargs=2,
                        action='append',
                        required=True,
                        metavar=('WEBSITE', 'FILE_URL'),
                        dest='websites')
    return parser.parse_args()


if __name__ == '__main__':
    fileConfig(LOGGING_CONFIG)
    args = parse_args()
    classify_websites(args.websites)
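For reference, nargs=2 combined with action='append' collects each repeated --website NAME URL pair into a list of two-element lists. A quick self-contained check; the hostnames are placeholders, not from the original project:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--website', nargs=2, action='append',
                    required=True, metavar=('WEBSITE', 'FILE_URL'),
                    dest='websites')
# Placeholder hostnames, only to show the parsed shape.
args = parser.parse_args(['--website', 'example.com', 'http://example.com/f',
                          '--website', 'test.org', 'http://test.org/f'])
assert args.websites == [['example.com', 'http://example.com/f'],
                         ['test.org', 'http://test.org/f']]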