Ejemplo n.º 1
0
    def _emit(self, record):
        """Re-open the log stream if the file was moved underneath us, then
        delegate to the regular RotatingFileHandler.emit().

        Mirrors the WatchedFileHandler logic from Python 2.6: compare the
        device/inode of the path on disk with the values recorded when the
        stream was opened; a mismatch (or a missing file) means an external
        rotation happened, so the stream must be reopened before writing.
        """
        if not os.path.exists(self.baseFilename):
            stat = None
            changed = 1
        else:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        if changed and self.stream is not None:
            # Best-effort flush/close of the stale stream.  BUG FIX: use
            # "except Exception" instead of a bare except so that
            # KeyboardInterrupt/SystemExit still propagate.
            try:
                self.stream.flush()
            except Exception:
                pass
            try:
                self.stream.close()
            except Exception:
                pass
            self.stream = self._reopen()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]

        # Now that we've reopened the file if necessary, call the regular
        # emit() routine for the rotating handler.
        RotatingFileHandler.emit(self, record)
Ejemplo n.º 2
0
def Register():
    """Wire per-name rotating log handlers into a CherryPy application.

    Reads handler parameters from the app configuration, builds one
    RotatingFileHandler per configured log name, and installs a Monitor
    plugin that periodically drains the shared log queue into them.
    """
    from logging.handlers import RotatingFileHandler
    from logging import Formatter

    formatter = Formatter("%(asctime)s [%(process)s:%(thread)s] ** %(levelname)s ** %(message)s")
    logsnames = getConfigValue('lognames')
    qsize = getIntConfigValue('qsize')
    rotation_bytes = getIntConfigValue('rotation_bytes')
    rotation_count = getIntConfigValue('rotation_count')

    base_dir = os.path.dirname(__file__)
    h = {}
    for name in logsnames.split(","):
        file_handler = RotatingFileHandler(os.path.join(base_dir, name),
                                           maxBytes=rotation_bytes,
                                           backupCount=rotation_count)
        file_handler.formatter = formatter
        h[name] = file_handler

    def write():
        # Drain the queue completely; return as soon as it is empty.
        while True:
            try:
                rec = cherrypy.engine.log_cache.get_nowait()
            except Empty:
                return
            h[rec.name].emit(rec)

    from cherrypy.process import plugins
    pq = plugins.Monitor(cherrypy.engine, write, 3)
    pq.subscribe()

    cherrypy.engine.log_cache = Queue(maxsize=qsize)
    conf_path = os.path.join(os.path.dirname(__file__), "config.txt")
    cherrypy.config.update(conf_path)
    app = cherrypy.quickstart(Root())
Ejemplo n.º 3
0
def init(verbose=0, quiet=False, filename='activity.log'):
    """
    Initialize the logger
    * verbose (int) specify the verbosity level of the standard output
      0 (default) ~ ERROR, 1 ~ WARN & WARNING, 2 ~ INFO, 3 ~ DEBUG,
      4 ~ everything, -1 ~ mirror the file log level; any other value
      falls back to conf['logging']['log_console_level']
    * quiet (boolean) allow to remove all message whatever is the verbosity lvl
    """
    if not os.path.exists('log'):
        os.mkdir('log')

    # Truncate the activity log so every run starts with a fresh file.
    with open("log/" + filename, 'w'):
        pass

    logger = logging.getLogger()
    logger.propagate = False
    logger.setLevel(min([
        conf['logging']['log_file_level'],
        conf['logging']['log_console_level'],
        verbose]))

    # One detailed formatter shared by both file handlers (the original
    # built two identical Formatter instances).
    file_formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: ' +
        '%(filename)s:%(funcName)s[%(lineno)d] :: %(message)s')

    file_handler = RotatingFileHandler("log/" + filename, 'w', 10000000, 10)
    file_handler.setLevel(conf['logging']['log_file_level'])
    file_handler.setFormatter(file_formatter)
    logger.addHandler(file_handler)

    error_handler = RotatingFileHandler("log/errors.log", 'w', 10000000, 10)
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(file_formatter)
    logger.addHandler(error_handler)

    console_formatter = logging.Formatter(
        '%(levelname)s :: %(filename)s :: %(message)s')
    stream_handler = logging.StreamHandler()
    # BUG FIX: the original compared with "verbose is <int>", which relies
    # on CPython's small-integer caching (an implementation detail).  Use a
    # plain equality-based mapping instead.
    console_levels = {
        -1: conf['logging']['log_file_level'],
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
        4: 0,
    }
    stream_handler.setLevel(
        console_levels.get(verbose, conf['logging']['log_console_level']))
    stream_handler.setFormatter(console_formatter)
    if not quiet:
        logger.addHandler(stream_handler)

    logging.info("=" * 80)
    logging.info('Logging system started: verbose=%d, quiet=%s' %
                 (verbose, str(quiet)))
Ejemplo n.º 4
0
def getLogger(log_path, level=logging.INFO):
    """Return a shared rotating-file logger.

    :param log_path: path of the log file (rotated at 10 MiB, 5 backups)
    :param level: logging level to apply (default: logging.INFO)
    :returns: the logger named "Rotating Logger"

    The logger name is fixed, so repeated calls return the same logger
    object.  BUG FIX: guard against attaching a new (duplicate) handler on
    every call, which made each record be written multiple times.
    """
    logger = logging.getLogger("Rotating Logger")
    logger.setLevel(level)
    if not logger.handlers:
        fhandler = RotatingFileHandler(log_path, maxBytes=1024*1024*10, backupCount=5)
        fhandler.setFormatter(logging.Formatter("[%(asctime)s] %(message)s"))
        logger.addHandler(fhandler)
    return logger
Ejemplo n.º 5
0
def setup_logging(verbose=True, level="INFO", directory=None, filename=None,
                  rotation_size="10MB"):
    """
    Setup logging.

    :param verbose: when True, also log to stdout
    :param level: log level name, resolved via parse_loglevel()
    :param directory: directory for the rotating log file (file logging is
        enabled only when BOTH directory and filename are given)
    :param filename: log file name
    :param rotation_size: human-readable size limit, parsed by parse_size()
    :returns: the logging module
    """
    root_logger = logging.getLogger("")

    # BUG FIX: removing handlers while iterating the live list skips
    # every other handler; iterate over a copy instead.
    for handler in list(root_logger.handlers):
        root_logger.removeHandler(handler)

    if verbose:
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(parse_loglevel(level))
        root_logger.addHandler(handler)

    # BUG FIX: the original condition fired when only ONE of directory /
    # filename was provided, making os.path.join crash on None.  File
    # logging needs both.
    if directory is not None and filename is not None:
        max_log_size = parse_size(rotation_size)

        filepath = os.path.join(directory, filename)

        handler = RotatingFileHandler(filepath, maxBytes=max_log_size,
                                      backupCount=5)

        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

        root_logger.setLevel(parse_loglevel(level))
        root_logger.addHandler(handler)

    return logging
Ejemplo n.º 6
0
 def __set_logger(self, log_level):
     work_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
     log_dir = work_dir + '/data/ossftp/'
     try:
         os.makedirs(log_dir)
     except OSError as exc: 
         if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
             pass
         else:
             raise
     LOGFILE = os.path.join(log_dir, "ossftp.log")
     MAXLOGSIZE = 10*1024*1024 #Bytes
     BACKUPCOUNT = 30
     FORMAT = "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
     handler = RotatingFileHandler(LOGFILE,
             mode='w',
             maxBytes=MAXLOGSIZE,
             backupCount=BACKUPCOUNT)
     formatter = logging.Formatter(FORMAT)
     handler.setFormatter(formatter)
     logger = logging.getLogger()
     if log_level == "DEBUG":
         logger.setLevel(logging.DEBUG)
     elif log_level == "INFO":
         logger.setLevel(logging.INFO)
     elif log_level == "WARNING":
         logger.setLevel(logging.WARNING)
     elif log_level == "ERROR":
         logger.setLevel(logging.ERROR)
     elif log_level == "CRITICAL":
         logger.setLevel(logging.CRITICAL)
     else:
         print "wrong loglevel parameter: %s" % log_level
         exit(1)
     logger.addHandler(handler)
def start(host,port,allow_agent=False):
    """Configure the wssh Flask app and serve it over gevent WebSockets.

    :param host: interface to bind
    :param port: TCP port to listen on
    :param allow_agent: stored into the app config as WSSH_ALLOW_SSH_AGENT
    """
    import argparse
    from gevent.pywsgi import WSGIServer
    from geventwebsocket.handler import WebSocketHandler
    from jinja2 import FileSystemLoader
    import os

    # Locate templates/static relative to the installed wssh package.
    root_path = os.path.dirname(wssh.__file__)
    app.jinja_loader = FileSystemLoader(os.path.join(root_path, 'templates'))
    app.static_folder = os.path.join(root_path, 'static')

    # Route the Flask app log to a rotating file (10 MB, 5 backups);
    # wssh_server_log_file is a module-level path defined elsewhere.
    handler = RotatingFileHandler(wssh_server_log_file, maxBytes=10000000, backupCount=5)
    handler.setLevel(logging.DEBUG)
    app.logger.addHandler(handler)

    app.config['WSSH_ALLOW_SSH_AGENT'] = allow_agent

    agent = 'wsshd/{0}'.format(wssh.__version__)

    print '{0} running on {1}:{2}'.format(agent, host, port)

    app.debug = True
    # log=None silences gevent's per-request access log.
    http_server = WSGIServer((host, port), app,
        log=None,
        handler_class=WebSocketHandler)
    try:
        http_server.serve_forever()
    except KeyboardInterrupt:
        pass
Ejemplo n.º 8
0
def setup_logging(name,logdir=None, scrnlog=True, txtlog=True, loglevel=logging.DEBUG):
    """Create and return a named logger with optional file/console handlers.

    :param name: logger name
    :param logdir: directory for the rotating log file; defaults to the
        current working directory when omitted
    :param scrnlog: also attach a StreamHandler (console) when True
    :param txtlog: attach the rotating file handler when True
    :param loglevel: level applied to the logger
    """
    # BUG FIX: logdir defaults to None but was passed straight to
    # os.path.abspath(), which raises TypeError; fall back to the cwd.
    logdir = os.path.abspath(logdir if logdir is not None else os.curdir)

    if not os.path.exists(logdir):
        # makedirs (not mkdir) so nested paths are created too.
        os.makedirs(logdir)

    log = logging.getLogger(name)
    log.setLevel(loglevel)
    # Handlers are attached directly; do not also bubble up to the root.
    log.propagate = False

    log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    if txtlog:
        txt_handler = RotatingFileHandler(os.path.join(logdir, "blogstrap-py.log"), maxBytes=(1024*1024*20), backupCount=5)
        txt_handler.setFormatter(log_formatter)
        log.addHandler(txt_handler)
        log.info("Logger initialised.")

    if scrnlog:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        log.addHandler(console_handler)

    return log
Ejemplo n.º 9
0
def getLogger(config, section, keyword="Test", tid=None):
    """Build a per-section logger writing to a rotating file.

    @param config : ConfigParser object
    @param section : Section in config (must define "logfile")
    @param keyword : additional keyword; also used as the logger name
    @param tid : optional thread/process id appended to the file name
    """

    level = config.get("global","log_level")

    log_dir = config.get("global", "log_dir")
    log_path = config.get(section, "logfile")
    fname = join(log_dir, log_path)

    #Thread id(Multiprocess id)
    # Idiom fix: compare against None with "is not", not "!=".
    if tid is not None:
        fname = "%s.%s" % (fname,tid)

    logger = logging.getLogger(str(keyword))
    logger.setLevel( LOG_LEVELS[level] )

    # NOTE(review): fname is always truthy here (join of config values),
    # so the StreamHandler fallback looks unreachable — confirm intent.
    if fname:
        log_handler = RotatingFileHandler(fname, maxBytes=100000000, backupCount=5)
    else:
        log_handler = StreamHandler(sys.stdout)

    log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    log_handler.setFormatter(log_formatter)
    logger.addHandler(log_handler)

    return logger
Ejemplo n.º 10
0
    def __init__(self, pid, config, debug):
        """Initialise the acolyte master daemon.

        Sets up the shutdown event, the rotating master log file, and
        records the log handler's file descriptor so daemonisation can
        keep it open.

        :param pid: passed through to AbstractDaemon (presumably the
            pidfile path — TODO confirm against AbstractDaemon)
        :param config: mapping with a "master" section providing
            "logfile" and optionally "log_format" / "max_log_size"
        :param debug: the string "on" enables DEBUG-level logging
        """
        AbstractDaemon.__init__(self, pid, "acolyte", debug=debug)
        self.processes = []
        self.collectors = []
        self.config = config
        self.shutdown_event = multiprocessing.Event()

        log_fmt_default = "[%(asctime)s] %(levelname)s: %(name)s: %(message)s"
        log_format = config.get("master").get("log_format", log_fmt_default)
        self.log_format = log_format

        fmt = logging.Formatter(log_format)

        logfile = config.get("master").get("logfile")

        # max_log_size is configured in MB (default 1); the handler wants bytes.
        max_log_size = int(config.get("master").get("max_log_size", 1))
        handler = RotatingFileHandler(logfile, backupCount=5, maxBytes=max_log_size * 1000000)

        handler.setFormatter(fmt)
        # Preserve the open log fd across daemonisation.
        handler_fd = handler.stream.fileno()
        self.files_preserve = [handler_fd]

        log = logging.getLogger(self.name)

        if debug == "on":
            log.setLevel(logging.DEBUG)
            self.debug = True
        else:
            log.setLevel(logging.INFO)

        log.addHandler(handler)
        self.log = log
Ejemplo n.º 11
0
Archivo: cli.py Proyecto: levi-rs/chirp
def configure_logger():
    """
    Creates a rotating log

    Builds two DEBUG-level handlers — stdout first, then a rotating file
    at LOG_FILE — sharing one formatter, and attaches both to this
    module's logger.
    """
    formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(message)s')

    # Create Logger object up front, then attach both handlers to it.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    for log_handler in (logging.StreamHandler(sys.stdout),
                        RotatingFileHandler(LOG_FILE, backupCount=5, maxBytes=1000000)):
        log_handler.setLevel(logging.DEBUG)
        log_handler.setFormatter(formatter)
        logger.addHandler(log_handler)

    return logger
Ejemplo n.º 12
0
def configure_app(app):
    """Main config function:
    Works out what environment to configure for based on
    Environment Variable (Dev is assumed if none found),
    then uses that to select the config class, and sets
    logging options.
    :param app: Flask app object
    """

    config = {"Dev": "restaurants.config.DevConfig",
              "Test": "restaurants.config.TestConfig",
              "Prod": "restaurants.config.ProdConfig"
              }

    # Get Environment Variable
    env = os.getenv('RESTAURANT_APP_ENV', 'Dev')

    # Config based on options in this file
    app.config.from_object(config[env])

    # Config based on options in "APPLICATION_SETTINGS" file if it exists (used for anything sensitive)
    # NOTE(review): if APPLICATION_SETTINGS is unset, from_pyfile(None)
    # raises TypeError (not IOError) and will not be caught here — confirm
    # the key is always present in the selected config class.
    try:
        app.config.from_pyfile(app.config.get('APPLICATION_SETTINGS'))
    except IOError:
        print 'could not find ' + app.config.get('APPLICATION_SETTINGS') + ', continuing without it'

    # Logging Config: rotating file at LOGGING_LOCATION, 100 MB per file,
    # 20 backups, formatted per LOGGING_FORMAT.
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler(app.config['LOGGING_LOCATION'], maxBytes=1024 * 1024 * 100, backupCount=20)
    formatter = logging.Formatter(app.config['LOGGING_FORMAT'])
    file_handler.setFormatter(formatter)
    app.logger.addHandler(file_handler)
 def get_logger(self):
     """Return the queue logger, attaching its rotating handler only once.

     The logger name ('djutils.queue.logger') is process-global, so
     without the guard every call stacked another handler and each
     message was written to the log file multiple times (BUG FIX).
     """
     log = logging.getLogger('djutils.queue.logger')
     log.setLevel(logging.DEBUG)
     if not log.handlers:
         handler = RotatingFileHandler(self.logfile, maxBytes=1024*1024, backupCount=3)
         handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(message)s"))
         log.addHandler(handler)
     return log
Ejemplo n.º 14
0
    def __init__(self):
        """Build the Actuasim main window: logging, UI, menus, and the
        KNX server thread."""
        super(Actuasim, self).__init__()
        # Root logger -> rotating file actuasim.log (10 MB, 1 backup).
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        file_handler = RotatingFileHandler('actuasim.log', 'a', 10000000, 1)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self.logger.info('=======================================')
        self.logger.info('           ACTUASIM START')
        self.logger.info('=======================================')
        # Qt UI: generated main window, tabbed classroom view, File menu.
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.resize(1700, 900)
        self.classrooms = []
        self.tabs = QTabWidget()
        self.setCentralWidget(self.tabs)
        self.file_menu = self.ui.menubar.addMenu("&File")
        self.save_action = QAction("&Save", self, triggered=self.save)
        self.file_menu.addAction(self.save_action)
        self.load_action = QAction("&Load", self, triggered=self.load)
        self.file_menu.addAction(self.load_action)
        self.command_handler = CommandHandler(self)

        # endpoints, status, id
        self.control_endpoint = ('0.0.0.0', 0)
        self.data_endpoint = ('0.0.0.0', 0)
        self.status = 0
        self.channel_id = random.randint(0, 255)  # TODO: handle multiple channel

        # server: background KNX listener; frames arrive via the Qt signal.
        self.knxserver = Knxserver()
        self.knxserver.trigger.connect(self.frame_received)
        self.knxserver.start()
Ejemplo n.º 15
0
def main():
    """Boot the Ingestor and route Flask app logging to a per-day file."""
    import os
    i=Ingestor()
    todaydate = time.strftime("%m-%d-%Y")
    # BUG FIX: RotatingFileHandler raises if the logs/ directory does not
    # exist yet; create it first.
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    filename = 'logs/' + str(todaydate) + '.log'
    # No maxBytes given, so the handler never actually rotates; kept as-is
    # to preserve behaviour.
    handler = RotatingFileHandler(filename, mode='a',backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
def configure_logging(app):
    ''' Configure logging.

    Reads LOG_PATH / LOG_LEVEL from the app config, ensures the log
    directory exists, and attaches a rotating file handler (250 KB,
    5 backups) to the app logger.

    :param app: The Flask application object.
    '''

    # Get the path of the log from the config
    log_path = app.config['LOG_PATH']

    # Get the level of logging from the config
    log_level = app.config['LOG_LEVEL']

    # If path directory doesn't exist, create it.
    # BUG FIX: a bare filename gives dirname == '' and os.makedirs('')
    # raises FileNotFoundError; only create the directory when there is one.
    log_dir = os.path.dirname(log_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Create formatter
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Create Log_Handler
    log_handler = RotatingFileHandler(log_path, maxBytes=250000, backupCount=5)

    # add formatter to log handler
    log_handler.setFormatter(formatter)

    # Get the level of the Debug and set it to the logger
    app.logger.setLevel(log_level)

    # Add the handlers to the logger
    app.logger.addHandler(log_handler)

    # Test if the logging is working by typing this string to a file.
    app.logger.info('Logging to: %s', log_path)
Ejemplo n.º 17
0
    def __setLogging__(self):
        """
        Initializes logging. Use by the constructor.

        Derives log file paths from the RemoteMsg config section
        (filling in defaults under RemoteMsgDir), attaches a rotating
        handler (1 MB, 3 backups), and maps the configured logLevel
        string onto a logging level (default INFO).
        """
        compSect = self.myconfig.RemoteMsg

        # Logging: default paths live under the component directory.
        if not hasattr(compSect, "logFile"):
            compSect.logFile = os.path.join(compSect.RemoteMsgDir, "remoteMsg.log")
        print("Log file is: " + compSect.logFile)

        if not hasattr(compSect, "listenerLogFile"):
            compSect.listenerLogFile = os.path.join(compSect.RemoteMsgDir, "listener.log")
        print("Listener log file is: " + compSect.listenerLogFile)

        logHandler = RotatingFileHandler(compSect.logFile, "a", 1000000, 3)
        logFormatter = logging.Formatter("%(asctime)s:%(levelname)s:%(filename)s:%(message)s")
        logHandler.setFormatter(logFormatter)
        self.mylogger = logging.getLogger("RemoteMsg")
        self.mylogger.addHandler(logHandler)
        self.mylogger.setLevel(logging.INFO)
        # map log strings to integer levels:
        self.logMsg = {
            "DEBUG": logging.DEBUG,
            "ERROR": logging.ERROR,
            "NOTSET": logging.NOTSET,
            "CRITICAL": logging.CRITICAL,
            "WARNING": logging.WARNING,
            "INFO": logging.INFO,
        }
        ##                    'SQLDEBUG' : logging.SQLDEBUG  }
        # Override the INFO default only when the config names a known level.
        if hasattr(compSect, "logLevel") and compSect.logLevel in self.logMsg.keys():
            self.mylogger.setLevel(self.logMsg[compSect.logLevel])
Ejemplo n.º 18
0
def initialize_logger(redfish_logfile):
    """Initialize the module-wide root logger.

    Attaches a DEBUG-level rotating file handler (1 MB, 1 backup) plus a
    DEBUG-level console handler.

    :param redfish_logfile: path of the redfish log file
    :type redfish_logfile: str
    :returns: True

    BUG FIX: the original docstring ("Return api version") described a
    different function entirely; also renamed the misspelled
    "steam_handler" local to "stream_handler".
    """
    global logger
    logger = logging.getLogger()

    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s'
        )
    file_handler = RotatingFileHandler(redfish_logfile, 'a', 1000000, 1)

    # First logger to file
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Second logger to console (no formatter, as in the original)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    return True
Ejemplo n.º 19
0
def get_logger(name, logfile=ZTASKD_LOG_PATH, loglevel=ZTASKD_LOG_LEVEL):
    """Return the non-propagating "ztaskq.<name>" logger.

    Logs to a rotating file when *logfile* is set (a '%(name)s'
    placeholder in the path is expanded with *name*), otherwise to a
    plain StreamHandler.
    """
    level_by_name = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }

    logger_ = logging.getLogger("ztaskq.%s" % name)
    logger_.propagate = False
    logger_.setLevel(level_by_name[loglevel.lower()])

    if not logfile:
        handler = logging.StreamHandler()
    else:
        filename = logfile % {'name': name} if '%(name)s' in logfile else logfile
        handler = RotatingFileHandler(filename=filename,
                                      maxBytes=ZTASKD_LOG_MAXBYTES,
                                      backupCount=ZTASKD_LOG_BACKUP)

    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    ))
    logger_.addHandler(handler)

    return logger_
Ejemplo n.º 20
0
def _init_logger(logger, phase):
    """Attach the shared rotating-file handler (1 MiB, 5 backups) to
    *logger*, tagging every record with *phase*, and force DEBUG level."""
    rotating = RotatingFileHandler(LOGFILE, maxBytes=1048576, backupCount=5)
    rotating.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s {0} [-] %(message)s'.format(phase)))
    logger.addHandler(rotating)
    logger.setLevel(logging.DEBUG)
Ejemplo n.º 21
0
 def __init__(self, debug, log_name, log_level, logger):
     """Create the named logger.

     When *debug* is truthy, log to a rotating file named *log_name* in
     the current working directory (100 MiB, 5 backups) at the level
     named by *log_level* (unknown names fall back to ERROR); otherwise
     install an EmptyHandler that swallows everything.
     """
     self.logger = logging.getLogger(logger)
     if not debug:
         self.logger.addHandler(EmptyHandler())
         return
     logfile = os.path.join(os.getcwd(), log_name)
     fmt = \
     "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
     rotating = RotatingFileHandler(logfile,
                                    mode='a',
                                    maxBytes=100*1024*1024,
                                    backupCount=5)
     rotating.setFormatter(logging.Formatter(fmt))
     self.logger.addHandler(rotating)
     levels = {
         "DEBUG": logging.DEBUG,
         "INFO": logging.INFO,
         "WARNING": logging.WARNING,
         "ERROR": logging.ERROR,
         "CRITICAL": logging.CRITICAL,
     }
     self.logger.setLevel(levels.get(log_level.upper(), logging.ERROR))
Ejemplo n.º 22
0
def add_logger():
    """Attach an INFO-level rotating file handler to the Flask app logger
    (file from LOGGING_FILE config, default "prioritizer.log")."""
    target = app.config.get("LOGGING_FILE", "prioritizer.log")
    rotating = RotatingFileHandler(target, maxBytes=10000, backupCount=10)
    rotating.setLevel(logging.INFO)
    app.logger.addHandler(rotating)

    app.logger.info(type(app.logger))
Ejemplo n.º 23
0
    def __init__(self, filename, level="debug", logid="qiueer", mbs=20, count=10, is_console=True):
        '''
        Build (or reuse) the logger named *logid*.

        filename: rotating log file path
        level: level name resolved via self.get_map_level()
        logid: logger name; loggers are process-global, so the handler
            setup runs only when this logger has no handlers yet
        mbs: how many MB per log file before rotation
        count: the count of remain (backupCount)
        is_console: also mirror records to stderr with colored output
        '''
        try:
            self._level = level
            #print "init,level:",level,"\t","get_map_level:",self._level
            self._filename = filename
            self._logid = logid

            self._logger = logging.getLogger(self._logid)
            
            
            # Guard: only configure the logger once per process.
            if not len(self._logger.handlers):
                self._logger.setLevel(self.get_map_level(self._level))  
                
                fmt = '[%(asctime)s] %(levelname)s\n%(message)s'
                datefmt = '%Y-%m-%d %H:%M:%S'
                formatter = logging.Formatter(fmt, datefmt)
                
                maxBytes = int(mbs) * 1024 * 1024
                file_handler = RotatingFileHandler(self._filename, mode='a',maxBytes=maxBytes,backupCount=count)
                self._logger.setLevel(self.get_map_level(self._level))  
                file_handler.setFormatter(formatter)  
                self._logger.addHandler(file_handler)
    
                if is_console == True:
                    stream_handler = logging.StreamHandler(sys.stderr)
                    console_formatter = ColoredFormatter(fmt, datefmt)
                    stream_handler.setFormatter(console_formatter)
                    self._logger.addHandler(stream_handler)

        except Exception as expt:
            print expt
Ejemplo n.º 24
0
class sncli:
    """Command-line Simplenote client: sets up the note database,
    logging, and (optionally) server sync."""

    def __init__(self, do_server_sync, verbose=False):
        """
        :param do_server_sync: whether to sync notes with the server
        :param verbose: verbose flag stored for later use
        """
        self.config         = Config()
        self.do_server_sync = do_server_sync
        self.verbose        = verbose
        self.do_gui         = False

        # Ensure the note database directory exists.
        if not os.path.exists(self.config.get_config('db_path')):
            os.mkdir(self.config.get_config('db_path'))

        # configure the logging module: DEBUG-level rotating file
        # (100 KB, 1 backup) attached to the root logger.
        self.logfile = os.path.join(self.config.get_config('db_path'), 'sncli.log')
        self.loghandler = RotatingFileHandler(self.logfile, maxBytes=100000, backupCount=1)
        self.loghandler.setLevel(logging.DEBUG)
        self.loghandler.setFormatter(logging.Formatter(fmt='%(asctime)s [%(levelname)s] %(message)s'))
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.loghandler)
        self.config.logfile = self.logfile

        logging.debug('sncli logging initialized')

        self.logs = []

        # Open the notes DB; any failure is logged and aborts the program.
        try:
            self.ndb = NotesDB(self.config, self.log, self.gui_update_view)
        except Exception, e:
            self.log(str(e))
            sys.exit(1)
Ejemplo n.º 25
0
    def start(self):
        """(Re)build the console and rotating-file handlers from the
        current logging settings (console/file flags, debug flag, size
        and backup-count limits)."""
        # remove all handlers
        self.handlers = []

        # INFO normally, DEBUG when debug logging is switched on.
        active_level = self.logLevels['DEBUG'] if self.debugLogging else self.logLevels['INFO']

        # console log handler
        if self.consoleLogging:
            console = logging.StreamHandler()
            console.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S'))
            console.setLevel(active_level)
            self.addHandler(console)

        # rotating log file handlers: a main log plus an error-only log
        if self.logFile and makeDir(os.path.dirname(self.logFile)):
            file_formatter = logging.Formatter(
                '%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S')

            rfh = RotatingFileHandler(
                filename=self.logFile,
                maxBytes=self.logSize,
                backupCount=self.logNr
            )
            rfh.setFormatter(file_formatter)
            rfh.setLevel(active_level)
            self.addHandler(rfh)

            rfh_errors = RotatingFileHandler(
                filename=self.logFile.replace('.log', '.error.log'),
                maxBytes=self.logSize,
                backupCount=self.logNr
            )
            rfh_errors.setFormatter(file_formatter)
            rfh_errors.setLevel(self.logLevels['ERROR'])
            self.addHandler(rfh_errors)
Ejemplo n.º 26
0
def _get_handler():
    """Return the module-wide log handler, creating it on first use.

    Prefers a rotating file under /var/log/rhsm (1 MiB, 5 backups,
    UTF-8); falls back to a StreamHandler when the file cannot be
    opened (e.g. running unprivileged).
    """
    # we only need one global handler
    global handler
    if handler is not None:
        return handler

    path = '/var/log/rhsm/rhsm.log'
    # Best-effort creation of the log directory; opening the file below
    # will surface any real problem.
    try:
        if not os.path.isdir("/var/log/rhsm"):
            os.mkdir("/var/log/rhsm")
    except Exception:
        pass

    # Try to write to /var/log, fallback on console logging.
    # FIX: the original had separate "except IOError" and "except
    # Exception" branches with identical bodies; one branch suffices.
    try:
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5, encoding='utf-8')
    except Exception:
        handler = logging.StreamHandler()

    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    handler.setLevel(LOG_LEVEL)

    return handler
Ejemplo n.º 27
0
def main():
    """Run one measurement cycle with both file and console logging."""
    ini=Recall()
    #
    # setup logging: root logger at INFO, DEBUG-level rotating file
    # (1 MB, 1 backup) plus a DEBUG-level console handler with a
    # shorter timestamp format.
    #
    logger=logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter=logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler('ridirect.log', 'a', 1000000, 1)

    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    formatter=logging.Formatter('%(asctime)s \t %(filename)s \t %(levelname)s \t %(message)s', "%H:%M:%S")
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    # Collect and dispatch the new measurements, logging start/stop.
    r=Measurement(None,ini)
    print "Time out: ",r.time_out
    logging.info("Start"+r.version)
    r.collect_new()
    r.dispatch_new()
    logging.info("Stop")
    return True
Ejemplo n.º 28
0
def setup_logger(cfg):
    """Configure the module logger from *cfg*.

    :param cfg: mapping with required 'DEBUG' (a logging level name such
        as "INFO") and optional 'LOGFILE' (path for a 1 MiB rotating log,
        10 backups).
    """
    logger.setLevel(getattr(logging, cfg['DEBUG']))
    if 'LOGFILE' in cfg:
        file_handler = RotatingFileHandler(cfg['LOGFILE'], 'a', 1 * 1024 * 1024, 10)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(getattr(logging, cfg['DEBUG']))
        # BUG FIX: addHandler was outside the if-block, so a cfg without
        # 'LOGFILE' raised NameError on the unbound file_handler.
        logger.addHandler(file_handler)
Ejemplo n.º 29
0
def configure_logging(app):
    """
    Configure logger.

    Attaches two rotating file handlers to the Flask app logger: a debug
    log at the configured LOG_LEVEL and an error-only log, both placed
    next to the app's root path.

    :param app: Flask object
    """
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    )

    root_path = os.path.dirname(app.root_path)

    def _attach(config_key, backups, level):
        # Build one 100 KB rotating handler for the configured file name.
        target = os.path.join(root_path, app.config[config_key])
        rotating = RotatingFileHandler(target, maxBytes=100000, backupCount=backups)
        rotating.setLevel(level)
        rotating.setFormatter(formatter)
        app.logger.addHandler(rotating)

    _attach('DEBUG_LOG', 100000, app.config['LOG_LEVEL'])
    _attach('ERROR_LOG', 10, logging.ERROR)
Ejemplo n.º 30
0
 def setup_logging():
     """In production (app.debug off), send ERROR-level records to a
     rotating log file configured via ERROR_LOG (10 MiB, 10 backups)."""
     if app.debug:
         return
     from logging.handlers import RotatingFileHandler
     if not app.config.get('ERROR_LOG'):
         return
     error_fh = RotatingFileHandler(app.config['ERROR_LOG'], maxBytes=1024*1024*10, backupCount=10, encoding='utf_8')
     error_fh.setLevel(logging.ERROR)
     app.logger.addHandler(error_fh)
Ejemplo n.º 31
0
                      "info")  # info as default, #debug for local dev

LOG_PATH = os.getenv("LOG_PATH", "./logs")

# Define the logs
# Set verbosity: resolve the VERBOSITY name ("info", "debug", ...) to a
# numeric level.  getLevelName returns a string for unknown names, in
# which case the isinstance check fails and basicConfig is skipped.
log_level = level = logging.getLevelName(VERBOSITY.upper())
if isinstance(log_level, int):
    logging.basicConfig(
        level=log_level,
        format=
        "[%(levelname)s] %(asctime)s | %(message)s | in function: %(funcName)s",
        handlers=[
            RotatingFileHandler(
                os.path.join(LOG_PATH, "info.log"),
                maxBytes=10000,
                backupCount=10,
            ),
            logging.StreamHandler(),
        ],
    )

# Read the application configuration settings
yaml_path = os.path.join(get_folder_path("."), "config")

# ETL Configs
ETL_CONFIG = read_yaml(yaml_path, filename="etl.yml")
ETL_DATASET_CONFIG = ETL_CONFIG["dataset"]  # name of the datasets
ETL_DATA_CONFIG = ETL_CONFIG["etl"]  # remote input
ETL_EXTRACTION_CONFIG = ETL_CONFIG["etl_results"]  # file on disk
ETL_DATA_PATH = ETL_CONFIG["data_path"]
Ejemplo n.º 32
0
def create_app(config_class=Config):
    """Flask application factory.

    Initialises extensions, registers all blueprints, configures logging
    (stdout or a rotating file under logs/ depending on LOG_TO_STDOUT),
    and returns the configured app.

    :param config_class: configuration object applied to the app
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    # app.url_map.strict_slashes = False

    # Bind the shared extension instances to this app.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    bootstrap.init_app(app)

    from app.audit import bp as audit_bp
    app.register_blueprint(audit_bp,
                           url_prefix='/audit',
                           template_folder='/audit/templates')

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp,
                           url_prefix='/auth',
                           template_folder='/auth/templates')

    from app.keyval import bp as keyval_bp
    app.register_blueprint(keyval_bp,
                           url_prefix='/keyval',
                           template_folder='/keyval/templates')

    from app.app import bp as app_bp
    app.register_blueprint(app_bp,
                           url_prefix='/app',
                           template_folder='/app/templates')

    from app.env import bp as env_bp
    app.register_blueprint(env_bp,
                           url_prefix='/env',
                           template_folder='/env/templates')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp,
                           url_prefix='/',
                           template_folder='/main/templates')

    # Logging: stdout when LOG_TO_STDOUT is set (e.g. container/PaaS),
    # otherwise a small rotating file under logs/ (10 KB, 10 backups).
    if app.config['LOG_TO_STDOUT']:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)
        app.logger.addHandler(stream_handler)
    else:
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/c4maint.log',
                                           maxBytes=10240,
                                           backupCount=10)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                              '[in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('c4maint startup')

    app.config['EXPLAIN_TEMPLATE_LOADING'] = True
    return app
Ejemplo n.º 33
0
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
import sys
import logging
from logging.handlers import RotatingFileHandler
import os
import re

# Module-wide logger for the probe: INFO and above is appended to a
# rotating file under /datastorage (100 MiB per file, 5 backups kept).
logger = logging.getLogger("node_exporter probe")
logger.setLevel(logging.INFO)

formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")

fileHandler = RotatingFileHandler(
    "/datastorage/prometheus/node_exporter_probe.log",
    maxBytes=100 * 1024 * 1024,
    backupCount=5)
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.INFO)
logger.addHandler(fileHandler)


def main():
    runTimeException = []
    gpuExists = False
    try:
        gpuOutput = subprocess.check_output(["lspci"], shell=True)
        r = re.search(
            "[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F].[0-9] (3D|VGA compatible) controller: NVIDIA Corporation.*",
Ejemplo n.º 34
0
import logging
from logging.handlers import RotatingFileHandler
import json
import re

from flask import Flask
from flask import request

from collections import OrderedDict

# --- logging ---------------------------------------------------------
# Everything (DEBUG and up) goes to a rotating file in the working
# directory: 1 MB per file, one backup kept.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')

file_handler = RotatingFileHandler('flaskapp.log', mode='a',
                                   maxBytes=1000000, backupCount=1)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)

# (A console StreamHandler used to be wired here; it stays disabled.)

# --- flask app -------------------------------------------------------
app = Flask(__name__)

# --- in-memory data store --------------------------------------------
data = {"alex": "http://alex.com"}
Ejemplo n.º 35
0
        auth = None
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = None
        if app.config['MAIL_USE_TLS']:
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'],
            subject='Microblog Failure',
            credentials=auth,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/microblog.log',
                                       maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('Microblog startup')
Ejemplo n.º 36
0
import redis
import logging
from logging.handlers import RotatingFileHandler
from ihome.utils.commons import ReConverter

import os

# Database (SQLAlchemy instance; presumably bound to the app in the
# create_app factory below -- verify).
db = SQLAlchemy()

# Redis connection object (decode_responses=True returns str, not bytes).
redis_store = redis.StrictRedis(host='127.0.0.1', port=6379, decode_responses=True)

# Logging: rotating file handler, 100 MiB per file, 10 backups kept.
# Fix: ensure the log directory exists first -- RotatingFileHandler
# raises FileNotFoundError on a fresh checkout when "logs/" is missing.
os.makedirs("logs", exist_ok=True)
file_log_handler = RotatingFileHandler("logs/log", maxBytes=1024*1024*100, backupCount=10)
# Record format: level, source filename, line number, message.
formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')
file_log_handler.setFormatter(formatter)
# Attach the handler to the root logger (the one Flask's app.logger
# propagates to).
logging.getLogger().addHandler(file_log_handler)
# Global log level: DEBUG while developing.
logging.basicConfig(level=logging.DEBUG)


# 工厂模式
def create_app(config_name):
    """
    创建flask的应用对象
    :param config_name: str  配置模式的模式的名字 ("develop",  "product")
Ejemplo n.º 37
0
# Adjacency sets, one per room index (presumably the pink-room graph of
# the game board -- confirm against the game rules).
pink_passages = [{1, 4}, {0, 2, 5, 7}, {1, 3, 6}, {2, 7}, {0, 5, 8, 9},
                 {4, 6, 1, 8}, {5, 7, 2, 9}, {3, 6, 9, 1}, {4, 9, 5},
                 {7, 8, 4, 6}]
"""
    logging setup
    you can set the appropriate importance level of the data
    that are written to your logfiles.
"""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(levelname)s :: %(message)s",
                              "%H:%M:%S")
# logger to file: each run starts with a fresh logfile.
# Fix: create the log directory first -- os.remove/RotatingFileHandler
# raise on a fresh checkout when ./logs does not exist.
os.makedirs("./logs", exist_ok=True)
if os.path.exists("./logs/game.log"):
    os.remove("./logs/game.log")
file_handler = RotatingFileHandler('./logs/game.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# logger to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
"""
    Functions handling exchanges between
    the players and the server.
"""


def send_json_to_player(player, data):
    """
Ejemplo n.º 38
0
from card_auto_add.loops.processor import Processor

# Application logger: INFO+ to both the console and a rotating file.
logger = logging.getLogger("card_access")
logger.setLevel(logging.INFO)

formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

# 1 MiB per file, 10 backups kept.
# NOTE(review): hard-coded Windows user path -- this module only runs on
# that one deployment machine; confirm before reusing elsewhere.
max_bytes = 1 * 1024 * 1024
file_handler = RotatingFileHandler(
    "C:/Users/700 Kalamath/.cards/card_access.log",
    maxBytes=max_bytes,
    backupCount=10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# Wire up the project services; sentry gets its DSN from the config.
config = Config(logger)
sentry_sdk.init(config.sentry_dsn)

cas = CardAccessSystem(config)
server_api = WebhookServerApi(config)

# NOTE(review): processor.start() runs at import time -- presumably kicks
# off a background loop; confirm this module is only imported once.
processor = Processor(config, server_api)
processor.start()

ingester = Ingester(config, cas, server_api, processor.command_queue)
Ejemplo n.º 39
0
# Re-export command/config constants from Config for module-level access.
TELEGRAM_LEECH_COMMAND_G = Config.TELEGRAM_LEECH_COMMAND_G
CANCEL_COMMAND_G = Config.CANCEL_COMMAND_G
GET_SIZE_G = Config.GET_SIZE_G
STATUS_COMMAND = Config.STATUS_COMMAND
SAVE_THUMBNAIL = Config.SAVE_THUMBNAIL
CLEAR_THUMBNAIL = Config.CLEAR_THUMBNAIL
UPLOAD_AS_DOC = Config.UPLOAD_AS_DOC
BOT_START_TIME = time.time()
PYTDL_COMMAND_G = Config.PYTDL_COMMAND_G
LOG_COMMAND = Config.LOG_COMMAND
CLONE_COMMAND_G = Config.CLONE_COMMAND_G

# Empty the previous run's log file without deleting it.
if os.path.exists("bot.log"):
    with open("bot.log", "r+") as f_d:
        f_d.truncate(0)

# the logging things: DEBUG+ to a rotating "bot.log" and to the console.
# NOTE(review): maxBytes=FREE_USER_MAX_FILE_SIZE (defined elsewhere) is an
# unusual choice for a log-rotation threshold -- confirm it is intended.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
    handlers=[
        RotatingFileHandler("bot.log",
                            maxBytes=FREE_USER_MAX_FILE_SIZE,
                            backupCount=10),
        logging.StreamHandler()
    ])
# Silence chatty third-party loggers.
logging.getLogger("pyrogram").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
Ejemplo n.º 40
0
from flask.ext.script import Manager
from api import app

# In production, write WARNING+ records to logs/errors.log (12.5 MiB cap).
# NOTE(review): backupCount defaults to 0 here -- per the logging docs,
# rollover never occurs when backupCount is zero, so the maxBytes cap is
# effectively inert; confirm whether rotation was intended.
# NOTE(review): assumes the 'logs/' directory already exists -- the
# handler raises at import time otherwise.
if app.config['ENVIRONMENT'] == 'production':
    import logging
    from logging.handlers import RotatingFileHandler

    file_handler = RotatingFileHandler('logs/errors.log', maxBytes=13107200)
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)

manager = Manager(app)

@manager.command
def runserver():
    """Start the dev server on all interfaces, port 5002 (debug off in production)."""
    debug_enabled = app.config['ENVIRONMENT'] != 'production'
    app.run(host='0.0.0.0', port=5002, debug=debug_enabled)

if __name__ == '__main__':
    # Production runs the plain Flask server; anything else goes through
    # the Flask-Script manager CLI.
    if app.config['ENVIRONMENT'] == 'production':
        app.run()
    else:
        manager.run()
Ejemplo n.º 41
0
        pytz.utc).isoformat()
    event_dict['level'] = level

    if session:
        event_dict['session_id'] = session.get('session_id')

    if request:
        try:
            event_dict['ip_address'] = request.headers[
                'X-Forwarded-For'].split(',')[0].strip()
        except:
            event_dict['ip_address'] = 'unknown'

    return event_dict


# Add a handler to write log messages to a file, but only when one is
# configured (an unset/empty LOG_FILENAME disables file logging).
if app.config.get('LOG_FILENAME'):
    # Rotation size/count come from config; DEBUG so the file captures
    # everything the app logger emits.
    file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
                                       maxBytes=app.config['LOG_MAXBYTES'],
                                       backupCount=app.config['LOG_BACKUPS'],
                                       mode='a',
                                       encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)

# Wrap the application logger with structlog so every record passes
# through add_fields (defined above) and is rendered as compact JSON.
logger = wrap_logger(app.logger,
                     processors=[add_fields,
                                 JSONRenderer(indent=None)])
Ejemplo n.º 42
0
# Flask + Socket.IO application setup.
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source -- move it to
# config/environment before deploying.
app.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'
socketio = SocketIO(app)
# Presumably per-connection lookup tables, populated by the socket
# handlers elsewhere -- verify against the rest of the module.
clientsSSImap = {}
ClientLanguageMap = {}
reverseclientsSSImap = {}
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# ChatterBot instance, trained on the English conversations corpus at
# import time.
english_bot = ChatBot("Chatterbot", storage_adapter="chatterbot.storage.SQLStorageAdapter")
trainer = ChatterBotCorpusTrainer(english_bot)
trainer.train("chatterbot.corpus.english.conversations")

# Fix: test truthiness instead of 'is True' (identity comparison is
# fragile and unidiomatic).
# NOTE(review): app.debug is normally still False at import time unless
# it was set before this module runs -- confirm this handler is ever
# attached.
if app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('python.log', maxBytes=1024 * 1024 * 100, backupCount=20)
    file_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    file_handler.setFormatter(formatter)
    app.logger.addHandler(file_handler)


@app.route('/')
def sessions():
    """Root URL: show the logon page."""
    page = render_template('logonpage.html')
    return page

@app.route('/errorpage')
def errorpae():
    """Render the generic error page."""
    rendered = render_template('errorpage.html')
    return rendered

@app.route('/ChatApp/<UUID>')
Ejemplo n.º 43
0
# Import logging
import logging
from logging.handlers import RotatingFileHandler

# WSGI application object
app = Flask(__name__)

# Load configuration from the config_local module
app.config.from_object('config_local')

# Server-side session support
Session(app)

# Logging: rotating file (path from config, 10 kB per file, 1 backup),
# INFO and above
handler = RotatingFileHandler(app.config['LOG_FILE'], maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
app.logger.addHandler(handler)
logger = app.logger

# Database
db = SQLAlchemy(app)

# In-process cache
cache = SimpleCache()

# Stranica za gresku 404
@app.errorhandler(404)
def not_found(error):
Ejemplo n.º 44
0
#!usr/bin/env python
# -*- coding: utf-8 -*-

# from gevent import monkey

# monkey.patch_all()

import logging
from logging.handlers import RotatingFileHandler

from gevent.wsgi import WSGIServer

from application import create_app

# Module-level 'application' object so WSGI servers can discover it.
application = create_app()

if __name__ == '__main__':
    # Stand-alone mode: WARNING+ goes to a small rotating file, then we
    # serve forever on all interfaces, port 8080.
    handler = RotatingFileHandler(
        'app.log', maxBytes=10000, backupCount=1)
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s"))
    handler.setLevel(logging.WARNING)
    application.logger.addHandler(handler)

    http_server = WSGIServer(('', 8080), application)
    http_server.serve_forever()
Ejemplo n.º 45
0
import urllib
import os
import bottle
import sys
import traceback
import logging
from logging.handlers import RotatingFileHandler

# Logging setup: the "server" logger writes DEBUG+ to a rotating file.
# delay=True defers opening the file until the first record is emitted.
logFileName = 'server.log'

logger = logging.getLogger("server")
logger.setLevel(logging.DEBUG)

fhFormatter = logging.Formatter(
    '%(asctime)-25s %(name)-25s  %(levelname)-7s %(message)s')

rfh = RotatingFileHandler(logFileName,
                          mode='a',
                          maxBytes=26214400,
                          backupCount=2,
                          encoding=None,
                          delay=True)
rfh.setFormatter(fhFormatter)
logger.addHandler(rfh)


def do_monitor():
    try:
        exception = sys.exc_info()[1]
        stack = traceback.format_exc()

        if exception is not None:
Ejemplo n.º 46
0
# Change Accordingly While Deploying To A VPS
# NOTE(review): int(os.environ.get("APP_ID")) raises TypeError when the
# variable is unset -- an explicit error message would be friendlier.
APP_ID = int(os.environ.get("APP_ID"))

API_HASH = os.environ.get("API_HASH")

BOT_TOKEN = os.environ.get("BOT_TOKEN")

DB_URI = os.environ.get("DB_URI")

USER_SESSION = os.environ.get("USER_SESSION")

# Verification state shared across handlers (populated elsewhere).
VERIFY = {}

# INFO+ to a rotating file (50 MB per file, 10 backups) and the console.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s - %(levelname)s] - %(name)s - %(message)s",
    datefmt='%d-%b-%y %H:%M:%S',
    handlers=[
        RotatingFileHandler("autofilterbot.txt",
                            maxBytes=50000000,
                            backupCount=10),
        logging.StreamHandler()
    ])
# Silence pyrogram's chatty INFO output.
logging.getLogger("pyrogram").setLevel(logging.WARNING)

# Process start timestamp, used for uptime reporting.
start_uptime = time.time()


def LOGGER(name: str) -> logging.Logger:
    """Fetch (or lazily create) the logger registered under *name*."""
    named_logger = logging.getLogger(name)
    return named_logger
Ejemplo n.º 47
0
import dill
import pandas as pd
import os
import flask
import logging
from logging.handlers import RotatingFileHandler
from time import strftime

# Compatibility shim: maps the legacy 'ClassType' pickle reference to the
# built-in 'type'.
# NOTE(review): reaches into dill's private _dill module -- presumably
# needed to load a model pickled under Python 2; confirm it is still
# required for the model in use.
dill._dill._reverse_typemap['ClassType'] = type

# initialize our Flask application and the model
app = flask.Flask(__name__)
model = None  # populated by load_model() below before serving requests

# Rotating app log: 100 kB per file, 10 backups kept.
handler = RotatingFileHandler(filename='app.log',
                              maxBytes=100000,
                              backupCount=10)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(handler)  # no Formatter set: records use the default layout


def load_model(model_path):
    """Deserialise the pre-trained model at *model_path* into the global ``model``."""
    global model
    with open(model_path, 'rb') as model_file:
        model = dill.load(model_file)
    print(model)


# Default location of the serialised pipeline inside the container.
modelpath = "/usr/src/app/models/logreg_pipeline.dill"
Ejemplo n.º 48
0
def handle_websocket():
    """Bridge a browser WebSocket to IBM IoT device events.

    Flow (inferred from the code below -- confirm against the client):
    the browser sends one JSON message with "email" and "pin"; the pin is
    checked against the user's Cloudant document, and on success device
    events for the registered device are pushed back over the socket.
    Python 2 module: note the bare ``print`` statement below.
    """
    logger.info("Handling websocket")
    client = None

    def myEventCallback(event):
        # Forward each device event to the browser as JSON.
        try:
            if wsock:
                wsock.send(json.dumps(event.data))
        except WebSocketError as e:
            logger.error("WebSocket error in callback: %s" % str(e))
            # ignore this and let any Exception in receive() terminate the loop

    wsock = request.environ.get('wsgi.websocket')
    if not wsock:
        abort(400, 'Expected WebSocket request.')

    try:
        # First message must carry the credentials.
        message = wsock.receive()
        if message is None:
            abort(400, 'No data or non UTF-8 data received over WebSocket')

        data = json.loads(message)
        pin = data["pin"]

        # Look up the user's registration document in Cloudant (10 s timeout).
        doc = cloudantDb.document(urllib.quote(data["email"]))
        response = doc.get().result(10)
        if response.status_code != 200:
            logger.error("User not registered: %s" % data["email"])
            wsock.close()
        else:
            document = response.json()
            print document

            # Compare as strings so numeric and string JSON pins both work.
            if str(pin) != str(document["pin"]):
                logger.error("PIN for %s does not match (%s != %s)" %
                             (data["email"], pin, document["pin"]))
                wsock.close()
            else:
                deviceId = str(document['device']["id"])
                deviceType = str(document['device']["type"])
                options = {
                    "org": applicationOptions['org'],
                    "id": str(uuid.uuid4()),
                    "auth-method": applicationOptions['auth-method'],
                    "auth-key": applicationOptions['auth-key'],
                    "auth-token": applicationOptions['auth-token']
                }
                try:
                    # Per-user device log file, rotated at 100 kB
                    # (delay=True: not opened until first record).
                    clientsLogFileName = "device." + data["email"] + ".log"
                    fhFormatter = logging.Formatter(
                        '%(asctime)-25s %(name)-25s ' +
                        ' %(levelname)-7s %(message)s')
                    clientsLogHandler = RotatingFileHandler(clientsLogFileName,
                                                            mode='a',
                                                            maxBytes=102400,
                                                            backupCount=2,
                                                            encoding=None,
                                                            delay=True)
                    clientsLogHandler.setFormatter(fhFormatter)
                    logger.info("Using log file %s" % clientsLogFileName)
                    deviceLogger = logging.getLogger("device.%s" %
                                                     data["email"])
                    deviceLogger.propagate = False
                    deviceLogger.addHandler(clientsLogHandler)
                    deviceLogger.setLevel(logging.DEBUG)

                    # Connect to the IoT platform and subscribe to all
                    # event topics ("+") for this user's device.
                    client = ibmiotf.application.Client(
                        options, logHandlers=[clientsLogHandler])
                    client.logger.propagate = False

                    client.connect()
                    client.deviceEventCallback = myEventCallback
                    client.subscribeToDeviceEvents(deviceType, deviceId, "+")
                except ibmiotf.ConnectionException as e:
                    # We've been unable to do the initial connect. In this case, we'll terminate the socket to trigger the client to try again.
                    do_monitor()
                    logger.error("Connect attempt failed: %s" % str(e))
                    deviceLogger.error("Connect attempt failed: %s" % str(e))
                    wsock.close()
                    sys.exit(1)
    except WebSocketError as e:
        # NOTE(review): deviceLogger is only bound on the successful-PIN
        # path; these handlers raise NameError if the failure happens
        # earlier -- confirm and guard if needed.
        logger.error("WebSocket error during subscriber setup: %s" % str(e))
        deviceLogger.error("WebSocket error during subscriber setup: %s" %
                           str(e))
    except HTTPError as e:
        logger.error("HTTPError handling websocket: %s" % str(e))
        deviceLogger.error("HTTPError handling websocket: %s" % str(e))
        raise
    except:
        do_monitor()
        logger.error("Unexpected error:", sys.exc_info()[1])
        deviceLogger.error("Unexpected error:", sys.exc_info()[1])
        sys.exit(1)
    #Send the message back
    # Keep the socket open: the IoT callback pushes data; this loop only
    # watches for the browser disconnecting.
    while True:
        try:
            message = wsock.receive()
            time.sleep(1)
            #wsock.send("Your message was: %r" % message)
        except WebSocketError as e:
            # This can occur if the browser has navigated away from the page, so the best action to take is to stop.
            logger.error("WebSocket error during loop: %s" % str(e))
            deviceLogger.error("WebSocket error during loop: %s" % str(e))
            break
    # Always ensure we disconnect. Since we are using QoS0 and cleanSession=true, we don't need to worry about cleaning up old subscriptions as we go: the IoT Foundation
    # will handle this automatically.
    if client is not None:
        logger.info("Disconnecting client %s" % data["email"])
        deviceLogger.info("Disconnecting client %s" % data["email"])
        client.disconnect()
        client.logger.removeHandler(clientsLogHandler)

    if clientsLogHandler is not None:
        logger.info("Removing handler from device %s logger" % data["email"])
        deviceLogger.removeHandler(clientsLogHandler)
Ejemplo n.º 49
0
import logging
from logging.handlers import RotatingFileHandler
import os
from flask import Flask

app = Flask('devnone')

# Pick the config module based on the 'prod' environment variable.
if os.environ.get('prod'):
    app.config.from_object('devnone.conf')
else:
    app.config.from_object('devnone.conf_dev')

# Logging: JSON-shaped lines to a rotating file next to the package
# (10 MiB per file, 10 backups kept).
_log_dir = os.path.join(os.path.dirname(__file__), 'logs')
# Fix: create the log directory up front; RotatingFileHandler raises
# FileNotFoundError when it is missing.
os.makedirs(_log_dir, exist_ok=True)
handler = RotatingFileHandler(os.path.join(_log_dir, 'devnone.log'),
                              backupCount=10,
                              maxBytes=10 * 1024 * 1024)
handler.setLevel(logging.INFO)
handler.setFormatter(
    logging.Formatter(
        '{"%(levelname)s":"%(asctime)s", '
        '"%(funcName)s":%(lineno)d, "%(threadName)s":"%(message)s"}'))
app.logger.addHandler(handler)
Ejemplo n.º 50
0
    if (intent == "directions"):
        directions(request)
    elif (intent == "camera"):
        camera(request)
    elif (intent == "stop_camera"):
        stop_camera(request)

    app.logger.info(intent)
    return ''

@app.route("/", methods=['GET', 'POST'])
def dashboard():
    """Serve the dashboard page on GET; dispatch intents on POST."""
    if request.method != 'GET':
        handle(request)
        return 'intent handled'
    app.logger.info('Serving dashboard page')
    context = {
        'title': 'HELLO!',
        'time': 'time o clock'
    }
    return render_template('dashboard.html', **context)

if __name__ == "__main__":
    import os

    # Fix: ensure the log directory exists before the handler opens the
    # file (RotatingFileHandler raises FileNotFoundError otherwise).
    os.makedirs('logs', exist_ok=True)
    handler = RotatingFileHandler('logs/TSG.log', maxBytes=100000, backupCount=1)
    # Handler and app logger both record at DEBUG and above.
    # (The original comment claimed INFO, which did not match the code.)
    handler.setLevel(logging.DEBUG)
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)
    # Listen on all interfaces; debug=True is development-only.
    app.run(host='0.0.0.0', port=80, debug=True)
Ejemplo n.º 51
0
def create_app(config_class=Config):
    """Application factory: build and configure a Microblog Flask app.

    Args:
        config_class: configuration object/class handed to
            ``app.config.from_object`` (defaults to ``Config``).

    Returns:
        The fully initialised Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Bind the shared extension instances to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    # Optional service: Elasticsearch only when a URL is configured.
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)

    # Blueprints (imported here, not at module top, presumably to avoid
    # circular imports -- keep the imports local).
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    # Production-style logging only (skipped under debug/testing).
    if not app.debug and not app.testing:
        # Email ERROR-level records to the admins when mail is configured.
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # INFO+ either to stdout (container-friendly) or a rotating file.
        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/microblog.log',
                                               maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
Ejemplo n.º 52
0
else:
    lvl = logging.DEBUG

# Root logger wiring: one shared formatter feeds a NullHandler, a console
# StreamHandler and a rotating file handler.
handle = logging.NullHandler()

frmt = logging.Formatter(CONFIG.get('logformat', '%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s'))
handle.setFormatter(frmt)
LOG.addHandler(handle)

# CONSOLE
stream_handle = logging.StreamHandler()
stream_handle.setFormatter(frmt)
LOG.addHandler(stream_handle)

# Fix: removed a duplicated setFormatter/addHandler pair for 'handle'
# that used to sit here -- Logger.addHandler ignores already-attached
# handlers, so it was dead code.

# FILE: 500 kB per file, 3 backups kept
rfh = RotatingFileHandler(LOG_FILE, 'a', 512000, 3)
rfh.setFormatter(frmt)
LOG.addHandler(rfh)

LOG.setLevel(lvl)

# Quiet down chatty third-party loggers.
logging.getLogger("plexapi").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)


Ejemplo n.º 53
0
# Main

if __name__ == "__main__":

    # Set default encoding for python 2.x (no need in python 3.x)
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding("utf-8")

    # Set logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    if not os.path.isdir(LOG_DIR):
        os.mkdir(LOG_DIR, 0o755)
    log_handler = RotatingFileHandler("{0}/{1}".format(LOG_DIR, LOG_FILE),
                                      maxBytes=10485760,
                                      backupCount=10,
                                      encoding="utf-8")
    os.chmod("{0}/{1}".format(LOG_DIR, LOG_FILE), 0o600)
    log_handler.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter(
        fmt=
        '%(asctime)s %(filename)s %(name)s %(process)d/%(threadName)s %(levelname)s: %(message)s',
        datefmt="%Y-%m-%d %H:%M:%S %Z")
    log_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    logger.addHandler(console_handler)

    # Set parser and parse args
Ejemplo n.º 54
0
    def parse_args(self, gen_be=True):
        '''
        Parse the command line arguments and setup neon
        runtime environment accordingly

        Arguments:
            gen_be (bool): if False, the arg parser will not
                           generate the backend

        Returns:
            namespace: contains the parsed arguments as attributes

        Raises:
            NotImplementedError: stochastic rounding requested on the
                CPU backend.
            IOError: save_path or model_file is not readable/writable.
        '''
        args = super(NeonArgparser, self).parse_args()
        err_msg = None  # used for relaying exception to logger

        # set up the logging
        # max thresh is 50 (critical only), min is 10 (debug or higher)
        # each -v lowers the threshold by 10, floored at DEBUG
        try:
            log_thresh = max(10, 40 - args.verbose*10)
        except (AttributeError, TypeError):
            # if defaults are not set or not -v given
            # for latter will get type error
            log_thresh = 40
        args.log_thresh = log_thresh

        # logging formater
        fmtr = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # get the parent logger for neon
        main_logger = logging.getLogger('neon')
        main_logger.setLevel(log_thresh)

        # setup a console stderr log handler
        stderrlog = logging.StreamHandler()
        stderrlog.setFormatter(fmtr)

        if args.logfile:
            # add log to file as well (mode='w': each run truncates it)
            filelog = RotatingFileHandler(filename=args.logfile, mode='w',
                                          maxBytes=10000000, backupCount=5)
            filelog.setFormatter(fmtr)
            filelog.setLevel(log_thresh)
            main_logger.addHandler(filelog)

            # if a log file is specified and progress bar displayed,
            # log only errors to console.
            if args.no_progress_bar:
                stderrlog.setLevel(log_thresh)
            else:
                stderrlog.setLevel(logging.ERROR)
        else:
            stderrlog.setLevel(log_thresh)

        # add this handler instead
        main_logger.propagate = False
        main_logger.addHandler(stderrlog)

        # need to write out float otherwise numpy
        # generates type in bytes not bits (f16 == 128 bits)
        args.datatype = 'float' + args.datatype[1:]
        args.datatype = np.dtype(args.datatype).type

        # invert no_progress_bar meaning and store in args.progress_bar
        args.progress_bar = not args.no_progress_bar

        if args.backend == 'cpu' and args.rounding > 0:
            err_msg = 'CPU backend does not support stochastic rounding'
            logger.exception(err_msg)
            raise NotImplementedError(err_msg)

        # done up front to avoid losing data due to incorrect path
        if args.save_path:
            savedir = os.path.dirname(os.path.abspath(args.save_path))
            if not os.access(savedir, os.R_OK | os.W_OK):
                err_msg = 'Can not write to save_path dir %s' % savedir
            if os.path.exists(args.save_path):
                logger.warning('save file %s exists, attempting to overwrite' % args.save_path)
                if not os.access(args.save_path, os.R_OK | os.W_OK):
                    err_msg = 'Can not write to save_path file %s' % args.save_path
            if err_msg:
                logger.exception(err_msg)
                raise IOError(err_msg)

        # serialization schedule and save path imply each other:
        # fill in a default for whichever one is missing.
        if (args.serialize > 0) and (args.save_path is None):
            args.save_path = "neon_model.pkl"
            logger.warn('No path given for model serialization, using default "%s"',
                        args.save_path)
        if (args.save_path is not None) and (args.serialize == 0):
            args.serialize = 1
            logger.warn('No schedule given for model serialization, using default %d',
                        args.serialize)

        if args.model_file:
            err_msg = None
            if not os.path.exists(args.model_file):
                err_msg = 'Model file %s not present' % args.model_file
            if not os.access(args.model_file, os.R_OK):
                err_msg = 'No read access for model file %s' % args.model_file
            if err_msg:
                logger.exception(err_msg)
                raise IOError(err_msg)

        # extended parsers may need to generate backend after argparsing
        if gen_be:
            # generate the backend
            gen_backend(backend=args.backend,
                        rng_seed=args.rng_seed,
                        device_id=args.device_id,
                        batch_size=args.batch_size,
                        default_dtype=args.datatype,
                        stochastic_round=args.rounding)

        # display what command line / config options were set (and from where)
        logger.info(self.format_values())

        self._PARSED = True
        self.args = args
        return args
Ejemplo n.º 55
0
import redis
from flask_login import LoginManager
from flask_marshmallow import Marshmallow
from webargs.flaskparser import FlaskParser
from flask_jwt_extended import JWTManager
from logging.handlers import RotatingFileHandler
from flask_sqlalchemy import SQLAlchemy

import os

# Request-parsing / auth extensions (presumably bound to the app
# elsewhere via init_app -- verify).
parser = FlaskParser()
jwt = JWTManager()

# Rotating application log: 1 MB per file, 30 backups kept.
# Fix: create the log directory first; RotatingFileHandler raises
# FileNotFoundError when 'logs/' is missing.
os.makedirs('logs', exist_ok=True)
app_log_handler = RotatingFileHandler('logs/app.log',
                                      maxBytes=1000000,
                                      backupCount=30)

# NOTE(review): '******' looks like a scrubbed placeholder -- the real
# Redis password should come from config/env, not source.
red = redis.StrictRedis(host='localhost',
                        port=6379,
                        password='******',
                        decode_responses=True)
db = SQLAlchemy()
ma = Marshmallow()
login_manager = LoginManager()
Ejemplo n.º 56
0
import os
from flask_login import LoginManager
from flask_openid import OpenID
from config import basedir

# Flask-Login: anonymous users are redirected to the 'login' view.
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'

# OpenID store kept under <basedir>/tmp.
oid = OpenID(app, os.path.join(basedir, 'tmp'))

if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler

    # Rotating file log: 1 MiB per file, 10 backups, INFO and above.
    # NOTE(review): 'tmp/microblog.log' is relative to the process CWD,
    # while OpenID above uses basedir -- confirm both resolve to the same
    # directory in deployment.
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a',
                                       1 * 1024 * 1024, 10)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))

    app.logger.setLevel(logging.INFO)

    file_handler.setLevel(logging.INFO)

    app.logger.addHandler(file_handler)

    app.logger.info('microblog startup')

# Full-text search extension (presumably init_app'd later -- verify).
from flask_msearch import Search
search = Search()
Ejemplo n.º 57
0
def create_app(config_filename="config.development.Config",
               app_name=None,
               register_blueprints=True):
    """Application factory: build and fully configure the Flask app.

    :param config_filename: dotted path of the config object to load when the
        APP_SETTINGS environment variable is not set.
    :param app_name: optional Flask application name (defaults to __name__).
    :param register_blueprints: set to False (e.g. in some tests) to skip
        blueprint and Swagger registration.
    :returns: the configured Flask application.
    """
    # App configuration: APP_SETTINGS wins over the passed-in default; fall
    # back to the base config if the requested one cannot be imported.
    app = Flask(app_name or __name__)
    app_settings = os.getenv("APP_SETTINGS", config_filename)
    print(f" * Loading config: '{app_settings}'")
    try:
        cfg = import_string(app_settings)()
    except ImportError:
        print(" *** Cannot import config ***")
        cfg = import_string("config.config.BaseConfig")
        print(" *** Default config loaded, expect problems ***")
    if hasattr(cfg, "post_load"):
        print(" *** Doing some magic")
        cfg.post_load()
    app.config.from_object(cfg)

    # Trust one level of reverse proxy for scheme/host headers.
    app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)

    Bootstrap(app)

    app.jinja_env.add_extension("jinja2.ext.with_")
    app.jinja_env.add_extension("jinja2.ext.do")
    app.jinja_env.globals.update(is_admin=is_admin)

    if HAS_SENTRY:
        sentry_sdk.init(
            app.config["SENTRY_DSN"],
            integrations=[SentryFlaskIntegration(),
                          SentryCeleryIntegration()],
            release=f"{VERSION} ({GIT_VERSION})",
        )
        print(" * Sentry Flask/Celery support activated")
        print(" * Sentry DSN: %s" % app.config["SENTRY_DSN"])

    if app.debug:
        app.jinja_env.auto_reload = True
        logging.basicConfig(level=logging.DEBUG)

    # Logging: in production, INFO and above goes to a rotating file in the
    # current working directory (1 MB, one backup).
    if not app.debug:
        formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s "
                                      "[in %(pathname)s:%(lineno)d]")
        file_handler = RotatingFileHandler("%s/errors_app.log" % os.getcwd(),
                                           "a", 1000000, 1)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        app.logger.addHandler(file_handler)

    CORS(app, origins=["*"])

    if app.debug:
        logging.getLogger("flask_cors.extension").level = logging.DEBUG

    mail = Mail(app)  # noqa: F841
    migrate = Migrate(app, db)  # noqa: F841 lgtm [py/unused-local-variable]
    babel = Babel(app)  # noqa: F841
    app.babel = babel

    # Swagger/OpenAPI skeleton used when register_blueprints is True.
    template = {
        "swagger": "2.0",
        "info": {
            "title": "reel2bits API",
            "description": "API instance",
            "version": VERSION
        },
        "host": app.config["AP_DOMAIN"],
        "basePath": "/",
        "schemes": ["https"],
        "securityDefinitions": {
            "OAuth2": {
                "type": "oauth2",
                "flows": {
                    "authorizationCode": {
                        "authorizationUrl":
                        f"https://{app.config['AP_DOMAIN']}/oauth/authorize",
                        "tokenUrl":
                        f"https://{app.config['AP_DOMAIN']}/oauth/token",
                        "scopes": {
                            "read": "Grants read access",
                            "write": "Grants write access",
                            "admin": "Grants admin operations",
                        },
                    }
                },
            }
        },
        "consumes": ["application/json", "application/jrd+json"],
        "produces": ["application/json", "application/jrd+json"],
    }

    db.init_app(app)

    # ActivityPub backend
    back = Reel2BitsBackend()
    ap.use_backend(back)

    # Oauth
    config_oauth(app)

    # Setup Flask-Security
    security = Security(
        app, user_datastore)  # noqa: F841 lgtm [py/unused-local-variable]

    @FlaskSecuritySignals.password_reset.connect_via(app)
    @FlaskSecuritySignals.password_changed.connect_via(app)
    def log_password_reset(sender, user):
        # Audit-log password changes/resets for the affected user.
        if not user:
            return
        add_user_log(user.id, user.id, "user", "info",
                     "Your password has been changed !")

    @FlaskSecuritySignals.reset_password_instructions_sent.connect_via(app)
    def log_reset_password_instr(sender, user, token):
        if not user:
            return
        add_user_log(user.id, user.id, "user", "info",
                     "Password reset instructions sent.")

    @FlaskSecuritySignals.user_registered.connect_via(app)
    def create_actor_for_registered_user(app, user, confirm_token):
        # Every new local user gets an ActivityPub actor.
        if not user:
            return
        actor = create_actor(user)
        actor.user = user
        actor.user_id = user.id
        db.session.add(actor)
        db.session.commit()

    @babel.localeselector
    def get_locale():
        # if a user is logged in, use the locale from the user settings
        identity = getattr(g, "identity", None)
        if identity is not None and identity.id:
            return identity.user.locale
        # otherwise try to guess the language from the user accept
        # header the browser transmits.  We support fr/en in this
        # example.  The best match wins.
        return request.accept_languages.best_match(AVAILABLE_LOCALES)

    @babel.timezoneselector
    def get_timezone():
        identity = getattr(g, "identity", None)
        if identity is not None and identity.id:
            return identity.user.timezone

    @app.before_request
    def before_request():
        # Expose instance-wide settings on `g.cfg` for templates/handlers.
        _config = Config.query.first()
        if not _config:
            flash(gettext("Config not found"), "error")

        # FIX: previously this dereferenced `_config` unconditionally, so a
        # missing Config row raised AttributeError on every request instead
        # of just flashing the warning above.
        cfg = {
            "REEL2BITS_VERSION_VER": VERSION,
            "REEL2BITS_VERSION_GIT": GIT_VERSION,
            "app_name": _config.app_name if _config else None,
            "app_description": _config.app_description if _config else None,
        }
        if GIT_VERSION:
            cfg["REEL2BITS_VERSION"] = "{0}-{1}".format(VERSION, GIT_VERSION)
        else:
            cfg["REEL2BITS_VERSION"] = VERSION

        g.cfg = cfg

    @app.errorhandler(InvalidUsage)
    def handle_invalid_usage(error):
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response

    # Tracks files upload set
    sounds = UploadSet("sounds", AUDIO)
    configure_uploads(app, sounds)

    # Album artwork upload set
    artworkalbums = UploadSet("artworkalbums",
                              Reel2bitsDefaults.artwork_extensions_allowed)
    configure_uploads(app, artworkalbums)

    # Track artwork upload set
    artworksounds = UploadSet("artworksounds",
                              Reel2bitsDefaults.artwork_extensions_allowed)
    configure_uploads(app, artworksounds)

    # User avatars
    avatars = UploadSet("avatars", Reel2bitsDefaults.avatar_extensions_allowed)
    configure_uploads(app, avatars)

    # Total max size upload for the whole app
    patch_request_class(app, app.config["UPLOAD_TRACK_MAX_SIZE"])

    app.flake_id = FlakeId()

    if register_blueprints:
        from controllers.main import bp_main

        app.register_blueprint(bp_main)

        from controllers.admin import bp_admin

        app.register_blueprint(bp_admin)

        # ActivityPub
        from controllers.api.v1.well_known import bp_wellknown

        app.register_blueprint(bp_wellknown)

        from controllers.api.v1.nodeinfo import bp_nodeinfo

        app.register_blueprint(bp_nodeinfo)

        from controllers.api.v1.ap import bp_ap

        # Feeds
        from controllers.feeds import bp_feeds

        app.register_blueprint(bp_feeds)

        # API
        app.register_blueprint(bp_ap)

        from controllers.api.v1.auth import bp_api_v1_auth

        app.register_blueprint(bp_api_v1_auth)

        from controllers.api.v1.accounts import bp_api_v1_accounts

        app.register_blueprint(bp_api_v1_accounts)

        from controllers.api.v1.timelines import bp_api_v1_timelines

        app.register_blueprint(bp_api_v1_timelines)

        from controllers.api.v1.notifications import bp_api_v1_notifications

        app.register_blueprint(bp_api_v1_notifications)

        from controllers.api.tracks import bp_api_tracks

        app.register_blueprint(bp_api_tracks)

        from controllers.api.albums import bp_api_albums

        app.register_blueprint(bp_api_albums)

        from controllers.api.account import bp_api_account

        app.register_blueprint(bp_api_account)

        from controllers.api.reel2bits import bp_api_reel2bits

        app.register_blueprint(bp_api_reel2bits)

        # Pleroma API
        from controllers.api.pleroma_admin import bp_api_pleroma_admin

        app.register_blueprint(bp_api_pleroma_admin)

        # OEmbed
        from controllers.api.oembed import bp_api_oembed

        app.register_blueprint(bp_api_oembed)

        # Iframe
        from controllers.api.embed import bp_api_embed

        app.register_blueprint(bp_api_embed)

        swagger = Swagger(
            app,
            template=template)  # noqa: F841 lgtm [py/unused-local-variable]

        # SPA catchalls for meta tags
        from controllers.spa import bp_spa

        app.register_blueprint(bp_spa)

    @app.route("/uploads/<string:thing>/<path:stuff>", methods=["GET"])
    @cross_origin(origins="*",
                  methods=["GET", "HEAD", "OPTIONS"],
                  expose_headers="content-length",
                  send_wildcard=True)
    def get_uploads_stuff(thing, stuff):
        # Serve uploads directly in dev/test; delegate to Nginx in production
        # through X-Accel-Redirect so Flask never streams the file itself.
        if app.testing or app.debug:
            directory = safe_join(app.config["UPLOADS_DEFAULT_DEST"], thing)
            app.logger.debug(f"serving {stuff} from {directory}")
            return send_from_directory(directory, stuff, as_attachment=True)
        else:
            app.logger.debug(f"X-Accel-Redirect serving {stuff}")
            resp = Response("")
            resp.headers[
                "Content-Disposition"] = f"attachment; filename={stuff}"
            resp.headers[
                "X-Accel-Redirect"] = f"/_protected/media/{thing}/{stuff}"
            resp.headers[
                "Content-Type"] = ""  # empty it so Nginx will guess it correctly
            return resp

    def render_tags(tags):
        """
        Given a dict like {'tag': 'meta', 'hello': 'world'}
        return a html ready tag like
        <meta hello="world" />
        """
        for tag in tags:
            yield "<{tag} {attrs} />".format(
                tag=tag.pop("tag"),
                attrs=" ".join([
                    '{}="{}"'.format(a, html.escape(str(v)))
                    for a, v in sorted(tag.items()) if v
                ]),
            )

    @app.errorhandler(404)
    def page_not_found(msg):
        # API-ish paths get a JSON 404; everything else gets the SPA shell
        # with request-specific meta tags injected into <head>.
        excluded = ["/api", "/.well-known", "/feeds", "/oauth/authorize"]
        if any([request.path.startswith(m) for m in excluded]):
            return jsonify({"error": "page not found"}), 404

        html = get_spa_html(app.config["REEL2BITS_SPA_HTML"])
        head, tail = html.split("</head>", 1)

        request_tags = get_request_head_tags(request)

        default_tags = get_default_head_tags(request.path)
        unique_attributes = ["name", "property"]

        final_tags = request_tags
        skip = []

        # Request-specific tags win: remember their name/property values and
        # only append default tags that do not collide.
        for t in final_tags:
            for attr in unique_attributes:
                if attr in t:
                    skip.append(t[attr])
        for t in default_tags:
            existing = False
            for attr in unique_attributes:
                if t.get(attr) in skip:
                    existing = True
                    break
            if not existing:
                final_tags.append(t)

        head += "\n" + "\n".join(render_tags(final_tags)) + "\n</head>"
        return head + tail

    @app.errorhandler(403)
    def err_forbidden(msg):
        if request.path.startswith("/api/"):
            return jsonify({"error": "access forbidden"}), 403
        pcfg = {
            "title": gettext("Whoops, something failed."),
            "error": 403,
            "message": gettext("Access forbidden"),
            "e": msg,
        }
        return render_template("error_page.jinja2", pcfg=pcfg), 403

    @app.errorhandler(410)
    def err_gone(msg):
        if request.path.startswith("/api/"):
            return jsonify({"error": "gone"}), 410
        pcfg = {
            "title": gettext("Whoops, something failed."),
            "error": 410,
            "message": gettext("Gone"),
            "e": msg
        }
        return render_template("error_page.jinja2", pcfg=pcfg), 410

    # In debug mode let Flask's interactive debugger handle 500s instead.
    if not app.debug:

        @app.errorhandler(500)
        def err_failed(msg):
            if request.path.startswith("/api/"):
                return jsonify({"error": "server error"}), 500
            pcfg = {
                "title": gettext("Whoops, something failed."),
                "error": 500,
                "message": gettext("Something is broken"),
                "e": msg,
            }
            return render_template("error_page.jinja2", pcfg=pcfg), 500

    @app.after_request
    def set_x_powered_by(response):
        response.headers["X-Powered-By"] = "reel2bits"
        return response

    # Register CLI commands
    app.cli.add_command(commands.db_datas)
    app.cli.add_command(commands.users)
    app.cli.add_command(commands.roles)
    app.cli.add_command(commands.tracks)
    app.cli.add_command(commands.system)

    return app
Ejemplo n.º 58
0
else:
    DryRun = False

###############################################################################

# Logging configuration
# =====================

logger = logging.getLogger()  # root logger: everything propagates here
logger.setLevel(LogLevel)  # LogLevel is defined earlier in this script

formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')

# set log into file .log
# Rotating file next to the script: rolls over at ~1 MB, one backup kept.
file_handler = RotatingFileHandler(CurrentLocation + '/social-pipe.log', 'a',
                                   1000000, 1)
file_handler.setLevel(LogLevel)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# set log into output std
stream_handler = logging.StreamHandler()  # defaults to sys.stderr
stream_handler.setLevel(LogLevel)
logger.addHandler(stream_handler)

###############################################################################

# Avoiding multiple executions
# ============================

#if path.isfile(FlagFile):
Ejemplo n.º 59
0
#     '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')

### Independent logs in file
logger = logging.getLogger(__name__)
# Configure handlers only once, even if this module is imported repeatedly,
# to avoid duplicate log lines.
if not logger.handlers:
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logger.setLevel(logging.DEBUG)

    # handler_name = logging.FileHandler(__name__ + '.log')
    # handler_name.setLevel(logging.DEBUG)
    # handler_name.setFormatter(formatter)

    # Rotating backup log: 5 MB per file, up to 999 backups; INFO and above.
    handler_backup = RotatingFileHandler('backup_for_scriptlog.log',
                                         maxBytes=5 * 1024 * 1024,
                                         backupCount=999)
    handler_backup.setLevel(logging.INFO)
    handler_backup.setFormatter(formatter)

    logger.addHandler(handler_backup)
    # logger.addHandler(handler_name)

### global variable
# JSON-style literal aliases -- presumably so raw JSON/JS-style text can be
# eval'ed directly as Python; verify against callers before relying on this.
null = None
true = True
false = False


def use_jsonpointer(doc, pointer):
    def main(self):

        # import appdaemon.stacktracer
        # appdaemon.stacktracer.trace_start("/tmp/trace.html")

        # Windows does not support SIGUSR1 or SIGUSR2
        if platform.system() != "Windows":
            signal.signal(signal.SIGUSR1, self.handle_sig)
            signal.signal(signal.SIGINT, self.handle_sig)
            signal.signal(signal.SIGHUP, self.handle_sig)

        # Get command line args

        parser = argparse.ArgumentParser()

        parser.add_argument("-c",
                            "--config",
                            help="full path to config directory",
                            type=str,
                            default=None)
        parser.add_argument("-p",
                            "--pidfile",
                            help="full path to PID File",
                            default="/tmp/hapush.pid")
        parser.add_argument(
            "-t",
            "--tick",
            help="time that a tick in the schedular lasts (seconds)",
            default=1,
            type=float)
        parser.add_argument(
            "-s",
            "--starttime",
            help="start time for scheduler <YYYY-MM-DD HH:MM:SS>",
            type=str)
        parser.add_argument(
            "-e",
            "--endtime",
            help="end time for scheduler <YYYY-MM-DD HH:MM:SS>",
            type=str,
            default=None)
        parser.add_argument("-i",
                            "--interval",
                            help="multiplier for scheduler tick",
                            type=float,
                            default=1)
        parser.add_argument(
            "-D",
            "--debug",
            help="debug level",
            default="INFO",
            choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
        parser.add_argument('-v',
                            '--version',
                            action='version',
                            version='%(prog)s ' + utils.__version__)
        parser.add_argument('--profiledash',
                            help=argparse.SUPPRESS,
                            action='store_true')

        # Windows does not have Daemonize package so disallow
        if platform.system() != "Windows":
            parser.add_argument("-d",
                                "--daemon",
                                help="run as a background process",
                                action="store_true")

        args = parser.parse_args()

        config_dir = args.config

        if platform.system() != "Windows":
            from daemonize import Daemonize

        if platform.system() != "Windows":
            isdaemon = args.daemon
        else:
            isdaemon = False

        if config_dir is None:
            config_file_yaml = utils.find_path("appdaemon.yaml")
        else:
            config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")

        if config_file_yaml is None:
            print(
                "FATAL: no configuration directory defined and defaults not present\n"
            )
            parser.print_help()
            sys.exit(1)

        config = None

        #
        # First locate secrets file
        #
        try:

            #
            # Initially load file to see if secret directive is present
            #
            yaml.add_constructor('!secret', utils._dummy_secret)
            with open(config_file_yaml, 'r') as yamlfd:
                config_file_contents = yamlfd.read()

            config = yaml.load(config_file_contents)

            if "secrets" in config:
                secrets_file = config["secrets"]
            else:
                secrets_file = os.path.join(os.path.dirname(config_file_yaml),
                                            "secrets.yaml")

            #
            # Read Secrets
            #
            if os.path.isfile(secrets_file):
                with open(secrets_file, 'r') as yamlfd:
                    secrets_file_contents = yamlfd.read()

                utils.secrets = yaml.load(secrets_file_contents)

            else:
                if "secrets" in config:
                    print(
                        "ERROR", "Error loading secrets file: {}".format(
                            config["secrets"]))
                    sys.exit()

            #
            # Read config file again, this time with secrets
            #
            yaml.add_constructor('!secret', utils._secret_yaml)

            with open(config_file_yaml, 'r') as yamlfd:
                config_file_contents = yamlfd.read()

            config = yaml.load(config_file_contents)

        except yaml.YAMLError as exc:
            print("ERROR", "Error loading configuration")
            if hasattr(exc, 'problem_mark'):
                if exc.context is not None:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem) + " " + str(exc.context))
                else:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem))
            sys.exit()

        if "appdaemon" not in config:
            print("ERROR",
                  "no 'appdaemon' section in {}".format(config_file_yaml))
            sys.exit()

        appdaemon = config["appdaemon"]
        if "disable_apps" not in appdaemon:
            appdaemon["disable_apps"] = False

        appdaemon["config_dir"] = config_dir
        appdaemon["config_file"] = config_file_yaml
        appdaemon["app_config_file"] = os.path.join(
            os.path.dirname(config_file_yaml), "apps.yaml")

        if args.starttime is not None:
            appdaemon["starttime"] = args.starttime

        if args.endtime is not None:
            appdaemon["endtime"] = args.endtime

        appdaemon["tick"] = args.tick
        appdaemon["interval"] = args.interval
        appdaemon["loglevel"] = args.debug

        appdaemon["config_dir"] = os.path.dirname(config_file_yaml)

        appdaemon["stop_function"] = self.stop

        if "hadashboard" in config:
            hadashboard = config["hadashboard"]
            hadashboard["profile_dashboard"] = args.profiledash
            hadashboard["config_dir"] = config_dir
            hadashboard["config_file"] = config_file_yaml
            hadashboard["config_dir"] = os.path.dirname(config_file_yaml)
            if args.profiledash:
                hadashboard["profile_dashboard"] = True

            if "dashboard" not in hadashboard:
                hadashboard["dashboard"] = True

        else:
            hadashboard = {"dashboard": False}

        if "log" not in config:
            logfile = "STDOUT"
            errorfile = "STDERR"
            diagfile = "STDOUT"
            log_size = 1000000
            log_generations = 3
            accessfile = None
        else:
            logfile = config['log'].get("logfile", "STDOUT")
            errorfile = config['log'].get("errorfile", "STDERR")
            diagfile = config['log'].get("diagfile", "NONE")
            if diagfile == "NONE":
                diagfile = logfile
            log_size = config['log'].get("log_size", 1000000)
            log_generations = config['log'].get("log_generations", 3)
            accessfile = config['log'].get("accessfile")

        if isdaemon and (logfile == "STDOUT" or errorfile == "STDERR"
                         or logfile == "STDERR" or errorfile == "STDOUT"):
            print("ERROR", "STDOUT and STDERR not allowed with -d")
            sys.exit()

        # Setup Logging

        self.logger = logging.getLogger("log1")
        numeric_level = getattr(logging, args.debug, None)
        self.logger.setLevel(numeric_level)
        self.logger.propagate = False
        # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

        # Send to file if we are daemonizing, else send to console

        fh = None
        if logfile != "STDOUT":
            fh = RotatingFileHandler(logfile,
                                     maxBytes=log_size,
                                     backupCount=log_generations)
            fh.setLevel(numeric_level)
            # fh.setFormatter(formatter)
            self.logger.addHandler(fh)
        else:
            # Default for StreamHandler() is sys.stderr
            ch = logging.StreamHandler(stream=sys.stdout)
            ch.setLevel(numeric_level)
            # ch.setFormatter(formatter)
            self.logger.addHandler(ch)

        # Setup compile output

        self.error = logging.getLogger("log2")
        numeric_level = getattr(logging, args.debug, None)
        self.error.setLevel(numeric_level)
        self.error.propagate = False
        # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

        if errorfile != "STDERR":
            efh = RotatingFileHandler(errorfile,
                                      maxBytes=log_size,
                                      backupCount=log_generations)
        else:
            efh = logging.StreamHandler()

        efh.setLevel(numeric_level)
        # efh.setFormatter(formatter)
        self.error.addHandler(efh)

        # setup diag output

        self.diag = logging.getLogger("log3")
        numeric_level = getattr(logging, args.debug, None)
        self.diag.setLevel(numeric_level)
        self.diag.propagate = False
        # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

        if diagfile != "STDOUT":
            dfh = RotatingFileHandler(diagfile,
                                      maxBytes=log_size,
                                      backupCount=log_generations)
        else:
            dfh = logging.StreamHandler()

        dfh.setLevel(numeric_level)
        # dfh.setFormatter(formatter)
        self.diag.addHandler(dfh)

        # Setup dash output
        if accessfile is not None:
            self.access = logging.getLogger("log4")
            numeric_level = getattr(logging, args.debug, None)
            self.access.setLevel(numeric_level)
            self.access.propagate = False
            # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
            efh = RotatingFileHandler(config['log'].get("accessfile"),
                                      maxBytes=log_size,
                                      backupCount=log_generations)

            efh.setLevel(numeric_level)
            # efh.setFormatter(formatter)
            self.access.addHandler(efh)
        else:
            self.access = self.logger

        # Startup message

        self.log(self.logger, "INFO",
                 "AppDaemon Version {} starting".format(utils.__version__))
        self.log(self.logger, "INFO",
                 "Configuration read from: {}".format(config_file_yaml))
        self.log(self.logger, "DEBUG",
                 "AppDaemon Section: {}".format(config.get("AppDaemon")))
        self.log(self.logger, "DEBUG",
                 "HADashboard Section: {}".format(config.get("HADashboard")))

        if isdaemon:
            keep_fds = [fh.stream.fileno(), efh.stream.fileno()]
            pid = args.pidfile
            daemon = Daemonize(app="appdaemon",
                               pid=pid,
                               action=self.run,
                               keep_fds=keep_fds)
            daemon.start()
            while True:
                time.sleep(1)
        else:
            self.run(appdaemon, hadashboard)