Code Example #1
File: autotune.py Project: maggishaggy/scVI
def progress_listener(progress_queue, logging_queue):
    """Listens to workers when they finish a job and logs progress.
    Workers put in the progress_queue when they finish a job
    and when they do this function sends a log to the progress logger.
    """
    # write all logs to queue
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    queue_handler = QueueHandler(logging_queue)
    queue_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(queue_handler)
    logger.debug("Listener listening...")

    progress_logger = logging.getLogger("progress_logger")

    i = 0
    while True:
        # get job done signal
        progress_queue.get()
        i += 1
        logger.info("{i} job.s done".format(i=i))
        # update progress bar through ProgressHandler
        progress_logger.info(None)
        if cleanup_event.is_set():
            break
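A hedged sketch of the parent-process wiring this listener expects: both queues live in the main process, a QueueListener drains logging_queue into a real handler, and progress_listener runs in its own process. The handler choice and process layout are illustrative, not scVI's actual launcher.

import logging
import multiprocessing
from logging.handlers import QueueListener

if __name__ == "__main__":
    progress_queue = multiprocessing.Queue()
    logging_queue = multiprocessing.Queue()

    # Drain records sent by the children's QueueHandlers into a console handler.
    listener = QueueListener(logging_queue, logging.StreamHandler())
    listener.start()

    proc = multiprocessing.Process(
        target=progress_listener, args=(progress_queue, logging_queue))
    proc.start()
    # ... launch workers, set cleanup_event once they finish ...
    proc.join()
    listener.stop()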
Code Example #2
def get_logger(filename: Optional[str] = None, q: Optional[Queue] = None) -> Logger:
    """
    Retrieves the logger for the current process for logging to the log file

    If no filename is provided, the logger for the current process is assumed to already have
    handlers registered, and will be returned.

    If a filename is provided and the logger has no handlers, a handler will be created and registered.

    Args:
        filename: The name of the file to log to
        q: The queue used to pass messages if the collector is running in debug mode

    Returns:
        A logger that can be used to log messages
    """
    # Get the logger for the current process id
    logger = logging.getLogger(str(os.getpid()))
    # If the logger does not have any handlers registered i.e. on collector start
    if not logger.hasHandlers() and filename is not None:
        # Create a handler and register it to the logger
        formatter = logging.Formatter(fmt="%(asctime)s %(filename)-25s %(levelname)-5s %(message)s",
                                      datefmt="%Y-%m-%d %H:%M:%S")
        handler = logging.FileHandler(f"LOGS/{filename}.log")
        handler.setFormatter(formatter)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        if q is not None:
            q_handler = QueueHandler(q)
            q_handler.setLevel(logging.DEBUG)
            logger.addHandler(q_handler)
    return logger
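A hedged usage sketch: the first call in a process passes filename so the handlers get registered; later calls return the same per-PID logger untouched. The "collector" name is illustrative, and the hard-coded LOGS/ directory must exist because FileHandler will not create it.

import os

os.makedirs("LOGS", exist_ok=True)

log = get_logger(filename="collector")  # first call: registers the handlers
log.info("collector started, pid=%s", os.getpid())

same = get_logger()  # later calls: same logger, no new handlers
assert same is log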
Code Example #3
    def configure_logging(self):
        """Configure process logger."""
        self._root_logger = logging.getLogger()
        self._root_logger.setLevel(logging.INFO)

        _qh = QueueHandler(self._queue)
        _qh.setLevel(logging.INFO)
        self._root_logger.addHandler(_qh)
Code Example #4
File: logging.py Project: hwwhww/py-evm
def setup_queue_logging(log_queue: 'Queue[str]', level: int) -> None:
    queue_handler = QueueHandler(log_queue)
    queue_handler.setLevel(level)

    logger = cast(TraceLogger, logging.getLogger())
    logger.addHandler(queue_handler)
    logger.setLevel(level)

    logger.debug('Logging initialized: PID=%s', os.getpid())
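This setup_queue_logging (and its variants below) installs only the sending half. A minimal sketch of the receiving half in the main process, assuming the same log_queue object is shared with the child processes; the console handler and format are illustrative.

import logging
from logging.handlers import QueueListener
from multiprocessing import Queue

log_queue: 'Queue[str]' = Queue()

console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))

listener = QueueListener(log_queue, console, respect_handler_level=True)
listener.start()
# ... spawn children that call setup_queue_logging(log_queue, logging.INFO) ...
listener.stop()  # flushes any remaining records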
Code Example #5
File: logging.py Project: renaynay/trinity
def setup_queue_logging(log_queue: 'Queue[str]', level: int) -> None:
    queue_handler = QueueHandler(log_queue)
    queue_handler.setLevel(level)

    logger = get_extended_debug_logger('')
    logger.addHandler(queue_handler)
    logger.setLevel(level)

    logger.debug('Logging initialized: PID=%s', os.getpid())
Code Example #6
File: autotune.py Project: maggishaggy/scVI
def hyperopt_worker(
    progress_queue: multiprocessing.Queue,
    logging_queue: multiprocessing.Queue,
    exp_key: str,
    workdir: str = ".",
    gpu: bool = True,
    hw_id: str = None,
    poll_interval: float = 1.0,
    reserve_timeout: float = 30.0,
    mongo_port_address: str = "localhost:1234/scvi_db",
):
    """Launches a ``hyperopt`` ``MongoWorker`` which runs jobs until ``ReserveTimeout`` is raised.

    :param progress_queue: Queue in which to put None when a job is done.
    :param logging_queue: Queue to send logs to using a ``QueueHandler``.
    :param exp_key: This key is used by hyperopt as a suffix to the part of the MongoDb
        which corresponds to the current experiment. In particular, it has to be passed to ``MongoWorker``.
    :param workdir: Working directory for the ``MongoWorker``.
    :param gpu: If ``True``, a GPU is to be used.
    :param hw_id: ID of the GPU to use, set via the env variable ``CUDA_VISIBLE_DEVICES``.
    :param poll_interval: Time to wait between attempts to reserve a job.
    :param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
        before throwing a ``ReserveTimeout`` Exception.
    :param mongo_port_address: Address of the running MongoDb service.
    """
    # write all logs to queue
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    queue_handler = QueueHandler(logging_queue)
    queue_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(queue_handler)
    logger.debug("Worker working...")

    os.environ["CUDA_VISIBLE_DEVICES"] = hw_id if gpu else str()

    # FIXME is this still necessary?
    sys.path.append(".")

    mjobs = MongoJobs.new_from_connection_str(
        os.path.join(as_mongo_str(mongo_port_address), "jobs"))
    mworker = MongoWorker(mjobs,
                          float(poll_interval),
                          workdir=workdir,
                          exp_key=exp_key)

    while True:
        # FIXME we don't protect ourselves from memory leaks, bad cleanup, etc.
        try:
            mworker.run_one(reserve_timeout=float(reserve_timeout))
            progress_queue.put(None)
        except ReserveTimeout:
            logger.debug(
                "Caught ReserveTimeout. "
                "Exiting after failing to reserve job for {time} seconds.".
                format(time=reserve_timeout))
            break
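A hedged launch sketch for this worker, assuming one process per GPU and queues created in the parent; the experiment key and GPU count are illustrative.

import multiprocessing

progress_queue = multiprocessing.Queue()
logging_queue = multiprocessing.Queue()

workers = [
    multiprocessing.Process(
        target=hyperopt_worker,
        args=(progress_queue, logging_queue, "my_experiment"),
        kwargs={"gpu": True, "hw_id": str(i)},
    )
    for i in range(2)  # one worker per GPU
]
for w in workers:
    w.start()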
Code Example #7
File: fanworkers.py Project: DarrenBM/test
    def run(self):
        qh = QueueHandler(self.errq)
        qh.setLevel(logging.INFO)

        self.log = logging.getLogger('SW-{}'.format(self.pid))
        self.log.setLevel(logging.INFO)
        self.log.propagate = False
        self.log.addHandler(qh)
        self.log.info('{0} ({1}) now running'.format(self.name, self.pid))  # @DEBUG
        self.stream()
Code Example #8
File: naive.py Project: yukikpc/game-of-life
    def run(cls,
            board_definition_path: Path,
            history: int,
            log_level: int = logging.INFO) -> int:
        handler = QueueHandler(log_queue)
        handler.setLevel(log_level)

        root_logger = logging.getLogger()
        root_logger.setLevel(log_level)
        root_logger.removeHandler(root_logger.handlers[0])  # drop the existing handler (assumes one is installed)
        root_logger.addHandler(handler)

        logger.debug("Enter Main.run")
        logger.debug(f"|-- board_definition_path: {board_definition_path}")
        logger.debug(f"|-- history: {history}, log_level: {log_level}")

        controller_strategy = common.LifeCycleControllerConcreateStrategy()
        presenter_strategy = common.LifeCyclePresenterConcreateStrategy()
        life_cycle_flow = life_cycle.LifeCycleFlow(controller_strategy,
                                                   presenter_strategy)
        controller = life_cycle_flow.controller

        try:
            board_definition_repository = gateways.BoardDefinitionRepository(
                board_definitions.CSVIO(board_definition_path))
            board_vo = board_definition_repository.read()
        except FileNotFoundError:
            print(f"Definition file is not found: {board_definition_path}",
                  file=sys.stderr)
            return 1

        view_model: Optional[common.ViewModel] = controller.create_board(
            board_vo, maxlen=history)

        post_str = "\n".join([
            ": ".join([k, v.value])
            for k, v in controller_strategy.dispatch_table.items()
        ])
        cls.__print(view_model, post_str)

        while True:
            ch = cls.__getch()

            try:
                view_model = controller.transition(ch)
                if view_model is None:
                    return 0
                else:
                    cls.__print(view_model, post_str)
            except IndexError as e:
                cls.__print(view_model, "\n".join([post_str, str(e)]))
                continue
            except ValueError:
                continue
Code Example #9
File: logging.py Project: divyanks/py-evm-1
def setup_queue_logging(log_queue: 'Queue[str]', level: int) -> None:
    queue_handler = QueueHandler(log_queue)
    queue_handler.setLevel(logging.DEBUG)

    logger = logging.getLogger()
    logger.addHandler(queue_handler)
    logger.setLevel(logging.DEBUG)
    # These loggers generate too much DEBUG noise, drowning out the important things, so force
    # the INFO level for them until https://github.com/ethereum/py-evm/issues/806 is fixed.
    logging.getLogger('p2p.kademlia').setLevel(logging.INFO)
    logging.getLogger('p2p.discovery').setLevel(logging.INFO)
    logger.debug('Logging initialized: PID=%s', os.getpid())
Code Example #10
    def logger(self):
        """Configure teamcity logger.

        Returns: tc_logger
        """
        if self._tc_logger is None:
            self._tc_logger = logging.getLogger(_TC_LOGGER_NAME)
            qh = QueueHandler(queue=self._queue)
            qh.setLevel(logging.DEBUG)
            self._tc_logger.setLevel(logging.DEBUG)
            self._tc_logger.addHandler(qh)
        return self._tc_logger
Code Example #11
def init(app,
         use_queue=True,
         level=logging.root.level,
         request_logger_level=middleware_logger.level):
    access_token = app.config.get('SYNCHROLOG_ACCESS_TOKEN', None)
    assert bool(
        access_token), 'SYNCHROLOG_ACCESS_TOKEN app config cannot be empty'

    handler = _RequestHandler(access_token)
    handler.setLevel(level)

    logger = logging.root
    logger.setLevel(level)
    if use_queue:
        queue_handler = QueueHandler(queue)
        queue_handler.setLevel(level)
        logger.addHandler(queue_handler)
        listener = QueueListener(queue, handler)
        listener.start()
    else:
        logger.addHandler(handler)

    logging.setLogRecordFactory(_build_make_record_function())
    middleware_logger.setLevel(request_logger_level)

    @app.route('/synchrolog-time')
    def synchrolog_time():
        return jsonify({'time': datetime.now().isoformat()}), 200

    @app.errorhandler(HTTPException)
    def http_exception_handler(exception):
        logger.error(
            msg='HTTP exception during web request',
            exc_info=exception,
        )
        return exception

    @app.before_request
    def before_request():
        environ = request.environ.copy()
        anonymous_id = request.cookies.get(ANONYMOUS_KEY, _generate_uuid())
        environ[ANONYMOUS_KEY] = anonymous_id
        request.environ = environ

    @app.after_request
    def after_response(response):
        anonymous_key = request.environ.get(ANONYMOUS_KEY)
        if anonymous_key is not None:
            response.set_cookie(key=ANONYMOUS_KEY, value=anonymous_key)
        message = f'"{request.method} {request.path}" {response.status_code}'
        middleware_logger.info(message)
        return response
Code Example #12
def _setup_logging_multiprocessing(queues: List[Queue], levels: List[int]) -> None:
    """Re-setup logging in a multiprocessing context (only needed if a start_method other than
    fork is used) by setting up QueueHandler loggers for each queue and level
    so that log messages are piped to the original loggers in the main process.
    """

    root_logger = getLogger()
    for handler in root_logger.handlers:
        root_logger.removeHandler(handler)

    root_logger.setLevel(min(levels) if len(levels) else logging.DEBUG)
    for queue, level in zip(queues, levels):
        handler = QueueHandler(queue)
        handler.setLevel(level)
        root_logger.addHandler(handler)
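A hedged usage sketch with the "spawn" start method: the parent owns the queue and listener, and the child calls _setup_logging_multiprocessing before doing any work. The child function and names are illustrative.

import logging
import multiprocessing
from logging.handlers import QueueListener

def child(queues, levels):
    _setup_logging_multiprocessing(queues, levels)
    logging.getLogger(__name__).info("hello from the child process")

if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")
    q = ctx.Queue()
    listener = QueueListener(q, logging.StreamHandler())
    listener.start()

    proc = ctx.Process(target=child, args=([q], [logging.INFO]))
    proc.start()
    proc.join()
    listener.stop()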
Code Example #13
File: __init__.py Project: QianPeili/flask-demo
class FlaskLogger(object):

    def __init__(self):
        self.que = queue.Queue(-1)
        self.queue_handler = QueueHandler(self.que)
        self.queue_handler.setLevel(logging.DEBUG)

        file_handler = logging.FileHandler(
            '/usr/local/var/log/flask_app.log')
        self.listener = QueueListener(
            self.que, file_handler,
            respect_handler_level=True)  # this flag is a bool, not a level

    def init_app(self, app):

        app.logger.addHandler(self.queue_handler)
        self.listener.start()
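A hedged usage sketch; the Flask app is illustrative, and the hard-coded log path above must be writable.

from flask import Flask

app = Flask(__name__)
flask_logger = FlaskLogger()
flask_logger.init_app(app)  # attaches the QueueHandler and starts the listener
app.logger.warning("wired to the queue-backed file log")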
Code Example #14
    def _add_logger(self, thread_name: str):
        """
        Defines a new logger, QueueHandler and QueueListener for a new thread.

        This method is only called if the thread name is not in the 'thread_names' set.

        Args:
            thread_name: The name of the thread that should be added
        """
        log_queue = queue.Queue(-1)
        queue_handler = QueueHandler(log_queue)
        queue_handler.setLevel(logging.DEBUG)
        logger = logging.getLogger(thread_name)
        logger.propagate = False
        logger.setLevel(logging.DEBUG)
        logger.addHandler(queue_handler)
        listener = QueueListener(log_queue, self.console_handler, self.file_handler, respect_handler_level=True)
        self.loggers[thread_name] = logger
        self.listeners[thread_name] = listener
        self.thread_names.add(thread_name)
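Note the listener is created but not started here. A hedged usage sketch, assuming an instance that exposes the loggers/listeners dicts shown above; the instance and thread name are illustrative.

manager._add_logger("worker-1")
manager.listeners["worker-1"].start()  # begin draining the queue
manager.loggers["worker-1"].info("thread logger wired")
manager.listeners["worker-1"].stop()  # flush on shutdown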
Code Example #15
    def __init__(self, config):
        self.config = config

        self.logger_q = mp.Queue(-1)

        queue_handler = QueueHandler(self.logger_q)
        queue_handler.setLevel(logging.DEBUG)

        self.logger = setup_logging(logger_q=self.logger_q,
                                    name="MAIN",
                                    config=self.config["General"])
        self.STOP_WAIT_SECS = float(self.config["General"]["StopWaitSecs"])

        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        self.log_listener = QueueListener(self.logger_q, handler)

        self.procs = []
        self.queues = []

        self.shutdown_event = mp.Event()
        self.event_queue = self.MPQueue()
Code Example #16
def server_main(url, queue, token, channel, debug):
    """
    :param url: the base url on which to listen for notifications
    :param queue: the queue to feed messages into
    - get current list of all victims
    - start listening
    """
    global drive_service
    global startPageToken
    global channel_id
    startPageToken = token
    channel_id = channel
    creds = authorize()
    service = build('drive', 'v3', credentials=creds)
    drive_service = service
    httpd = HTTPServer(("localhost", 8080), NotificationHandler)
    qh = QueueHandler(queue)
    qh.setLevel(logging.DEBUG if debug else logging.INFO)
    logging.getLogger("notif_logger").addHandler(qh)
    logging.getLogger("notif_logger").setLevel(
        logging.DEBUG if debug else logging.INFO)
    httpd.serve_forever()
Code Example #17
def init_logger():
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if platform.python_version().startswith('2'):
        # python 2
        # log at stdout
        import sys
        ch = logging.StreamHandler(sys.stdout)
    else:
        # python 3
        # log into queue
        import queue
        que = queue.Queue(-1)  # no limit on size
        from logging.handlers import QueueHandler
        ch = QueueHandler(que)

    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    root.addHandler(ch)
    yield ch
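init_logger is a generator (fixture-style), so a hedged consumption sketch: on Python 3 the yielded handler is a QueueHandler, whose queue attribute can feed a listener. The stdout handler is illustrative.

import logging
import sys
from logging.handlers import QueueHandler, QueueListener

handler = next(init_logger())

if isinstance(handler, QueueHandler):
    listener = QueueListener(handler.queue, logging.StreamHandler(sys.stdout))
    listener.start()

logging.getLogger(__name__).info("routed through the root handler")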
Code Example #18
def setup_logger() -> logging.Logger:
    # create logger
    logger = logging.getLogger('cnstream_service')
    logger.propagate = False

    # create formatter
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(levelname)s - %(message)s')

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    # create queue handler and set level to info
    qh = QueueHandler(log_queue)
    qh.setLevel(logging.INFO)
    qh.setFormatter(formatter)

    # add handler to logger
    logger.addHandler(ch)
    logger.addHandler(qh)

    return logger
Code Example #19
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.log import enable_pretty_logging
from tornado.process import cpu_count
from create_db import dsn

q = queue.Queue(-1)
formatter = logging.Formatter(style='{',
                              datefmt='%H:%M:%S',
                              fmt='[{levelname} {name} {asctime}] {message}')
stream_hndl = logging.StreamHandler(stream=sys.stderr)
stream_hndl.setFormatter(formatter)
stream_hndl.setLevel(logging.DEBUG)
listener = QueueListener(q, stream_hndl)

queued_hndl = QueueHandler(q)
queued_hndl.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(queued_hndl)
logger.setLevel(logging.DEBUG)
listener.start()

ConnPool = ThreadedConnectionPool(1, cpu_count() * 2, dsn)
WorkresPool = ThreadPoolExecutor(cpu_count())
Clients = {}
UPLOAD_DIR = 'upload'


class MainWebSocketHandler(WebSocketHandler):
    def check_origin(self, origin):
        return True
Code Example #20
File: fanworkers.py Project: DarrenBM/test
    def run(self):
        print("starting DB Worker")

        qh = QueueHandler(self.errq)
        f = logging.Formatter('SW formatter: %(asctime)s: %(name)s|%(processName)s|%(process)d|%(levelname)s -- %(message)s')
        qh.setFormatter(f)
        qh.setLevel(logging.INFO)
        self.log = logging.getLogger(__name__)
        self.log.setLevel(logging.INFO)
        self.log.propagate = False
        self.log.addHandler(qh)

        self.log.info('DBWorker {} starting'.format(self.pid))

        self.tweet_lookup_queue = {}
        self.tweet_text_lookup_queue = []

        while True:
            
            try:
                # a timeout is needed for Empty to ever be raised
                d = self.tweet_queue.get(block=True, timeout=SLEEP_TIME)
            except Empty:
                time.sleep(SLEEP_TIME)
            else:
                try:
                    code, data = d
                except ValueError:
                    self.log.exception('Code, data assignment failed with:\n{}\n'.format(d))
                else:
                    if code == TWEET_MESSAGE:
                        # Set streamsession_id from data
                        ssid = data.pop('streamsession_id', None)
                        if ssid is not None:
                            self.streamsession_id = ssid
                        
                        retries = 0
                        retry_limit = 5
                        while retries < retry_limit:
                            
                            try:
                                self.process_tweet(data)
    
                            except (IntegrityError, OperationalError) as e:
                                msg = '\n\n' + '*'*70
                                msg += '\n\nDB integrity error. Retrying ({}).'.format(retries)
                                msg += 'Exception: {}\n'.format(e)
                                msg += '-'*70 + '\n'
                                msg += traceback.format_exc()
                                msg += '\n\n{0}\nTweet Data:\n{1}\n{0}'.format('*'*70, json_dumps(data, indent=4))
                                
                                self.log.warning(msg)
                                retries += 1
    
                            else:
                                retries = retry_limit
    
                        # Check/dump lookup queues
                        if (len(self.tweet_lookup_queue) >=
                            self.lookup_queue_limit):
                            self.dump_tweet_lookup_queue()
                        if (len(self.tweet_text_lookup_queue) >=
                            self.lookup_queue_limit):
                            self.dump_tweet_text_lookup_queue()
                            
                        self.countq.put(1)
                        self.count += 1
    
                    elif code == START_MESSAGE: # stream started, time passed as data
                        self.log.debug('received START_MESSAGE')
                        session = get_session(self.db_path)
                        ss = session.merge(StreamSession(starttime=data))
                        self.commit_transaction(session)
                        self.streamsession_id = ss.id
                        session.close()
    
    
                    elif code == STOP_STREAM_MESSAGE: # stream stopped, time passed as data
                        self.log.debug('received STOP_STREAM_MESSAGE')
                        session = get_session(self.db_path)
                        ss = session.merge(StreamSession(id=self.streamsession_id))
                        ss.endtime=data
                        self.commit_transaction(session)
                        session.close()
    
                    elif code == STOP_MESSAGE: # process stopped by parent
                        # replace message for other workers
                        
                        self.tweet_queue.put((code, data))
                        print('stopping DB worker')
                        print('    dumping tweet lookup queue...')
                        self.dump_tweet_lookup_queue()
                        print('    DONE.')
                        print('    dumping tweet text lookup queue...')
                        self.dump_tweet_text_lookup_queue()
                        print('    DONE.')
                        print('Recording session stop time')
                        print('    DONE.')
                        break
                    
        print('{}: Process {} (id={}) finished.'.format(str(dt.now()),
                                                        self.name,
                                                        self.pid))
Code Example #21
        """Formats the record."""
        original = logging.Formatter.format(self, record)
        return filter_secrets(original)


_logger = logging.getLogger(__name__)
_logger.setLevel(1)  # numeric level 1 sits below DEBUG (10), so every record passes
# create formatter and add it to the handlers
LOG_FORMAT = '%(asctime)s - %(threadName)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s'
formatter = SensitiveFormatter(LOG_FORMAT)

# create console handler
_ch = logging.StreamHandler()
_ch.setFormatter(formatter)
_ch.setLevel(10)  # 10 == logging.DEBUG

# Get Queue
_qch: Queue = Queue(-1)
_qhch = QueueHandler(_qch)
_qhch.setLevel(10)  # 10 == logging.DEBUG
_qlch = QueueListener(_qch, _ch)
_logger.addHandler(_qhch)
_qlch.start()

_logger.log(1, "lvl1")
_logger.debug('debug')
_logger.info('info')
_logger.warning('warning')
_logger.error('error')
_logger.critical('critical')
Code Example #22
File: logger.py Project: tedinGH/lvyaoyu
# Set the log format
fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s',
                        '%Y-%m-%d %H:%M:%S')

# Add the cmd handler
cmd_handler = logging.StreamHandler(sys.stdout)
cmd_handler.setLevel(logging.DEBUG)
cmd_handler.setFormatter(fmt)

# Add the file handler
file_handler = logging.handlers.TimedRotatingFileHandler(
    filename="logs\\log.txt",
    encoding='utf-8',
    when="D",
    interval=1,
    backupCount=31)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(fmt)

# Add the http handler
queue_handler = QueueHandler(log_queue)
queue_handler.setLevel(logging.INFO)
queue_handler.setFormatter(fmt)

# Add the handlers to the logger
logger.addHandler(cmd_handler)
logger.addHandler(file_handler)
logger.addHandler(queue_handler)

# logger.debug("今天天气不错")
Code Example #23
File: __init__.py Project: cjmochrie/I-Borrow-Desk
from .routes import *
from .api import *

admin.add_view(AdminView(name='Home'))
admin.add_view(DbView(User, db.session))

# Build the database
db.create_all()

# logging
if not app.debug:
    que = queue.Queue(-1)  # no limit on size
    # Create a QueueHandler to receive logging
    queue_handler = QueueHandler(que)
    queue_handler.setLevel(logging.ERROR)

    # Create the actual mail handler
    credentials = app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']
    mail_handler = SMTPHandler(('smtp.gmail.com', '587'),
                               app.config['APP_ADDRESS'],
                               [app.config['ADMIN_ADDRESS']],
                               'stock_loan exception',
                               credentials=credentials, secure=())

    # Create a listener to dequeue records from the queue and send them to the mail handler
    listener = QueueListener(que, mail_handler)
    listener.start()

    # Add the queue handler to the app
    app.logger.addHandler(queue_handler)
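One caveat worth a hedged sketch: the listener thread is never stopped, so error mails still sitting in the queue can be lost at exit. A shutdown hook, using only the names defined above, flushes them:

import atexit

# Flush any queued records through the mail handler before the process exits.
atexit.register(listener.stop)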
Code Example #24
class LogWindow:
    __logger = logging.getLogger('LogWindow')
    __UI_FILE = "assets/logwindow.ui"
    __LINES_TO_DISPLAY = 5000

    def __init__(self, qt_threadpool: QtCore.QThreadPool):
        self.__qt_threadpool = qt_threadpool

        ui_file = QtCore.QFile(
            MiscUtils.get_abs_resource_path(LogWindow.__UI_FILE))
        ui_file.open(QtCore.QFile.ReadOnly)
        loader = QtUiTools.QUiLoader()
        self.__window: QtWidgets.QMainWindow = loader.load(ui_file)
        ui_file.close()

        self.__cleanup_started = False
        self.__window.setWindowTitle("View Logs")

        self.__log_window_queue = Queue()
        self.__log_window_queue_handler = QueueHandler(self.__log_window_queue)
        self.__log_window_queue_handler.setLevel(logging.INFO)
        logging.getLogger().addHandler(self.__log_window_queue_handler)

        self.qt_worker = QWorker(self.__queue_poll_thread_target)
        self.qt_worker.signals.progress.connect(
            self.__queue_poll_thread_progress)
        self.__qt_threadpool.start(self.qt_worker)

        self.__txt_log_display: QtWidgets.QPlainTextEdit = self.__window.findChild(
            QtWidgets.QPlainTextEdit, 'txt_log_display')
        self.__btn_clear: QtWidgets.QPushButton = self.__window.findChild(
            QtWidgets.QPushButton, 'btn_clear')
        self.__btn_log_dir: QtWidgets.QPushButton = self.__window.findChild(
            QtWidgets.QPushButton, 'btn_log_dir')

        self.__txt_log_display.setMaximumBlockCount(self.__LINES_TO_DISPLAY)
        self.__btn_clear.clicked.connect(self.__btn_clear_clicked)
        self.__btn_log_dir.clicked.connect(self.__btn_log_dir_clicked)

    def show(self):
        if not self.__window.isVisible():
            self.__txt_log_display.clear()
        self.__window.show()
        self.__window.raise_()
        self.__window.activateWindow()

    def hide(self):
        self.__window.hide()

    def cleanup(self):
        self.__logger.info("Performing cleanup")
        self.__cleanup_started = True
        self.hide()
        logging.getLogger().removeHandler(self.__log_window_queue_handler)
        self.__log_window_queue.put(None)
        self.__logger.info("Cleanup completed")

    def __btn_clear_clicked(self):
        self.__txt_log_display.clear()

    def __btn_log_dir_clicked(self):
        path = os.path.realpath(MiscUtils.get_log_dir_path())
        webbrowser.open("file:///" + path)

    def __queue_poll_thread_target(self, progress_signal):
        MiscUtils.debug_this_thread()
        self.__logger.info("Queue poll thread started")
        log_formatter = MiscUtils.get_default_log_formatter()
        while not self.__cleanup_started:
            log_record = self.__log_window_queue.get()
            if log_record is None:
                break
            if self.__window.isVisible():
                log_text = log_formatter.format(log_record)
                progress_signal.emit(log_text)

    def __queue_poll_thread_progress(self, progress):
        self.__txt_log_display.appendPlainText(progress)
Code Example #25
File: web.py Project: matthewjkuss/csvql
import logging
import os
import queue
import re
from logging.handlers import QueueHandler
from typing import Dict
from urllib.parse import unquote

from . import tokenise, parse, interpret, execute
from . import database

log = logging.getLogger(__name__)

log_queue = queue.Queue()
user_log = QueueHandler(log_queue)
user_log.setLevel("INFO")
formatter = logging.Formatter('%(levelname)s (%(module)s): %(message)s')
user_log.setFormatter(formatter)
for child in ["tokenise", "execute", "parse", "interpret"]:
    logging.root.getChild(f"csvql.{child}").addHandler(user_log)

console_log = logging.StreamHandler()
console_log.setLevel("DEBUG")
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console_log.setFormatter(formatter)
logging.root.getChild(f"csvql.web").addHandler(console_log)

myregex = "^" + "(?P<name>.*)" + "\.csv" + "$"

DATABASE: Dict[str, database.Table] = {}
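A hedged sketch of how the web layer might drain the user-facing queue; the drain_user_log helper is an assumption, not part of the project.

def drain_user_log() -> list:
    """Collect any queued user-facing messages without blocking."""
    messages = []
    while not log_queue.empty():
        record = log_queue.get_nowait()
        # QueueHandler.prepare() already applied user_log's formatter,
        # so getMessage() returns the final string.
        messages.append(record.getMessage())
    return messages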
Code Example #26
def create_app(config_class=Config):
    # create an app
    app = Flask(__name__)
    # add configuration variables to the app
    app.config.from_object(config_class)

    # fit the flask extensions to the specific configuration of this app
    db.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)

    # register blueprints
    from App.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api/v1')

    from App.main import bp as main_bp
    app.register_blueprint(main_bp)

    from App.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    if not app.debug:
        if app.config['MAIL_SERVER']:

            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='ShopifyAPI Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
                ))
            q = Queue()
            q_handler = QueueHandler(q)
            q_handler.setLevel(logging.ERROR)
            listener = QueueListener(q, mail_handler)
            app.logger.addHandler(q_handler)
            listener.start()

        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/Shopify.log',
                                           maxBytes=131072,
                                           backupCount=10)
        file_handler.setFormatter(
            logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
            ))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('SIR startup')

    return app