def logger_init(level=logging.INFO):
    q = Queue()

    # this is the handler for all log records
    handler = logging.StreamHandler()
    f = logging.Formatter(
        '%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    handler.setFormatter(f)

    file_handler = logging.FileHandler(LOG_FILE, 'a')
    f = logging.Formatter(
        '%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    file_handler.setFormatter(f)

    # ql gets records from the queue and sends them to both handlers
    ql = QueueListener(q, handler, file_handler)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(level)
    # add the handler to the logger so records from this process are handled
    logger.addHandler(handler)
    logger.addHandler(file_handler)

    return ql, q
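logger_init() wires up only the listener half of the pattern. A minimal sketch of the worker half follows; worker_init is a hypothetical name, and QueueHandler comes from logutils (or logging.handlers on Python 3.2+):

import logging
from logutils.queue import QueueHandler  # logging.handlers.QueueHandler on Py3.2+

def worker_init(q):
    # Runs in each child process: route every record into the parent's queue,
    # where the QueueListener started by logger_init() handles it.
    qh = QueueHandler(q)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(qh)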
Example #2
File: redis.py Project: Allifreyr/plexpy
 def __init__(self, *handlers, **kwargs):
     redis = kwargs.get('redis')
     if redis is None:
         from redis import Redis
         redis = Redis()
     self.key = kwargs.get('key', 'python.logging')
     QueueListener.__init__(self, redis, *handlers)
Example #4
def listener_process(q, stop_event, config):
    """
    This could be done in the main process, but is just done in a separate
    process for illustrative purposes.

    This initialises logging according to the specified configuration,
    starts the listener and waits for the main process to signal completion
    via the event. The listener is then stopped, and the process exits.
    """
    #logging.config.dictConfig(config)
    logging.config.fileConfig(config)
    #listener = logging.handlers.QueueListener(q, MyHandler())
    listener = QueueListener(q, MyHandler())
    listener.start()
    if os.name == 'posix':
        # On POSIX, the setup logger will have been configured in the
        # parent process, but should have been disabled following the
        # fileConfig call (disable_existing_loggers defaults to True).
        # On Windows, since fork isn't used, the setup logger won't
        # exist in the child, so it would be created and the message
        # would appear - hence the "if posix" clause.
        logger = logging.getLogger('main_logger')
        logger.critical('Should not appear, because of disabled logger ...')
    stop_event.wait()
    # Use a fresh logger here: `logger` above is only bound on POSIX.
    logging.getLogger(__name__).info("Logger listener stop event triggered.")
    listener.stop()
Example #5
def listener():
    """
    Wrapper that creates a QueueListener, starts it and automatically stops it.
    To be used in a with statement in the main process, for multiprocessing.
    """

    global queue

    # Initialize queue if not already done
    if queue is None:
        try:
            queue = multiprocessing.Queue()
        except OSError as e:
            queue = False

            # Some machines don't have access to /dev/shm. See
            # http://stackoverflow.com/questions/2009278 for more information.
            if e.errno == errno.EACCES:
                logger.warning("Multiprocess logging disabled, because "
                    "current user cannot map shared memory. You won't see any" \
                    "logging generated by the worker processed.")

    # Multiprocess logging may be disabled.
    if not queue:
        yield
    else:
        queue_listener = QueueListener(queue, *logger.handlers)

        try:
            queue_listener.start()
            yield
        finally:
            queue_listener.stop()
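The bare yields above only work if the definition carries contextlib.contextmanager, which this excerpt does not show. A self-contained sketch of the same pattern under that assumption (simple_listener is a hypothetical name):

import contextlib
import logging
import multiprocessing
from logutils.queue import QueueListener  # logging.handlers on Py3.2+

logger = logging.getLogger(__name__)

@contextlib.contextmanager
def simple_listener():
    # Drain the queue with a QueueListener for the duration of the with
    # block, then stop the listener so remaining records are flushed.
    queue = multiprocessing.Queue()
    queue_listener = QueueListener(queue, *logger.handlers)
    queue_listener.start()
    try:
        yield queue
    finally:
        queue_listener.stop()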
Example #6
def initialize_logging(config):
    multiprocessing.current_process().name = 'Stack'
    cfg = config.get(A.LOGGING, {})

    # log to s3 if there's a destination specified in the config
    bucket = cfg.get(A.logging.S3_BUCKET)
    if bucket:
        json_formatter = JSONFormatter(config)
        s3_handler = S3Handler(bucket, cfg.get(A.logging.S3_PREFIX, ''))
        s3_handler.setFormatter(json_formatter)
        s3_handler.setLevel(logging.INFO)

        # The parent process is the only one that actually buffers the log
        # records in memory and writes them out to s3.  The child processes
        # send all of their log records to the parent's queue.
        #
        # Using the QueueHandler and QueueListener classes from logutils-0.3.2
        # here since they're the implementations in future versions of stdlib
        # logging anyway (logutils is the "backports from Py3k logging"
        # library).
        queue = multiprocessing.Queue()
        ql = QueueListener(queue, s3_handler)

        def cleanup():
            ql.stop()
            s3_handler.flush()
        atexit.register(cleanup)
        ql.start()

        qh = QueueHandler(queue)
        log.addHandler(qh)

    # set local_file to an empty string or some other false value to deactivate
    local_file = cfg.get(A.logging.LOCAL_FILE, 'bang.log')
    if local_file:
        local_handler = logging.FileHandler(local_file)
        local_handler.setFormatter(
                logging.Formatter(CONSOLE_LOGGING_FORMAT)
                )
        level = sanitize_config_loglevel(
                cfg.get(A.logging.LOCAL_FILE_LEVEL, logging.DEBUG)
                )
        local_handler.setLevel(level)
        log.addHandler(local_handler)

    # also log to stderr
    if sys.stderr.isatty():
        formatter = ColoredConsoleFormatter(CONSOLE_LOGGING_FORMAT)
    else:
        formatter = logging.Formatter(CONSOLE_LOGGING_FORMAT)
    handler = logging.StreamHandler()  # default stream is stderr
    handler.setFormatter(formatter)
    console_level = sanitize_config_loglevel(
            cfg.get(A.logging.CONSOLE_LEVEL, 'INFO')
            )
    handler.setLevel(console_level)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    log.debug('Logging initialized.')
Example #7
class queueHandler(object):
    def __enter__(self):
        q = multiprocessing.Queue()
        self.listener = QueueListener(q, self)
        self.listener.start()
        return q

    def __exit__(self, type, value, traceback):
        self.listener.stop()
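Since the class passes self to QueueListener as a handler, it is presumably mixed into a logging.Handler subclass. A self-contained sketch of that combination; QueueBackedStreamHandler is a hypothetical name:

import logging
import multiprocessing
from logutils.queue import QueueHandler, QueueListener

class QueueBackedStreamHandler(logging.StreamHandler):
    def __enter__(self):
        q = multiprocessing.Queue()
        self.listener = QueueListener(q, self)
        self.listener.start()
        return q

    def __exit__(self, exc_type, exc_value, traceback):
        self.listener.stop()

# Child processes would attach QueueHandler(q); here a record simply
# round-trips through the queue to stderr.
with QueueBackedStreamHandler() as q:
    qlog = logging.getLogger('queued')
    qlog.addHandler(QueueHandler(q))
    qlog.warning('routed through the queue')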
Example #9
    def remove_client(self, id):

        log_handler = self._clients[id]
        del self._clients[id]

        self.queue_listener.stop()
        handlers = list(self.queue_listener.handlers)
        handlers.remove(log_handler)
        self.queue_listener = QueueListener(self.queue, *handlers)
        self.queue_listener.start()
Example #10
    def add_client(self, id, log_handler):
        self._clients[id] = log_handler

        # add handler to a QueueListener
        # TODO: this is bad because all other clients have to wait until this
        # function returns
        self.queue_listener.stop()
        handlers = list(self.queue_listener.handlers)
        handlers.append(log_handler)
        self.queue_listener = QueueListener(self.queue, *handlers)
        self.queue_listener.start()
Example #11
    def init_app(self, app):
        handlers = []
        filehandler = RotatingFileHandler(filename=app.config['LOG_FILE_LOC'],
                                          maxBytes=1000000, backupCount=5)
        formatter = logging.Formatter(
            "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
        filehandler.setLevel(logging.INFO)
        if app.config['TESTING']:
            filehandler.setLevel(logging.ERROR)
        filehandler.setFormatter(formatter)

        logging.basicConfig()
        handlers.append(filehandler)

        if not app.debug:
            mail_handler = SMTPHandler(
                (app.config['INTERNAL_MAILS_SERVER'],
                 app.config['INTERNAL_MAILS_PORT']),
                app.config['INTERNAL_MAILS_SERVER_USERNAME'],
                app.config['NIGHTS_WATCH'],
                app.config.get('SERVER_ERROR_MAIL_SUBJECT',
                               'Server error'),
                credentials=(app.config['INTERNAL_MAILS_SERVER_USERNAME'],
                             app.config['INTERNAL_MAILS_SERVER_PASSWORD']),
                secure=())
            mail_handler.setLevel(logging.ERROR)
            mail_handler.setFormatter(formatter)
            handlers.append(mail_handler)
        self.logging_queue_listener = QueueListener(
            self.logging_queue, *handlers)
        # Start the listener so queued records are actually handled; the
        # queue and its QueueHandler are assumed to be created in __init__,
        # which this excerpt does not show.
        self.logging_queue_listener.start()
        app.logger.addHandler(self.logging_queue_handler)
Example #12
File: log.py Project: umaxyon/pac-job
class MultiProcessLogger(object):
    def __init__(self):
        self.q = Queue(-1)
        self.ql = QueueListener(self.q, *tuple(getLogger('PacPac').handlers))
        self.qh = QueueHandler(self.q)

    def get_logger(self):
        lg = getLogger("queue_listen")
        lg.addHandler(self.qh)
        lg.setLevel(DEBUG)
        lg.propagate = False  # don't propagate to the root logger
        self.ql.start()
        return lg

    def end_log_listen(self):
        self.ql.stop()
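A hedged usage sketch for MultiProcessLogger; it assumes a 'PacPac' logger with at least one handler was configured beforehand, since the constructor snapshots those handlers:

mpl = MultiProcessLogger()
lg = mpl.get_logger()   # starts the listener, returns the queue-bound logger
lg.debug('travels through the queue to the PacPac handlers')
mpl.end_log_listen()    # stop the listener thread when logging is finished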
Example #13
    def __init__(self):
        self.logger = colorlog.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)

        logging.addLevelName(logging.INFO, 'I')
        # colorlog.default_log_colors['I'] = "bold_green"
        logging.addLevelName(logging.CRITICAL, 'C')
        colorlog.default_log_colors['C'] = "bold_red"
        logging.addLevelName(logging.DEBUG, 'D')
        logging.addLevelName(logging.WARNING, 'W')

        SUCCESS = logging.DEBUG + 1
        logging.addLevelName(SUCCESS, 'success')
        colorlog.default_log_colors['success'] = "bold_green"
        setattr(self.logger, 'success', lambda message, *args: self.logger._log(SUCCESS, message, args))

        # Console log msg setting
        sh = colorlog.StreamHandler()
        sh.setLevel(logging.DEBUG + 1)
        sh_fmt = colorlog.ColoredFormatter('%(log_color)s> %(message)s')
        sh.setFormatter(sh_fmt)
        self.logger.addHandler(sh)

        # File log msg setting
        self.config = Config()
        product_name = self.config.get_product_name()
        folder_name = "{}_Log_{}".format(product_name,
                                         datetime.now().year)
        folder_path = os.path.join(os.getcwd(), folder_name)
        self._make_sure_dir_exists(folder_path)

        filename = '{}.txt'.format(datetime.now().strftime("Log %Y%m%d"))
        self.log_path = os.path.join(folder_path, filename)

        fh = logging.FileHandler(self.log_path)
        fmt = logging.Formatter('%(asctime)s, %(levelname)s, %(module)s, %(station)s, %(serial)s, "%(message)s"',
                                datefmt='%Y-%m-%d %H:%M:%S')
        fh.setFormatter(fmt)
        que = Queue.Queue(-1)
        queue_handler = QueueHandler(que)
        queue_handler.setLevel(logging.INFO)
        self.logger.addHandler(queue_handler)
        self.listener = QueueListener(que, fh)

        self.latest_filter = None
Example #14
 def setUp(self):
     self.handler = h = TestHandler(Matcher())
     self.logger = temp_logger = logging.getLogger()
     self.queue = q = queue.Queue(-1)
     self.qh = qh = QueueHandler(q)
     self.ql = ql = QueueListener(q, h)
     ql.start()
     temp_logger.addHandler(qh)
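A setUp like this usually pairs with a tearDown that detaches the handler and stops the listener; a sketch under that assumption:

 def tearDown(self):
     self.logger.removeHandler(self.qh)  # stop feeding the queue first
     self.qh.close()
     self.ql.stop()                      # flushes any queued records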
Example #15
File: util.py Project: leowmjw/bang
def initialize_logging(config):
    multiprocessing.current_process().name = 'Stack'
    cfg = config.get('logging', {})
    console_level = cfg.get('console_level', 'INFO')
    log.setLevel(console_level)

    # log to s3 if there's a destination specified in the config
    bucket = cfg.get('s3_bucket')
    if bucket:
        json_formatter = JSONFormatter(config)
        s3_handler = S3Handler(bucket, cfg.get('s3_prefix', ''))
        s3_handler.setFormatter(json_formatter)
        s3_handler.setLevel(logging.INFO)

        # The parent process is the only one that actually buffers the log
        # records in memory and writes them out to s3.  The child processes
        # send all of their log records to the parent's queue.
        #
        # Using the QueueHandler and QueueListener classes from logutils-0.3.2
        # here since they're the implementations in future versions of stdlib
        # logging anyway (logutils is the "backports from Py3k logging"
        # library).
        queue = multiprocessing.Queue()
        ql = QueueListener(queue, s3_handler)

        def cleanup():
            ql.stop()
            s3_handler.flush()
        atexit.register(cleanup)
        ql.start()

        qh = QueueHandler(queue)
        log.addHandler(qh)

    # also log to stderr
    if sys.stderr.isatty():
        formatter = ColoredConsoleFormatter(CONSOLE_LOGGING_FORMAT)
    else:
        formatter = logging.Formatter(CONSOLE_LOGGING_FORMAT)
    handler = logging.StreamHandler()  # default stream is stderr
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    log.addHandler(handler)
    log.debug('Logging initialized.')
Example #16
    def logger_init(self):
        mp_queue = multiprocessing.Queue()
        # Extract the stream handler from the logger and temporarily remove
        # it; it is handed to the queue listener instead. When the Pool is
        # closed, the stream handler is added back.
        logger = logging.getLogger(self.logger_name)
        stream_handlers = [
            x for x in logger.handlers if type(x) == logging.StreamHandler
        ]
        # this is necessary if we don't want to set up the handler in tests
        if stream_handlers:
            self.stream_handler = stream_handlers[0]

        logger.removeHandler(self.stream_handler)
        # queue_listener gets records from the queue
        # and sends them to the logger's stream handler.
        mp_queue_listener = QueueListener(mp_queue, self.stream_handler)
        mp_queue_listener.start()

        return mp_queue_listener, mp_queue
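The returned mp_queue is meant for the pool's children. A hedged sketch of the matching initializer; worker_init is a hypothetical name, with QueueHandler from logutils or logging.handlers:

    def worker_init(self, mp_queue):
        # Intended as Pool(initializer=...): each worker routes its records
        # into mp_queue, and mp_queue_listener forwards them to the original
        # stream handler in the parent process.
        logger = logging.getLogger(self.logger_name)
        logger.addHandler(QueueHandler(mp_queue))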
Example #19
def get_logger(name=__name__):
    """
        Get logger with handlers
    """
    name = name.replace('.py', '')
    queue = Queue.Queue(-1)
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    handlers = []
    for handler in HANDLERS:
        if handler == 'gelf':
            handlers.append(get_gelf_handler(name=name))
        elif handler == 'stderr':
            handlers.append(get_stderr_handler())

    listener = QueueListener(queue, *handlers)
    listener.start()
    queue_handler = QueueHandler(queue)
    logger.addHandler(queue_handler)
    return logger
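One caveat: each call builds a fresh queue and listener and never stops the listener. Following the atexit pattern from Example #6, a minimal fix would be to add, just before the return:

    import atexit
    atexit.register(listener.stop)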
Example #20
def log_server(level, queue, filename, mode='w'):
    """Run the logging server.

    This listens to the queue of log messages, and handles them using Python's
    logging handlers.  It prints to stderr, as well as to a specified file, if
    it is given.

    """
    formatter = _get_formatter()
    handlers = []

    sh = StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(level)
    handlers.append(sh)

    if filename:
        fh = FileHandler(filename, mode)
        fh.setFormatter(formatter)
        fh.setLevel(level)
        handlers.append(fh)

    listener = QueueListener(queue, *handlers)
    listener.start()

    # The QueueListener does its handling on a separate thread, so the main
    # thread just "busy waits" here until terminated.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        listener.stop()
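A sketch of the parent side that could drive log_server; the names are assumptions, and workers would then log through QueueHandler(queue) as in the other examples here:

import logging
import multiprocessing

queue = multiprocessing.Queue()
server = multiprocessing.Process(
    target=log_server, args=(logging.INFO, queue, 'run.log'))
server.start()
# ... start workers that attach QueueHandler(queue) ...
server.terminate()  # ends the busy-wait loop; use only once workers are done
server.join()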
Example #22
def get_logger():
    if not settings.MEDUSA_MULTITHREAD:
        return get_base_logger()

    from logutils.queue import QueueHandler, QueueListener
    from multiprocessing import Queue

    mplogger = logging.getLogger(__name__ + ".__multiprocessing__")
    if not getattr(mplogger, "setup_done", False):
        base = get_base_logger()
        logqueue = Queue()

        mplogger.setLevel(logging.DEBUG)
        mplogger.addHandler(QueueHandler(logqueue))
        mplogger.setup_done = True
        mplogger.propagate = False

        global listener
        listener = QueueListener(logqueue, ProxyLogHandler(get_base_logger()))
        listener.start()

    return mplogger
Example #23
    def __init__(self, parent=None, target=None, args=(), kwargs={}, timer_check_interval=1000, log_handler=None):
        super(SubprocessWorker, self).__init__(parent)
        self.id = self.get_next_worker_id()
        self.running = False
        self.timer = None
        self.timer_check_interval=timer_check_interval
        self.process = None
        self.log_handler = log_handler if log_handler is not None else QtLogHandler()
        self.log_handler.messageEmitted.connect(self.handle_message)
        self.log_queue = multiprocessing.Queue()
        self.result_queue = multiprocessing.Queue()
        self.log_queue_listener = QueueListener(self.log_queue, self.log_handler)
        self.target = target
        self.args = tuple(args)
        self.kwargs = dict(kwargs)

        self.thread = QtCore.QThread()
        self.moveToThread(self.thread)

        self.thread.started.connect(self.start)
        self.thread.finished.connect(self.stop)

        log.debug('Worker {} initialized on thread {}'.format(self.id, str(self.thread)))
Example #24
def std_logging_queue_handler():
    start = datetime.datetime.now()
    q = Queue(-1)

    hdlr = logging.FileHandler('qtest.out', 'w')
    ql = QueueListener(q, hdlr)

    # Create the root logger and route its records through the queue
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    qh = QueueHandler(q)
    root.addHandler(qh)

    ql.start()

    for i in range(100000):
        logging.info("msg:%d" % i)
    ql.stop()
    print(datetime.datetime.now() - start)
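To make the printed timing meaningful, the same loop can be run against a FileHandler attached directly, with no queue in between; a sketch for comparison:

import datetime
import logging

def std_logging_direct_handler():
    # Baseline: the same 100000 records, handled synchronously in-process.
    start = datetime.datetime.now()
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.FileHandler('qtest_direct.out', 'w'))
    for i in range(100000):
        logging.info("msg:%d" % i)
    print(datetime.datetime.now() - start)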
Example #25
    """
    log_file_found = utility.setup_logging(
        default_conf_path=settings.LOGGING_CONFIGURATION_FILE,
        logging_path=openwarp_settings.LOG_FILE)
    # Compile python module if it was not compiled.
    # This should always output to the terminal no matter the verbosity level
    if os.path.isdir('./openwarpgui/nemoh'):
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='openwarpgui/nemoh')
    else:
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='nemoh')
    logger = logging.getLogger(__name__)

    if len(sys.argv) <= 1:
        utility.log_and_print(logger, 'Error: No configurations file given to the CLI. Usage of script is: openwarp_cli configuration1 .. ')

    queue = multiprocessing.Queue(-1)
    ql = QueueListener(queue, *logging.getLogger().handlers)
    ql.start()

    for i in range(1, len(sys.argv)):
        path = sys.argv[i]
        user_config = None
        # Checking if it is a valid path
        if os.path.exists(path):
            utility.log_and_print(logger, 'Processing configuration file at ' + path + '\n')
            with open(path, 'rt') as f:
                user_config = json.load(f)
        else: # Check if it is json string
            try:
                user_config = json.loads(path)
            except Exception as e:
                user_config = None
Example #26
class LogEventServer(object):
    def __init__(self):
        self.queue = mp.Queue()
        self.queue_listener = None
        self._pipelines = {}
        self._clients = {}

    @property
    def running(self):
        return self.queue_listener is not None

    @if_running
    def add_client(self, id, log_handler):
        self._clients[id] = log_handler

        # add handler to a QueueListener
        # TODO: this is bad because all other clients have to wait until this
        # function returns
        self.queue_listener.stop()
        handlers = list(self.queue_listener.handlers)
        handlers.append(log_handler)
        self.queue_listener = QueueListener(self.queue, *handlers)
        self.queue_listener.start()

    @if_running
    def remove_client(self, id):

        log_handler = self._clients[id]
        del self._clients[id]

        self.queue_listener.stop()
        handlers = list(self.queue_listener.handlers)
        handlers.remove(log_handler)
        self.queue_listener = QueueListener(self.queue, *handlers)
        self.queue_listener.start()

    @if_running
    def add_pipeline(self, ppl):
        self._pipelines[ppl.name] = ppl
        ppl.queue_handler = QueueHandler(self.queue)

        # direct messages from this pipeline to the main bus
        ppl.logger.addHandler(ppl.queue_handler)

    @if_running
    def remove_pipeline(self, ppl):
        del self._pipelines[ppl.name]
        
        ppl.logger.removeHandler(ppl.queue_handler)
        del ppl.queue_handler

    def start(self):
        if not self.running:
            self.queue_listener = QueueListener(self.queue)
            self.queue_listener.start()
        else:
            raise Exception("already running")

    @if_running
    def stop(self):
        self.queue_listener.stop()
        self.queue_listener = None
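A short usage sketch for the class above; it assumes the if_running decorator simply guards against calls while the listener is down:

import logging

server = LogEventServer()
server.start()                 # listener begins draining the queue (no handlers yet)
server.add_client('console', logging.StreamHandler())
# ... pipelines attached via add_pipeline() now fan out to the console ...
server.remove_client('console')
server.stop()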
Example #27
class SubprocessWorker(QtCore.QObject):
    """
    Worker object that runs the target function in a separate sub-process, checking its exitcode periodically.

    Signals and parameters:

        - messageEmitted: Emitted whenever the target function's root logger emits a message
            - Worker ID (int)
            - Log level name (str)
            - Log message (str)
        - resultEmitted: Emitted when the process ends.
            - Worker ID (int)
            - Exit code (int)
            - Return value or exception instance (object)
    """

    messageEmitted = QtCore.pyqtSignal(int, str, str)
    resultEmitted = QtCore.pyqtSignal(int, int, object)

    get_next_worker_id = itertools.count().next

    def __init__(self, parent=None, target=None, args=(), kwargs={}, timer_check_interval=1000, log_handler=None):
        super(SubprocessWorker, self).__init__(parent)
        self.id = self.get_next_worker_id()
        self.running = False
        self.timer = None
        self.timer_check_interval=timer_check_interval
        self.process = None
        self.log_handler = log_handler if log_handler is not None else QtLogHandler()
        self.log_handler.messageEmitted.connect(self.handle_message)
        self.log_queue = multiprocessing.Queue()
        self.result_queue = multiprocessing.Queue()
        self.log_queue_listener = QueueListener(self.log_queue, self.log_handler)
        self.target = target
        self.args = tuple(args)
        self.kwargs = dict(kwargs)

        self.thread = QtCore.QThread()
        self.moveToThread(self.thread)

        self.thread.started.connect(self.start)
        self.thread.finished.connect(self.stop)

        log.debug('Worker {} initialized on thread {}'.format(self.id, str(self.thread)))

    def check_process_status(self):
        if not self.running:
            message = 'Cannot check process status while worker is not running!'
            log.error(message)
            raise RuntimeError(message)
        log.debug('Checking status of subprocess {} (pid {})'.format(self.process.name, self.process.pid))
        if not self.process.is_alive():
            message = 'Subprocess {} ended (pid {}, exit code {})'.format(
                self.process.name, self.process.pid, self.process.exitcode
            )
            log.debug(message)
            self.resultEmitted.emit(self.id, self.process.exitcode, self.result_queue.get())
        else:
            message = 'Subprocess {} (pid {}) is still active'.format(self.process.name, self.process.pid)
            log.debug(message)

    @QtCore.pyqtSlot()
    def start(self):
        if self.running:
            log.warn('Worker {} already started on thread {}'.format(self.id, str(self.thread)))
            return
        log.debug('Worker {} started on thread {}'.format(self.id, str(self.thread)))
        self.running = True

        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.check_process_status)

        self.log_queue_listener.start()

        self.process = multiprocessing.Process(
            target=wrap_target_function,
            args=(self.target, self.log_queue, self.result_queue) + self.args,
            kwargs=self.kwargs
        )

        self.process.start()
        self.timer.start(self.timer_check_interval)
        log.debug('Subprocess {} (pid {}) started'.format(self.process.name, self.process.pid))

    @QtCore.pyqtSlot()
    def stop(self):
        if not self.running:
            log.warn('Worker {} already stopped on thread {}'.format(self.id, str(self.thread)))
            return
        self.running = False
        self.timer.stop()

        if self.process.is_alive():
            log.debug('Terminating subprocess {} (pid {})'.format(self.process.name, self.process.pid))
            self.process.terminate()
            self.process.join()
        log.debug('Worker {} stopped on thread {}'.format(self.id, str(self.thread)))

        self.log_queue_listener.stop()

    @QtCore.pyqtSlot(str, str)
    def handle_message(self, level, message):
        self.messageEmitted.emit(self.id, level, message)
Example #28
File: web.py Project: standlove/OpenWARP
class WebController:
    '''
    This class exposes HTTP services for the frontend HTML to consume using AJAX.
    '''

    def __init__(self):
        self.logger = logging.getLogger(__name__ + '.WebController')
        cherrypy.engine.subscribe('start', self.start)
        cherrypy.engine.subscribe('stop', self.stop)
        self.queue = None
        self.ql = None

    def start(self):
        self.queue = multiprocessing.Queue(-1)
        # Solve this problem? http://stackoverflow.com/questions/25585518/python-logging-logutils-with-queuehandler-and-queuelistener
        self.ql = QueueListener(self.queue, *logging.getLogger().handlers)
        self.ql.start()

    def stop(self):
        self.ql.stop()

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def apply_configuration(self, **kwargs):
        '''
        Apply the application wide configuration.

        @param self: the class instance itself
        @param kwargs: the other arguments
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.apply_configuration()'
        helper.log_entrance(self.logger, signature, kwargs)

        try:

            # Prepare meshing directory
            self.logger.info('Applying configuration ...')

            # Call generate_mesh service
            ret = {
                'log': services.apply_configuration(ConfigurationParameters(**kwargs))
            }
            helper.log_exit(self.logger, signature, [ret])
            return ret

        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret


    @cherrypy.expose
    @cherrypy.tools.json_out()
    def generate_mesh(self, **kwargs):
        '''
        Launch Mesh Generator to generate mesh.

        @param self: the class instance itself
        @param kwargs: the other arguments
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.generate_mesh()'
        helper.log_entrance(self.logger, signature, kwargs)
        
        try:
            # Prepare meshing directory
            self.logger.info('Preparing meshing directory')
            meshing_dir = services.prepare_dir('meshing_')
            self.logger.info('Meshing files will be located at ' + str(meshing_dir))

            # Call generate_mesh service
            ret = {
                'log' : services.generate_mesh(meshing_dir, MeshingParameters(**kwargs))
            }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def simulate(self, json_str):
        '''
        Run simulation.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.simulate()'
        helper.log_entrance(self.logger, signature, {'json_str' : json_str})
        
        try:
            # Prepare simulation directory
            simulation_dir = services.prepare_dir('simulation_')
            cherrypy.session['simulation_dir'] = simulation_dir
            cherrypy.session['simulation_done'] = False
            # Call simulate service
            ret = {
                'log' : services.simulate(simulation_dir, self.construct_simulation_parameters(json_str), self.queue)
            }
            cherrypy.session['simulation_done'] = True
            # Set postprocess flag to False if a new simulation has been done successfully.
            cherrypy.session['postprocess_done'] = False
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def upload_file(self, uploadedFile):
        '''
        Upload a file via AJAX request, the file will be created in temporary directory and the full path will
        be sent back as JSON response.

        @param self: the class instance itself
        @param uploadedFile: the uploaded file
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.upload_file()'
        helper.log_entrance(self.logger, signature, {'uploadedFile' : uploadedFile})
        
        try:
            temp_dir = os.path.join(TEMP_DATA_DIRECTORY, uuid.uuid1().hex)
            os.mkdir(temp_dir)
            filepath = os.path.join(temp_dir, uploadedFile.filename)
            
            # We must use 'wb' mode here in case the uploaded file is not ascii format.
            with open(filepath, 'wb') as output:
                while True:
                    data = uploadedFile.file.read(1024)
                    if data:
                        output.write(data)
                    else:
                        break
            try:
                with open(filepath, 'r') as input:
                    points, panels = helper.determine_points_panels(input)
                    ret = {
                        'filepath' : filepath,
                        'points' : points,
                        'panels' : panels
                    }
                    helper.log_exit(self.logger, signature, [ret])
                    return ret
            except Exception as e:
                helper.log_exception(self.logger, signature, e)
                ret = { 'filepath' : filepath }
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def postprocess(self, json_str):
        '''
        Run post-processing.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.postprocess()'
        helper.log_entrance(self.logger, signature, {'json_str': json_str})
        # Set session variable postprocess_done to False by default.
        cherrypy.session['postprocess_done'] = False
        
        try:
            if not cherrypy.session.has_key('simulation_done') or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call post-processing service
                ret = {
                    'log' : services.postprocess(cherrypy.session['simulation_dir'],
                                                 self.construct_postprocess_parameters(json_str), self.queue)
                }
                cherrypy.session['postprocess_done'] = True
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def visualize(self):
        '''
        Launch ParaView to visualize simulation results.

        @param self: the class instance itself
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.visualize()'
        helper.log_entrance(self.logger, signature, None)
        
        try:
            if not cherrypy.session.has_key('simulation_done') or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            elif not cherrypy.session.has_key('postprocess_done') or not cherrypy.session['postprocess_done']:
                # postprocess must be run first
                cherrypy.response.status = 400
                ret = { 'error' : '"SAVE AS TECPLOT" must be run right after a successful simulation.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call visualize service
                services.visualize(cherrypy.session['simulation_dir'])
                helper.log_exit(self.logger, signature, None)
                return {}
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def quit(self):
        '''
        Quit the application by shutting down the CherryPy server.

        @param self: the class instance itself
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.quit()'
        helper.log_entrance(self.logger, signature, None)
        
        # Quit after sending response
        threading.Timer(2, lambda: os._exit(0)).start()
        helper.log_exit(self.logger, signature, None)
        return {}
    
    def construct_simulation_parameters(self, json_str):
        '''
        Construct the simulation parameters from json string.

        @param self: the class instance itself
        @param json_str: the json string to parse
        @return: the parsed SimulationParameters object
        '''
        # Since this is an internal method, the parameters won't be logged.
        json_obj = json.JSONDecoder().decode(json_str)
        return services.construct_simulation_parameters(json_obj)

    def construct_postprocess_parameters(self, json_str):
        # Since this is an internal method, the parameters won't be logged.
        json_obj = json.JSONDecoder().decode(json_str)
        return services.construct_postprocess_parameters(json_obj)
Example #29
File: web.py Project: standlove/OpenWARP
 def start(self):
     self.queue = multiprocessing.Queue(-1)
     # Solve this problem? http://stackoverflow.com/questions/25585518/python-logging-logutils-with-queuehandler-and-queuelistener
     self.ql = QueueListener(self.queue, *logging.getLogger().handlers)
     self.ql.start()
Example #30
class Command(BaseCommand):
    def __init__(self, stdout=None, stderr=None, no_color=False):
        super(Command, self).__init__(stdout=stdout, stderr=stderr,
                                      no_color=no_color)
        self.processes = {}
        self.started = False
        self.logging_queue = self.logging_listener = None

    def add_arguments(self, parser):
        parser.add_argument(
            '--no-daemon', action='store_false', dest='daemon',
            default=True, help="Don't daemonize process")
        parser.add_argument(
            '--pidfile', action='store', dest='pidfile',
            default="collector.pid", help="pidfile location")
        parser.add_argument(
            '--index', action='store', dest='index',
            default=None, help='index id'
        )

    def handle(self, *args, **options):
        if options['index']:
            index = LoggerIndex.objects.get(id=int(options['index']))
            c = Collector(index)
            c()
            return

        if not options['daemon']:
            return self.start_worker_pool()

        path = os.path.join(os.getcwd(), options['pidfile'])
        pidfile = TimeoutPIDLockFile(path)
        log_files = file_handles(logger)
        context = DaemonContext(pidfile=pidfile, files_preserve=log_files)

        with context:
            logger.info("daemonized")
            self.start_worker_pool()

    def start_worker_pool(self):
        self.start_mp_logging()
        indices = list(LoggerIndex.objects.all())
        logger.info("Starting worker pool with %s indexers" % len(indices))
        n = 0
        self.started = True
        for index in indices:
            for i in range(index.num_processes):
                c = Collector(index)
                p = Process(target=c)
                p.start()
                logger.info("Indexer for %s#%s started pid=%s" %
                            (index.name, i, p.pid))
                self.processes[n] = (p, index)
                n += 1
        signal.signal(signal.SIGINT, self.handle_sigint)

        while len(self.processes) > 0:
            if not self.started:
                logger.info("Stopping indexers")
                for n in self.processes:
                    (p, index) = self.processes[n]
                    os.kill(p.pid, signal.SIGTERM)
            sleep(0.5)
            for n in list(self.processes):
                (p, index) = self.processes[n]
                if p.exitcode is None:
                    if not p.is_alive() and self.started:
                        logger.debug("Indexer with pid %s not finished "
                                     "and not running" % p.pid)
                        # Not finished and not running
                        os.kill(p.pid, signal.SIGKILL)
                        c = Collector(index)
                        p = Process(target=c)
                        p.start()
                        logger.warning("Indexer for %s restarted pid=%s"
                                       % (index.name, p.pid))
                        self.processes[n] = (p, index)
                elif p.exitcode != 0 and self.started:
                    logger.warning('Process %s exited with an error '
                                   'or terminated' % p.pid)
                    c = Collector(index)
                    p = Process(target=c)
                    p.start()
                    self.processes[n] = (p, index)
                elif p.exitcode != 0:
                    logger.warning("Process %s exited with return code %s while"
                                   " terminating" % (p.pid, p.exitcode))
                    p.join()
                    del self.processes[n]
                else:
                    logger.debug('Process %s exited correctly' % p.pid)
                    p.join()
                    del self.processes[n]

        self.stop_mp_logging()

    def handle_sigint(self, sig_num, frame):
        logger.info("Got signal %s, stopping" % sig_num)
        self.started = False

    def start_mp_logging(self):
        logger.debug("Start listening child logs")
        self.logging_queue = Queue()
        handlers = logger.handlers
        self.logging_listener = QueueListener(self.logging_queue, *handlers)
        self.logging_listener.start()

    def stop_mp_logging(self):
        logger.debug("Stop listening child logs")
        self.logging_listener.stop()
Example #31
    if os.path.isdir('./openwarpgui/nemoh'):
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='openwarpgui/nemoh')
    else:
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='nemoh')
    logger = logging.getLogger(__name__)

    if len(sys.argv) <= 1:
        utility.log_and_print(
            logger,
            'Error: No configurations file given to the CLI. Usage of script is: openwarp_cli configuration1 .. '
        )

    queue = multiprocessing.Queue(-1)
    ql = QueueListener(queue, *logging.getLogger().handlers)
    ql.start()

    for i in range(1, len(sys.argv)):
        path = sys.argv[i]
        user_config = None
        # Checking if it is a valid path
        if os.path.exists(path):
            utility.log_and_print(
                logger, 'Processing configuration file at ' + path + '\n')
            with open(path, 'rt') as f:
                user_config = json.load(f)
        else:  # Check if it is json string
            try:
                user_config = json.loads(path)
            except Exception as e:
Example #32
File: log.py Project: umaxyon/pac-job
 def __init__(self):
     self.q = Queue(-1)
     self.ql = QueueListener(self.q, *tuple(getLogger('PacPac').handlers))
     self.qh = QueueHandler(self.q)
Example #33
 def start_mp_logging(self):
     logger.debug("Start listening child logs")
     self.logging_queue = Queue()
     handlers = logger.handlers
     self.logging_listener = QueueListener(self.logging_queue, *handlers)
     self.logging_listener.start()
Example #35
 def start(self):
     if not self.running:
         self.queue_listener = QueueListener(self.queue)
         self.queue_listener.start()
     else:
         raise Exception("already running")
Example #37
 def __enter__(self):
     q = Queue()
     self.listener = QueueListener(q, self)
     self.listener.start()
     return q
示例#38
0
class WebController:
    '''
    This class exposes HTTP services for the frontend HTML to consume using AJAX.
    '''

    def __init__(self):
        self.logger = logging.getLogger(__name__ + '.WebController')
        cherrypy.engine.subscribe('start', self.start)
        cherrypy.engine.subscribe('stop', self.stop)
        self.queue = None
        self.ql = None

    def start(self):
        self.queue = multiprocessing.Queue(-1)
        # Solve this problem? http://stackoverflow.com/questions/25585518/python-logging-logutils-with-queuehandler-and-queuelistener
        self.ql = QueueListener(self.queue, *logging.getLogger().handlers)
        self.ql.start()

    def stop(self):
        self.ql.stop()

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def apply_configuration(self, **kwargs):
        '''
        Apply the application wide configuration.

        @param self: the class instance itself
        @param kwargs: the other arguments
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.apply_configuration()'
        helper.log_entrance(self.logger, signature, kwargs)

        try:

            # Prepare meshing directory
            self.logger.info('Applying configuration ...')

            # Call generate_mesh service
            ret = {
                'log': services.apply_configuration(ConfigurationParameters(**kwargs))
            }
            helper.log_exit(self.logger, signature, [ret])
            return ret

        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret


    @cherrypy.expose
    @cherrypy.tools.json_out()
    def generate_mesh(self, **kwargs):
        '''
        Launch Mesh Generator to generate mesh.

        @param self: the class instance itself
        @param kwargs: the other arguments
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.generate_mesh()'
        helper.log_entrance(self.logger, signature, kwargs)
        
        try:
            # Prepare meshing directory
            self.logger.info('Preparing meshing directory')
            meshing_dir = services.prepare_dir('meshing_')
            self.logger.info('Meshing files will be located at ' + str(meshing_dir))

            # Call generate_mesh service
            ret = {
                'log' : services.generate_mesh(meshing_dir, MeshingParameters(**kwargs))
            }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def simulate(self, json_str):
        '''
        Run simulation.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.simulate()'
        helper.log_entrance(self.logger, signature, {'json_str' : json_str})
        
        try:
            # Prepare simulation directory
            simulation_dir = services.prepare_dir('simulation_')
            cherrypy.session['simulation_dir'] = simulation_dir
            cherrypy.session['simulation_done'] = False
            # Call simulate service
            ret = {
                'log' : services.simulate(simulation_dir, self.construct_simulation_parameters(json_str), self.queue)
            }
            cherrypy.session['simulation_done'] = True
            # Set postprocess flag to False if a new simulation has been done successfully.
            cherrypy.session['postprocess_done'] = False
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def upload_file(self, uploadedFile):
        '''
        Upload a file via AJAX request, the file will be created in temporary directory and the full path will
        be sent back as JSON response.

        @param self: the class instance itself
        @param uploadedFile: the uploaded file
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.upload_file()'
        helper.log_entrance(self.logger, signature, {'uploadedFile' : uploadedFile})
        
        try:
            temp_dir = os.path.join(TEMP_DATA_DIRECTORY, uuid.uuid1().hex)
            os.mkdir(temp_dir)
            filepath = os.path.join(temp_dir, uploadedFile.filename)
            
            # We must use 'wb' mode here in case the uploaded file is not in ASCII format.
            with open(filepath, 'wb') as output:
                while True:
                    data = uploadedFile.file.read(1024)
                    if data:
                        output.write(data)
                    else:
                        break
            try:
                with open(filepath, 'r') as input_file:
                    points, panels = self.determine_points_panels(input_file)
                    ret = {
                        'filepath' : filepath,
                        'points' : points,
                        'panels' : panels
                    }
                    helper.log_exit(self.logger, signature, [ret])
                    return ret
            except Exception as e:
                helper.log_exception(self.logger, signature, e)
                ret = { 'filepath' : filepath }
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def postprocess(self, json_str):
        '''
        Run post-processing.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.postprocess()'
        helper.log_entrance(self.logger, signature, {'json_str': json_str})
        # Set session variable postprocess_done to False by default.
        cherrypy.session['postprocess_done'] = False
        
        try:
            if 'simulation_done' not in cherrypy.session or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call post-processing service
                ret = {
                    'log' : services.postprocess(cherrypy.session['simulation_dir'],
                                                 self.construct_postprocess_parameters(json_str), self.queue)
                }
                cherrypy.session['postprocess_done'] = True
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def visualize(self):
        '''
        Launch ParaView to visualize simulation results.

        @param self: the class instance itself
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.visualize()'
        helper.log_entrance(self.logger, signature, None)
        
        try:
            if 'simulation_done' not in cherrypy.session or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            elif 'postprocess_done' not in cherrypy.session or not cherrypy.session['postprocess_done']:
                # postprocess must be run first
                cherrypy.response.status = 400
                ret = { 'error' : '"SAVE AS TECPLOT" must be run right after a successful simulation.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call visualize service
                services.visualize(cherrypy.session['simulation_dir'])
                helper.log_exit(self.logger, signature, None)
                return {}
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def quit(self):
        '''
        Quit the application by shutting down the CherryPy server.

        @param self: the class instance itself
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.quit()'
        helper.log_entrance(self.logger, signature, None)
        
        # Quit after sending response
        threading.Timer(2, lambda: os._exit(0)).start()
        helper.log_exit(self.logger, signature, None)
        return {}
    
    def construct_simulation_parameters(self, json_str):
        '''
        Construct the simulation parameters from json string.

        @param self: the class instance itself
        @param json_str: the json string to parse
        @return: the parsed SimulationParameters object
        '''
        # Since this is an internal method, the parameters won't be logged.
        json_obj = json.JSONDecoder().decode(json_str)
        para = SimulationParameters(**json_obj)
        if para.floating_bodies is not None:
            new_bodies = []
            for body in para.floating_bodies:
                new_bodies.append(FloatingBody(**body))
            del para.floating_bodies[:]
            para.floating_bodies.extend(new_bodies)
        return para

    def construct_postprocess_parameters(self, json_str):
        '''
        Construct the post-processing parameters from json string.
        @param self: the class instance itself
        @param json_str: the json string to parse
        @return: the parsed PostprocessingParameters object
        '''
        # Since this is an internal method, the parameters won't be logged.
        json_obj = json.JSONDecoder().decode(json_str)
        return PostprocessingParameters(**json_obj)

    def determine_points_panels(self, dat_file):
        '''
        Determines the number of points and panels of a mesh file.

        @param self: the class instance itself
        @param dat_file: the mesh file to parse
        @return: the number of points and panels of a mesh file
        @raise Exception: if the file is not expected format
        '''
        # Since this is an internal method, the parameters won't be logged.
        lines = dat_file.readlines()
        zero_line1 = 0
        zero_line2 = 0
        succeed = False
        for line in lines[1:]:
            if len(line.strip()) > 0:
                zero_line1 = zero_line1 + 1
                if line.strip().startswith('0'):
                    succeed = True
                    break
        if not succeed:
            raise Exception('Zero line 1 not found.')
        # Reset the flag; it is still True from the first loop, so a
        # missing second zero line would otherwise go undetected.
        succeed = False
        for line in lines[(zero_line1 + 1):]:
            if len(line.strip()) > 0:
                zero_line2 = zero_line2 + 1
                if line.strip().startswith('0'):
                    succeed = True
                    break
        if not succeed:
            raise Exception('Zero line 2 not found.')
        return zero_line1 - 1, zero_line2 - 1
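
A quick sketch of the mesh layout that determine_points_panels() expects may help: a header line, a points block terminated by a line starting with '0', then a panels block terminated the same way. The sample values and the `controller` instance below are hypothetical, not from the project.

import StringIO  # Python 2; use io.StringIO on Python 3

sample_dat = '\n'.join([
    '2 0',              # header line (skipped by the parser)
    '1 0.0 0.0 0.0',    # point 1
    '2 1.0 0.0 0.0',    # point 2
    '0 0.0 0.0 0.0',    # zero line 1: ends the points block
    '1 2 2 1',          # panel 1
    '0 0 0 0',          # zero line 2: ends the panels block
])
# Assuming 'controller' is an initialized WebController:
points, panels = controller.determine_points_panels(StringIO.StringIO(sample_dat))
assert (points, panels) == (2, 1)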
Example #39
class Command(BaseCommand):
    def __init__(self, stdout=None, stderr=None, no_color=False):
        super(Command, self).__init__(stdout=stdout,
                                      stderr=stderr,
                                      no_color=no_color)
        self.processes = {}
        self.started = False
        self.logging_queue = self.logging_listener = None

    def add_arguments(self, parser):
        parser.add_argument('--no-daemon',
                            action='store_false',
                            dest='daemon',
                            default=True,
                            help="Don't daemonize process")
        parser.add_argument('--pidfile',
                            action='store',
                            dest='pidfile',
                            default="collector.pid",
                            help="pidfile location")
        parser.add_argument('--index',
                            action='store',
                            dest='index',
                            default=None,
                            help='index id')

    def handle(self, *args, **options):
        if options['index']:
            index = LoggerIndex.objects.get(id=int(options['index']))
            c = Collector(index)
            c()
            return

        if not options['daemon']:
            return self.start_worker_pool()

        path = os.path.join(os.getcwd(), options['pidfile'])
        pidfile = TimeoutPIDLockFile(path)
        log_files = file_handles(logger)
        context = DaemonContext(pidfile=pidfile, files_preserve=log_files)

        with context:
            logger.info("daemonized")
            self.start_worker_pool()

    def start_worker_pool(self):
        self.start_mp_logging()
        indices = list(LoggerIndex.objects.all())
        logger.info("Starting worker pool with %s indexers" % len(indices))
        n = 0
        self.started = True
        for index in indices:
            for i in range(index.num_processes):
                c = Collector(index)
                p = Process(target=c)
                p.start()
                logger.info("Indexer for %s#%s started pid=%s" %
                            (index.name, i, p.pid))
                self.processes[n] = (p, index)
                n += 1
        signal.signal(signal.SIGINT, self.handle_sigint)

        while len(self.processes) > 0:
            if not self.started:
                logger.info("Stopping indexers")
                for n in self.processes:
                    (p, index) = self.processes[n]
                    os.kill(p.pid, signal.SIGTERM)
            sleep(0.5)
            for n in list(self.processes):
                (p, index) = self.processes[n]
                if p.exitcode is None:
                    if not p.is_alive() and self.started:
                        logger.debug("Indexer with pid %s not finished "
                                     "and not running" % p.pid)
                        # Not finished and not running
                        os.kill(p.pid, signal.SIGKILL)
                        c = Collector(index)
                        p = Process(target=c)
                        p.start()
                        logger.warning("Indexer for %s restarted pid=%s" %
                                       (index.name, p.pid))
                        self.processes[n] = (p, index)
                elif p.exitcode != 0 and self.started:
                    logger.warning('Process %s exited with an error '
                                   'or terminated' % p.pid)
                    c = Collector(index)
                    p = Process(target=c)
                    p.start()
                    self.processes[n] = (p, index)
                elif p.exitcode != 0:
                    logger.warning(
                        "Process %s exited with return code %s while"
                        " terminating" % (p.pid, p.exitcode))
                    p.join()
                    del self.processes[n]
                else:
                    logger.debug('Process %s exited correctly' % p.pid)
                    p.join()
                    del self.processes[n]

        self.stop_mp_logging()

    def handle_sigint(self, sig_num, frame):
        logger.info("Got signal %s, stopping" % sig_num)
        self.started = False

    def start_mp_logging(self):
        logger.debug("Start listening child logs")
        self.logging_queue = Queue()
        handlers = logger.handlers
        self.logging_listener = QueueListener(self.logging_queue, *handlers)
        self.logging_listener.start()

    def stop_mp_logging(self):
        logger.debug("Stop listening child logs")
        self.logging_listener.stop()
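
One piece this pattern leaves implicit is the child side: the parent's QueueListener only sees records that workers push onto logging_queue. Below is a minimal sketch of a worker-side setup, assuming the queue is handed to each worker; the Collector itself is not shown in the source, so this is an illustration rather than the project's actual code.

import logging
from logutils.queue import QueueHandler  # logging.handlers.QueueHandler on Python 3

def worker_logging_init(queue, level=logging.INFO):
    root = logging.getLogger()
    root.handlers = []                    # drop handlers inherited via fork
    root.addHandler(QueueHandler(queue))  # ship records to the parent's listener
    root.setLevel(level)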
Example #41
 def start(self):
     if not self.running:
         self.queue_listener = QueueListener(self.queue)
         self.queue_listener.start()
     else:
         raise Exception("already running")
Example #42
class Asylog(with_metaclass(Singleton, object)):
    '''
    Asylog is an asynchronous, singleton logging class
    extended from the Python standard lib logging.
    requirement: python 2.7, logutils (pip install logutils)
    '''

    def __init__(self):
        self.logger = colorlog.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)

        logging.addLevelName(logging.INFO, 'I')
        # colorlog.default_log_colors['I'] = "bold_green"
        logging.addLevelName(logging.CRITICAL, 'C')
        colorlog.default_log_colors['C'] = "bold_red"
        logging.addLevelName(logging.DEBUG, 'D')
        logging.addLevelName(logging.WARNING, 'W')

        SUCCESS = logging.DEBUG + 1
        logging.addLevelName(SUCCESS, 'success')
        colorlog.default_log_colors['success'] = "bold_green"
        setattr(self.logger, 'success', lambda message, *args: self.logger._log(SUCCESS, message, args))

        # Console log msg setting
        sh = colorlog.StreamHandler()
        sh.setLevel(logging.DEBUG + 1)
        sh_fmt = colorlog.ColoredFormatter('%(log_color)s> %(message)s')
        sh.setFormatter(sh_fmt)
        self.logger.addHandler(sh)

        # File log msg setting
        self.config = Config()
        product_name = self.config.get_product_name()
        folder_name = "{}_Log_{}".format(product_name,
                                         datetime.now().year)
        folder_path = os.path.join(os.getcwd(), folder_name)
        self._make_sure_dir_exists(folder_path)

        filename = '{}.txt'.format(datetime.now().strftime("Log %Y%m%d"))
        self.log_path = os.path.join(folder_path, filename)

        fh = logging.FileHandler(self.log_path)
        fmt = logging.Formatter('%(asctime)s, %(levelname)s, %(module)s, %(station)s, %(serial)s, "%(message)s"',
                                datefmt='%Y-%m-%d %H:%M:%S')
        fh.setFormatter(fmt)
        que = Queue.Queue(-1)
        queue_handler = QueueHandler(que)
        queue_handler.setLevel(logging.INFO)
        self.logger.addHandler(queue_handler)
        self.listener = QueueListener(que, fh)

        self.latest_filter = None

    def _make_sure_dir_exists(self, path):
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise

    def start(self):
        self.listener.start()

    def stop(self):
        self.listener.stop()

    def getLogger(self):
        return self.logger

    def change_filter(self, module, station, serial):
        class ContextFilter(logging.Filter):
            def filter(self, record):
                record.module = module
                record.station = station
                record.serial = serial
                return True

        if self.latest_filter:
            self.logger.removeFilter(self.latest_filter)
            del self.latest_filter
        self.latest_filter = ContextFilter()
        self.logger.addFilter(self.latest_filter)


    def change_adapter(self, module, station):
        class CustomAdapter(logging.LoggerAdapter):
            def process(self, msg, kwargs):
                return '%(module)s, %(station)s, "%(msg)s" ' % {"module": self.extra['module'],
                                                                "station": self.extra['station'],
                                                                "msg": msg}, kwargs

        self.logger = CustomAdapter(logging.getLogger(), {'module': module, "station": station})
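
A minimal usage sketch for Asylog; the module, station and serial values are hypothetical, and Config() must resolve a product name in your environment:

log = Asylog()                  # the Singleton metaclass returns one shared instance
log.start()                     # begin draining the queue into the file handler
log.change_filter('boot', 'station-1', 'SN0001')   # hypothetical context values
logger = log.getLogger()
logger.info('power-on self test passed')   # reaches console and file
logger.success('all checks green')         # console only: below the INFO queue handler level
log.stop()                      # flush queued records and stop the listener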
Example #43
 def __enter__(self):
     q = multiprocessing.Queue()
     self.listener = QueueListener(q, self)
     self.listener.start()
     return q
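
The matching __exit__ is not shown in the source; a hedged sketch that stops the listener, flushing queued records, when the with-block ends:

 def __exit__(self, exc_type, exc_value, traceback):
     # Assumption: stopping the listener here is the intended cleanup.
     self.listener.stop()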
Example #44
File: cli.py  Project: standlove/OpenWARP
class OpenWarpCLI(cmd.Cmd):
    """Open Warp Command Line Interface"""

    prompt = '> '
    intro = "OpenWarp CLI\n"

    def __init__(self):
        '''
        Initialization
        '''
        cmd.Cmd.__init__(self)

        self.simulation_done = False
        self.simulation_dir = None
        self.logger = logging.getLogger(__name__ + '.OpenWarpCLI')

        self.start()

    def start(self):
        self.queue = multiprocessing.Queue(-1)
        # See http://stackoverflow.com/questions/25585518/python-logging-logutils-with-queuehandler-and-queuelistener for a known QueueHandler/QueueListener issue.
        self.ql = QueueListener(self.queue, *logging.getLogger().handlers)
        self.ql.start()

    def stop(self):
        self.ql.stop()
    
    def do_m(self, json_file):
        '''
        Shortcut for the generate mesh command
        Args:
            json_file: the json file containing all the parameters
        '''
        self.do_meshing(json_file)
    
    def do_meshing(self, json_file):
        '''
        Launch Mesh Generator to generate mesh.
        Args:
            json_file: the json file containing all the parameters
        '''
        signature = __name__ + '.OpenWarpCLI.do_meshing()'
        helper.log_entrance(self.logger, signature, {'json_file' : json_file})

        try:
            json_obj = self.load_json(json_file)

            # Prepare meshing directory
            self.logger.info('Preparing meshing directory')
            meshing_dir = services.prepare_dir('meshing_')
            self.logger.info('Meshing files will be located at ' + str(meshing_dir))

            # Call generate_mesh service
            log = services.generate_mesh(meshing_dir, MeshingParameters(**json_obj))
            print log
            helper.log_exit(self.logger, signature, [ { 'log': log }])
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            print e
        
    def do_s(self, json_file):
        '''
        Shortcut for the run simulation command
        Args:
            json_file: the json file containing all the parameters
        '''
        self.do_simulation(json_file)

    def do_simulation(self, json_file):
        '''
        Run simulation 
        Args:
            json_file: the json file containing all the parameters
        '''
        signature = __name__ + '.OpenWarpCLI.do_simulation()'
        helper.log_entrance(self.logger, signature, {'json_file' : json_file})
        try:
            json_obj = self.load_json(json_file)

            # Prepare simulation directory
            self.logger.info('Preparing simulation directory')
            self.simulation_dir = services.prepare_dir('simulation_')
            self.logger.info('Simulations files will be located at ' + str(self.simulation_dir))
            
            # Determine points and panels for each floating body.
            bodies = json_obj.get('floating_bodies')
            if bodies is not None and isinstance(bodies, list):
                for body in bodies:
                    mesh_file = body.get('mesh_file')
                    with open(mesh_file, 'r') as fd:
                        points, panels = helper.determine_points_panels(fd)
                        body['points'] = str(points)
                        body['panels'] = str(panels)

            # Call simulate service
            self.simulation_done = False
            log = services.simulate(self.simulation_dir, services.construct_simulation_parameters(json_obj), self.queue)
            print log
            self.simulation_done = True

            helper.log_exit(self.logger, signature, [ { 'log': log }])
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            print e
        

    def do_p(self, json_file):
        '''
        The shortcut for the run post-processing command
        Args:
            json_file: the json file containing all the parameters
        '''
        self.do_postprocessing(json_file)

    def do_postprocessing(self, json_file):
        '''
        Run post-processing.
        Args:
            json_file: the json file containing all the parameters            
        '''
        signature = __name__ + '.OpenWarpCLI.do_postprocessing()'
        helper.log_entrance(self.logger, signature, {'json_file' : json_file})

        if not self.simulation_done:
            ret = { 'error' : 'Simulation must be run first.' }
            helper.log_exit(self.logger, signature, [ret])
            print ret['error']
            return

        try:
            json_obj = self.load_json(json_file)
            log = services.postprocess(self.simulation_dir, services.construct_postprocess_parameters(json_obj), self.queue)
            print log
            helper.log_exit(self.logger, signature, [ { 'log': log }])
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            print e

    def do_q(self, line):
        '''
        The shortcut for the quit command
        '''
        return self.do_quit(line)

    def do_quit(self, line):
        '''
        Quit the cli.
        '''

        self.stop()
        return True

    def load_json(self, json_file):
        '''
        Load json from file
        Args:
            json_file: the json file
        '''
        with open(json_file, 'r') as fd:
            return json.load(fd)
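
A hypothetical entry point for this module (not shown in the source) would simply run the command loop; do_quit()/do_q() already stop the QueueListener on exit:

if __name__ == '__main__':
    OpenWarpCLI().cmdloop()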