Example no. 1
    def run(self):
        qh = QueueHandler(self.errq)
        qh.setLevel(logging.INFO)

        self.log = logging.getLogger('SW-{}'.format(self.pid))
        self.log.setLevel(logging.INFO)
        self.log.propagate = False
        self.log.addHandler(qh)
        self.log.info('{0} ({1}) now running'.format(self.name, self.pid)) # @DEBUG
        self.stream()
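The queue this worker writes to (`self.errq`) still needs a consumer in the parent process. A minimal sketch of that side, assuming `errq` is a `multiprocessing.Queue` created by the parent and that records should simply go to the console (the formatter string is an assumption):

import logging
import logging.handlers
import multiprocessing

# Queue shared with the worker (passed to it as self.errq)
errq = multiprocessing.Queue(-1)

console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(asctime)s %(processName)s %(levelname)s %(message)s'))

# QueueListener pulls records off the queue and hands them to the console handler
listener = logging.handlers.QueueListener(errq, console)
listener.start()
# ... start the worker process with errq, join it, then ...
listener.stop()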
Example no. 2
    def __call__(self, *args, **kwargs):
        queue_in = self.queue_in
        queue_out = self.queue_out
        logger = logging.getLogger()
        logger.addHandler(QueueHandler(queue_out))
        logger.setLevel(logging.DEBUG if self._debug else logging.INFO)
        db.init(self._settings['db_path'], False)

        self._ready()

        heartbeat_sequence = 1
        while True:
            try:
                task = queue_in.get(timeout=1)
                if isinstance(task, tasks.Task):
                    self._work(str(task))
                    self._done(task(**self._settings))
            except queues.Empty:
                self._heartbeat(heartbeat_sequence)
                heartbeat_sequence += 1
            except Exception as e:
                self._error(e)
            except KeyboardInterrupt:
                break
Example no. 3
    def run_trial(self, **kwargs):

        qh = QueueHandler(self.log_queue)
        logger.addHandler(qh)

        mh_dict = kwargs["mh_dict"]

        mpmh = generate_dynamic_mh_class(mh_dict)

        n_tasks = kwargs["n_tasks"]

        while True:
            item = self.queue.get()
            if item is None:
                break

            (scale, seed) = item

            full_dataset = self.mh.prepare_dataset(scale, seed)

            mpmh.run_single(full_dataset, scale, seed)
            with n_tasks.get_lock():
                n_tasks.value -= 1
            self.queue.task_done()
Example no. 4
def child_process_logging(boot_info: BootInfo) -> Iterator[None]:
    # We get the root logger here to ensure that all logs are given a chance to
    # pass through this handler
    logger = logging.getLogger()
    logger.setLevel(boot_info.min_log_level)

    set_logger_levels(boot_info.logger_levels)

    ipc_handler = IPCHandler.connect(boot_info.trinity_config.logging_ipc_path)
    ipc_handler.setLevel(boot_info.min_log_level)

    # Push all logs into a queue, because sometimes pushing into the socket is
    #   slow and we don't want to block the event loop. Inspired by:
    # https://docs.python.org/3.8/howto/logging-cookbook.html#dealing-with-handlers-that-block
    log_queue: Queue[str] = Queue(-1)
    queue_handler = QueueHandler(log_queue)
    queue_listener = QueueListener(
        log_queue,
        ipc_handler,
        respect_handler_level=True,
    )

    logger.addHandler(queue_handler)

    logger.debug(
        'Logging initialized for file %s: PID=%s',
        boot_info.trinity_config.logging_ipc_path.resolve(),
        os.getpid(),
    )
    with ipc_handler:
        queue_listener.start()
        try:
            yield
        finally:
            logger.removeHandler(queue_handler)
            queue_listener.stop()
Example no. 5
def init_logger():
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if platform.python_version().startswith('2'):
        # python 2
        # log at stdout
        import sys
        ch = logging.StreamHandler(sys.stdout)
    else:
        # python 3
        # log into queue
        import queue
        que = queue.Queue(-1)  # no limit on size
        from logging.handlers import QueueHandler
        ch = QueueHandler(que)

    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    root.addHandler(ch)
    yield ch
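Note that on the Python 3 branch nothing drains `que`: records only accumulate in it. A hedged sketch of how a caller might attach a `QueueListener` to the yielded handler (assuming the records should end up on the console):

import logging
from logging.handlers import QueueHandler, QueueListener

gen = init_logger()
handler = next(gen)  # the handler yielded by init_logger()

if isinstance(handler, QueueHandler):
    # Forward everything placed on the handler's queue to stderr
    listener = QueueListener(handler.queue, logging.StreamHandler())
    listener.start()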
Example no. 6
def setup_logger() -> logging.Logger:
    # create logger
    logger = logging.getLogger('cnstream_service')
    logger.propagate = False

    # create formatter
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(levelname)s - %(message)s')

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    # create queue handler and set level to info
    qh = QueueHandler(log_queue)
    qh.setLevel(logging.INFO)
    qh.setFormatter(formatter)

    # add handler to logger
    logger.addHandler(ch)
    logger.addHandler(qh)

    return logger
Example no. 7
def process(conf):
    """
    """

    remote_addr = conf['host']['addr']
    remote_port = conf['host']['port']
    username = conf['host']['username']
    password = conf['host']['password']
    lime_module = conf['host']['module']
    filename = conf['host']['filename']
    key = conf['host']['key']
    bucket = conf['aws']['bucket']
    progressbar = conf['host']['progressbar']
    tunnel_addr = '127.0.0.1'
    tunnel_port = random.randint(10000, 30000)
    remote_module_path = '/tmp/lime.ko'

    repository_enabled = conf['repository']['enabled']
    repository_url = conf['repository']['url']

    queue_handler = QueueHandler(log_queue)
    logger = logging.getLogger('margaritashotgun')
    logger.addHandler(queue_handler)

    if bucket is not None:
        dest = OutputDestinations.s3
    else:
        dest = OutputDestinations.local

    if filename is None:
        tm = int(time.time())
        dt = datetime.utcfromtimestamp(tm).isoformat()
        filename = "{0}-{1}-mem.lime".format(remote_addr, dt)

    try:
        host = Host()
        host.connect(username, password, key, remote_addr, remote_port)
        host.start_tunnel(tunnel_port, tunnel_addr, tunnel_port)
        if lime_module is None:
            kernel_version = host.kernel_version()
            if repository_enabled:
                repo = Repository(repository_url)
                match = repo.search_modules(kernel_version)
                if match is not None:
                    lime_module = repo.fetch_module(match)
                    host.upload_module(lime_module)
                else:
                    raise KernelModuleNotFoundError(kernel_version, repo.url)
            else:
                # TODO: prompt user to search repository when running interactively
                raise KernelModuleNotProvidedError(kernel_version)
        else:
            host.upload_module(lime_module, remote_module_path)

        host.load_lime(remote_module_path, tunnel_port)
        lime_loaded = host.wait_for_lime(tunnel_port)

        if lime_loaded:
            result = host.capture_memory(dest, filename, bucket, progressbar)
        else:
            result = False

        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()

        return (remote_addr, result)
    except KeyboardInterrupt:
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        return (remote_addr, False)
    except Exception as ex:
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        logger.critical(ex)
        return (remote_addr, False)
Example no. 8
    def run(self):
        # we have to fix up the logger before we can start
        _root = logging.getLogger()
        _root.handlers = []
        _root.addHandler(QueueHandler(self.logging_queue))

        logger = logging.getLogger(self.name)
        logger.setLevel(self.log_level)
        logger.debug(f'Download worker reporting for duty!')

        last_filename = ''
        current_file = None

        while True:
            try:
                try:
                    j = self.q.get(timeout=10.0)
                except Empty:
                    logger.warning('Writer queue empty!')
                    continue

                if j.kill:
                    if current_file:
                        current_file.close()
                    self.o_q.put(WriterTaskResult(success=True, kill=True))
                    break

                # make directories if required
                path = os.path.split(j.filename)[0]
                if not os.path.exists(os.path.join(self.base_path, path)):
                    os.makedirs(os.path.join(self.base_path, path))

                full_path = os.path.join(self.base_path, j.filename)

                if j.empty:  # just create an empty file
                    open(full_path, 'a').close()
                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename))
                    continue
                elif j.open:
                    if current_file:
                        logger.warning(f'Opening new file {j.filename} without closing previous! {last_filename}')
                        current_file.close()

                    current_file = open(full_path, 'wb')
                    last_filename = j.filename

                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename))
                    continue
                elif j.close:
                    if current_file:
                        current_file.close()
                        current_file = None
                    else:
                        logger.warning(f'Asking to close file that is not open: {j.filename}')

                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename, closed=True))
                    continue
                elif j.rename:
                    if current_file:
                        logger.warning('Trying to rename file without closing first!')
                        current_file.close()
                        current_file = None
                    if j.delete:
                        try:
                            os.remove(full_path)
                        except OSError as e:
                            logger.error(f'Removing file failed: {e!r}')
                            self.o_q.put(WriterTaskResult(success=False, filename=j.filename))
                            continue

                    try:
                        os.rename(os.path.join(self.base_path, j.old_filename), full_path)
                    except OSError as e:
                        logger.error(f'Renaming file failed: {e!r}')
                        self.o_q.put(WriterTaskResult(success=False, filename=j.filename))
                        continue

                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename))
                    continue
                elif j.delete:
                    if current_file:
                        logger.warning('Trying to delete file without closing first!')
                        current_file.close()
                        current_file = None

                    try:
                        os.remove(full_path)
                    except OSError as e:
                        logger.error(f'Removing file failed: {e!r}')

                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename))
                    continue

                pre_write = post_write = 0

                try:
                    if j.shm:
                        pre_write = time.time()
                        shm_offset = j.shm.offset + j.chunk_offset
                        shm_end = shm_offset + j.chunk_size
                        current_file.write(self.shm.buf[shm_offset:shm_end].tobytes())
                        post_write = time.time()
                    elif j.cache_file:
                        pre_write = time.time()
                        with open(os.path.join(self.cache_path, j.cache_file), 'rb') as f:
                            if j.chunk_offset:
                                f.seek(j.chunk_offset)
                            current_file.write(f.read(j.chunk_size))
                        post_write = time.time()
                    elif j.old_file:
                        pre_write = time.time()
                        with open(os.path.join(self.base_path, j.old_file), 'rb') as f:
                            if j.chunk_offset:
                                f.seek(j.chunk_offset)
                            current_file.write(f.read(j.chunk_size))
                        post_write = time.time()
                except Exception as e:
                    logger.warning(f'Something in writing a file failed: {e!r}')
                    self.o_q.put(WriterTaskResult(success=False, filename=j.filename,
                                                  chunk_guid=j.chunk_guid,
                                                  release_memory=j.release_memory,
                                                  shm=j.shm, size=j.chunk_size,
                                                  time_delta=post_write-pre_write))
                else:
                    self.o_q.put(WriterTaskResult(success=True, filename=j.filename,
                                                  chunk_guid=j.chunk_guid,
                                                  release_memory=j.release_memory,
                                                  shm=j.shm, size=j.chunk_size,
                                                  time_delta=post_write-pre_write))
            except Exception as e:
                logger.warning(f'Job {j.filename} failed with: {e!r}, fetching next one...')
                self.o_q.put(WriterTaskResult(success=False, filename=j.filename, chunk_guid=j.chunk_guid))

                try:
                    if current_file:
                        current_file.close()
                        current_file = None
                except Exception as e:
                    logger.error(f'Closing file after error failed: {e!r}')
            except KeyboardInterrupt:
                logger.warning('Immediate exit requested, quitting...')
                if current_file:
                    current_file.close()
                return
Example no. 9
def worker_init(q, logger_name, config):
    qh = QueueHandler(q)
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    logger.addHandler(qh)
    return logger
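A hedged sketch of how `worker_init` could be wired into a `multiprocessing.Pool`, with a `QueueListener` draining the queue in the parent; the task function, pool size, and the use of a manager queue are assumptions rather than part of the original code:

import logging
import logging.handlers
import multiprocessing

def some_task(i):
    # Hypothetical work item: logs through the logger configured by worker_init
    logging.getLogger('worker').info('processing item %s', i)
    return i * i

if __name__ == '__main__':
    log_q = multiprocessing.Manager().Queue(-1)  # manager queue can be passed to Pool workers
    listener = logging.handlers.QueueListener(log_q, logging.StreamHandler())
    listener.start()

    with multiprocessing.Pool(processes=4,
                              initializer=worker_init,
                              initargs=(log_q, 'worker', None)) as pool:
        pool.map(some_task, range(10))

    listener.stop()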
Example no. 10
def query_wmi(
    query_str: str,
    namespace: str = "cimv2",
    name: str = "noname",
    depth: int = 1,
    can_be_skipped: bool = False,
    mp_queue: Union[Queue, SimpleQueue] = None,
    debug: bool = False,
    computer: str = "localhost",
) -> Union[list, None]:
    """
    Execute WMI queries that return pre-formatted python dictionaries
    Also allows passing a queue so log records can be returned when using multiprocessing
    """
    if mp_queue:
        logging_handler = QueueHandler(mp_queue)
        local_logger = logging.getLogger()
        local_logger.handlers = []
        local_logger.addHandler(logging_handler)
        if debug:
            local_logger.setLevel(logging.DEBUG)
        else:
            local_logger.setLevel(logging.INFO)
    else:
        local_logger = logging.getLogger(__intname__)

    # Multiprocessing also requires to pickle results
    # Since we cannot send wmi objects, we'll convert them to dict first using wmi_object_2_list_of_dict

    # CoInitialize (and CoUninitialize) are needed when using WMI in threads, but not a multiprocessing child
    # pythoncom.CoInitialize()
    local_logger.debug("Running WMI query [%s]." % name)
    # noinspection PyBroadException

    # Full moniker example
    # wmi_handle = wmi.WMI(moniker=r'winmgmts:{impersonationLevel=impersonate,authenticationLevel=pktPrivacy,(LockMemory, !IncreaseQuota)}!\\localhost\root\cimv2/Security/MicrosoftVolumeEncryption')
    try:
        if namespace.startswith("cimv2"):
            wmi_handle = wmi.WMI(
                moniker=
                r"winmgmts:{impersonationLevel=impersonate,authenticationLevel=pktPrivacy,(LockMemory, !IncreaseQuota)}!\\%s\root\%s"
                % (computer, namespace))
            return wmi_object_2_list_of_dict(wmi_handle.query(query_str),
                                             depth)
        elif namespace == "wmi":
            wmi_handle = wmi.WMI(namespace="wmi")
            return wmi_object_2_list_of_dict(wmi_handle.query(query_str),
                                             depth)
        elif namespace == "SecurityCenter":
            # Try to fallback to securityCenter v1 for XP
            # noinspection PyBroadException
            try:
                wmi_handle = wmi.WMI(namespace="SecurityCenter2")
            except Exception:
                # noinspection PyBroadException
                try:
                    wmi_handle = wmi.WMI(namespace="SecurityCenter")
                except Exception:
                    logger.info("cannot get securityCenter handle.")
                    return None
            return wmi_object_2_list_of_dict(wmi_handle.query(query_str),
                                             depth)
        else:
            local_logger.critical("Bogus query path {}.".format(namespace))
    except pywintypes.com_error:
        if can_be_skipped is not True:
            local_logger.warning(
                "Cannot get WMI query (pywin) {}.".format(name), exc_info=True)
            local_logger.debug("Trace:", exc_info=True)
        else:
            local_logger.info("Cannot get WMI query (pywin) {}.".format(name))
    except wmi.x_access_denied:
        if can_be_skipped is not True:
            local_logger.warning(
                "Cannot get WMI request (access) {}.".format(name),
                exc_info=True)
            local_logger.debug("Trace:", exc_info=True)
        else:
            local_logger.info(
                "Cannot get WMI request (access) {}.".format(name))
    except wmi.x_wmi:
        if can_be_skipped is not True:
            local_logger.warning(
                "Cannot get WMI query (x_wmi) {}.".format(name), exc_info=True)
            local_logger.debug("Trace:", exc_info=True)
        else:
            local_logger.info("Cannot get WMI query (x_wmi) {}.".format(name))
    except NameError:
        if can_be_skipped is not True:
            local_logger.warning(
                "Cannot get WMI request (name) {}.".format(name))
            local_logger.debug("Trace:", exc_info=True)
        else:
            local_logger.info("Cannot get WMI query (name) {}.".format(name))
    except Exception:
        if can_be_skipped is not True:
            local_logger.warning(
                "Cannot get non skippable WMI request (uncaught) {}.".format(
                    name))
            local_logger.debug("Trace:", exc_info=True)
        else:
            local_logger.info(
                "Cannot get WMI request (uncaught) {}.".format(name))
    return None
Example no. 11
def process(conf):
    """
    """

    remote_addr = conf['host']['addr']
    remote_port = conf['host']['port']
    username = conf['host']['username']
    password = conf['host']['password']
    lime_module = conf['host']['module']
    filename = conf['host']['filename']
    key = conf['host']['key']
    bucket = conf['aws']['bucket']
    progressbar = conf['host']['progressbar']
    tunnel_addr = '127.0.0.1'
    tunnel_port = random.randint(10000, 30000)
    remote_module_path = '/tmp/lime.ko'

    repository_enabled = conf['repository']['enabled']
    repository_url = conf['repository']['url']

    queue_handler = QueueHandler(log_queue)
    logger = logging.getLogger('margaritashotgun')
    logger.addHandler(queue_handler)

    if bucket is not None:
        dest = OutputDestinations.s3
    else:
        dest = OutputDestinations.local

    if filename is None:
        filename = "{0}-mem.lime".format(remote_addr)

    try:
        host = Host()
        host.connect(username, password, key, remote_addr, remote_port)
        host.start_tunnel(tunnel_port, tunnel_addr, tunnel_port)
        if lime_module is None:
            kernel_version = host.kernel_version()
            if repository_enabled:
                repo = Repository(repository_url)
                match = repo.search_modules(kernel_version)
                if match is not None:
                    lime_module = repo.fetch_module(match)
                    host.upload_module(lime_module)
                else:
                    raise KernelModuleNotFoundError(kernel_version, repo.url)
            else:
                # TODO: prompt user to search repository when running interactively
                raise KernelModuleNotProvidedError(kernel_version)
        else:
            host.upload_module(lime_module, remote_module_path)

        host.load_lime(remote_module_path, tunnel_port)
        lime_loaded = host.wait_for_lime(tunnel_port)

        if lime_loaded:
            result = host.capture_memory(dest, filename, bucket, progressbar)
        else:
            result = False

        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()

        return (remote_addr, result)
    except KeyboardInterrupt:
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        return (remote_addr, False)
    except Exception as ex:
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        logger.critical(ex)
        return (remote_addr, False)
Example no. 12
    def configure_logging(self) -> None:
        """ Reconfigure the logging to catch the messages for the GUI """
        self.log_queue = queue.Queue()
        self.log_handler = QueueHandler(self.log_queue)
        self.log_handler.setFormatter(logging.Formatter(LOG_FORMAT, style="{"))
        logging.getLogger().handlers = [self.log_handler]
Example no. 13
class CustomQueueListener(QueueListener):
    '''
    Ignore errors in the monitor thread that result from a race condition when the program exits
    '''
    def _monitor(self):
        try:
            super()._monitor()
        except Exception:
            pass


### LOG TO STDOUT ###

console = logging.StreamHandler(stdout)
# tell the handler to use this format
console.setFormatter(ColoredFormatter('%(levelname)s %(message)s'))

### LOG TO FILE ###

log_queue = Queue()
listener = CustomQueueListener(log_queue, console)
sender = QueueHandler(log_queue)
logging.getLogger('manspider').handlers = [sender]

logdir = Path(__file__).parent.parent / 'logs'
logdir.mkdir(exist_ok=True)
logfile = f'manspider_{datetime.now().strftime("%m-%d-%Y")}.log'
handler = logging.FileHandler(str(logdir / logfile))
handler.setFormatter(
    logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logging.getLogger('manspider').addHandler(handler)
Example no. 14
def setup_logging(output_file_dir,
                  args=None,
                  kwargs=None,
                  level=logging.NOTSET):
    """

    Parameters
    ----------
    output_file_dir
        Path of directory which will contain "logs" directory,
        which will contain a timestamped directory containing the log output
    args
        List of arguments given to original script, to be logged
    kwargs
        Dict of keyword arguments given to original script, to be logged
    level

    Returns
    -------
    logging.handlers.QueueListener
    """
    args = args or ()
    kwargs = kwargs or dict()

    # Don't warn about duplicate python bindings for opengm
    # (We import opengm twice, as 'opengm' and 'opengm_with_cplex'.)
    warnings.filterwarnings("ignore",
                            message='.*second conversion method ignored.',
                            category=RuntimeWarning)

    # set up the log files and symlinks
    latest_ln = os.path.join(output_file_dir, 'logs', 'latest')
    try:
        os.remove(latest_ln)
    except FileNotFoundError:
        pass
    timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    log_dir = os.path.join(output_file_dir, 'logs', timestamp)
    mkdir_p(log_dir)
    os.symlink(log_dir, latest_ln)
    log_file = os.path.join(log_dir, 'locate_synapses.txt')

    # set up ilastik's default logging (without adding handlers)
    with open(os.path.join(PROJECT_ROOT, 'config',
                           'ilastik_logging.json')) as f:
        dictConfig(json.load(f))

    # set up handlers
    formatter = logging.Formatter(LOGGER_FORMAT)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(level)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(level)

    # logs should be handled by a single process
    log_queue = mp.Queue()
    queue_handler = QueueHandler(log_queue)
    queue_listener = QueueListener(log_queue, file_handler, console_handler)

    #  set up the root logger
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(queue_handler)

    # set up the performance logger
    performance_formatter = logging.Formatter(
        '%(asctime)s: elapsed %(message)s')
    performance_handler = logging.FileHandler(
        os.path.join(log_dir, 'timing.txt'))
    performance_handler.setFormatter(performance_formatter)
    performance_handler.setLevel(logging.INFO)
    performance_logger = logging.getLogger('PERFORMANCE_LOGGER')
    performance_logger.addHandler(performance_handler)
    performance_logger.propagate = True

    # write version information
    commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
    git_diff = subprocess.check_output(['git', 'diff']).strip()
    version_string = 'Commit hash: {}\n\nCurrent diff:\n{}'.format(
        commit_hash, git_diff)
    with open(os.path.join(log_dir, 'version.txt'), 'w') as f:
        f.write(version_string)

    # write argument information
    with open(os.path.join(log_dir, 'arguments.txt'), 'w') as f:
        f.write('Arguments:\n\t{}\nKeyword arguments:\n\t{}'.format(
            args, kwargs))

    queue_listener.start()

    return queue_listener
Example no. 15
    def __init__(self, queue=None):
        QueueHandler.__init__(self, queue)
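This subclass only forwards the queue to `QueueHandler.__init__`. A hedged sketch of a more typical reason to subclass, overriding `enqueue()` so that a full (bounded) queue drops records instead of raising; the class name and behaviour are assumptions:

import queue
from logging.handlers import QueueHandler

class DropOnFullQueueHandler(QueueHandler):
    """QueueHandler variant that silently drops records when the queue is full."""

    def enqueue(self, record):
        try:
            self.queue.put_nowait(record)
        except queue.Full:
            pass  # drop the record rather than block or crash the producer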
Example no. 16
# Set up the log format
fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s',
                        '%Y-%m-%d %H:%M:%S')

# Add the console (cmd) handler
cmd_handler = logging.StreamHandler(sys.stdout)
cmd_handler.setLevel(logging.DEBUG)
cmd_handler.setFormatter(fmt)

# Add the file handler
file_handler = logging.handlers.TimedRotatingFileHandler(
    filename="logs\\log.txt",
    encoding='utf-8',
    when="D",
    interval=1,
    backupCount=31)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(fmt)

# Add the http (queue) handler
queue_handler = QueueHandler(log_queue)
queue_handler.setLevel(logging.INFO)
queue_handler.setFormatter(fmt)

# Add the handlers to the logger
logger.addHandler(cmd_handler)
logger.addHandler(file_handler)
logger.addHandler(queue_handler)

# logger.debug("今天天气不错")
Example no. 17
def worker_configurer(queue):
    q_handler = QueueHandler(queue)
    root = logging.getLogger()
    root.addHandler(q_handler)
    root.setLevel(logging.DEBUG)
Example no. 18
mc.flush_all()

from .routes import *
from .api import *

admin.add_view(AdminView(name='Home'))
admin.add_view(DbView(User, db.session))

# Build the database
db.create_all()

# logging
if not app.debug:
    que = queue.Queue(-1)  # no limit on size
    # Create a QueueHandler to receive logging
    queue_handler = QueueHandler(que)
    queue_handler.setLevel(logging.ERROR)

    # Create the actual mail handler
    credentials = app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']
    mail_handler = SMTPHandler(('smtp.gmail.com', '587'),
                               app.config['APP_ADDRESS'],
                               [app.config['ADMIN_ADDRESS']],
                               'stock_loan exception',
                               credentials=credentials, secure=())

    # Create a listener to dequeue records from the QueueHandler and send them to the mail handler
    listener = QueueListener(que, mail_handler)
    listener.start()

    # Add the queue handler to the app
Example no. 19
    def setUp(self):
        mp.set_start_method('spawn')
        log_queue = mp.Queue()
        basicConfig(level=DEBUG, handlers=[QueueHandler(log_queue)])
        self.log_proc = LoggerProcess(log_queue, 'app.log')
        self.log_proc.start()
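`LoggerProcess` itself is not shown. A minimal sketch of what such a process might look like, assuming it drains the queue and writes each record to the given file, with a `None` sentinel to stop it (all names besides the constructor arguments are assumptions):

import logging
import multiprocessing as mp

class LoggerProcess(mp.Process):
    """Consumes log records from a queue and writes them to a file."""

    def __init__(self, log_queue, filename):
        super().__init__(daemon=True)
        self.log_queue = log_queue
        self.filename = filename

    def run(self):
        handler = logging.FileHandler(self.filename)
        handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        while True:
            record = self.log_queue.get()
            if record is None:  # sentinel: shut down
                break
            handler.handle(record)
        handler.close()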
Example no. 20
def setup_queued_logging(log_queue):
    """Set up queued logging."""
    root_logger = getLogger()
    root_logger.addHandler(QueueHandler(log_queue))
    root_logger.setLevel(DEBUG)
Example no. 21
def _io_process(device,
                tx_queue,
                rx_queue,
                log_queue,
                parent_pid,
                bitrate=None,
                baudrate=None,
                max_adapter_clock_rate_error_ppm=None,
                fixed_rx_delay=None,
                max_estimated_rx_delay_to_resync=None):
    try:
        # noinspection PyUnresolvedReferences
        from logging.handlers import QueueHandler
    except ImportError:
        pass  # Python 2.7, no logging for you
    else:
        getLogger().addHandler(QueueHandler(log_queue))
        getLogger().setLevel('INFO')

    logger.info('IO process started with PID %r', os.getpid())

    # We don't need stdin
    try:
        stdin_fileno = sys.stdin.fileno()
        sys.stdin.close()
        os.close(stdin_fileno)
    except Exception:
        pass

    def is_parent_process_alive():
        if RUNNING_ON_WINDOWS:
            return True  # TODO: Find a working solution for Windows (os.kill(ppid, 0) doesn't work)
        else:
            return os.getppid() == parent_pid

    try:
        _raise_self_process_priority()
    except Exception as ex:
        logger.warning('Could not adjust priority of the IO process: %r', ex)

    #
    # This is needed to convert timestamps from hardware clock to local clocks
    #
    if max_adapter_clock_rate_error_ppm is None:
        max_adapter_clock_rate_error = DEFAULT_MAX_ADAPTER_CLOCK_RATE_ERROR_PPM / 1e6
    else:
        max_adapter_clock_rate_error = max_adapter_clock_rate_error_ppm / 1e6

    fixed_rx_delay = fixed_rx_delay if fixed_rx_delay is not None else DEFAULT_FIXED_RX_DELAY
    max_estimated_rx_delay_to_resync = max_estimated_rx_delay_to_resync or DEFAULT_MAX_ESTIMATED_RX_DELAY_TO_RESYNC

    ts_estimator_mono = TimestampEstimator(
        max_rate_error=max_adapter_clock_rate_error,
        source_clock_overflow_period=TIMESTAMP_OVERFLOW_PERIOD,
        fixed_delay=fixed_rx_delay,
        max_phase_error_to_resync=max_estimated_rx_delay_to_resync)
    ts_estimator_real = copy.deepcopy(ts_estimator_mono)

    #
    # Preparing the RX thread
    #
    should_exit = False

    def rx_thread_wrapper():
        rx_worker = RxWorker(conn=conn,
                             rx_queue=rx_queue,
                             ts_estimator_mono=ts_estimator_mono,
                             ts_estimator_real=ts_estimator_real,
                             termination_condition=lambda: should_exit)
        try:
            rx_worker.run()
        except Exception as ex:
            logger.error('RX thread failed, exiting', exc_info=True)
            # Propagating the exception to the parent process
            rx_queue.put(ex)

    rxthd = threading.Thread(target=rx_thread_wrapper, name='slcan_rx')
    rxthd.daemon = True

    try:
        conn = serial.Serial(device, baudrate or DEFAULT_BAUDRATE)
    except Exception as ex:
        logger.error('Could not open port', exc_info=True)
        rx_queue.put(ex)
        return

    #
    # Actual work is here
    #
    try:
        _init_adapter(conn, bitrate)

        rxthd.start()

        logger.info('IO process initialization complete')
        rx_queue.put(IPC_SIGNAL_INIT_OK)

        tx_worker = TxWorker(conn=conn,
                             rx_queue=rx_queue,
                             tx_queue=tx_queue,
                             termination_condition=lambda:
                             (should_exit or not rxthd.is_alive() or
                              not is_parent_process_alive()))
        tx_worker.run()
    except Exception as ex:
        logger.error('IO process failed', exc_info=True)
        rx_queue.put(ex)
    finally:
        logger.info('IO process is terminating...')
        should_exit = True
        if rxthd.is_alive():
            rxthd.join()

        _stop_adapter(conn)
        conn.close()
        logger.info('IO process is now ready to die, goodbye')
Example no. 22
    def start(self):
        ''' starts logger for multiprocessing using queue.
        
        logdir: if provided, error and debug logs will be created in it.
        logging_level
        logger_format
        '''
        # create console handler and set level to info
        #global logger_initialized
        #global queue_listener

        #if MpLogger.logger_initialized:
        if self.logger_initialized:
            return

        record_format = self.record_format
        if not record_format:
            record_format = "[ %(asctime)s ][ %(levelname)s ][ %(message)s ][ %(module)s.%(funcName)s ]"

        #MpLogger.logger_initialized=True
        self.logger_initialized = True
        logger = logging.getLogger(name=self.logging_root)
        logger.setLevel(self.logging_level)

        q = mp.Queue()
        queue_handler = QueueHandler(q)
        logger.addHandler(queue_handler)
        #MpLogger.queue_listener = MpQueueListener(q,)
        self.queue_listener = MpQueueListener(q, )

        handler = logging.StreamHandler()
        handler.setLevel(self.logging_level)
        formatter = logging.Formatter(record_format)
        handler.setFormatter(formatter)
        #MpLogger.queue_listener.addHandler(handler)
        self.queue_listener.addHandler(handler)

        if self.logdir:
            # create error file handler and set level to error
            handler = TimedSizedRotatingHandler(filename=os.path.join(
                self.logdir, "error.log"),
                                                encoding=None,
                                                delay="true")
            handler.setLevel(logging.ERROR)
            #formatter = logging.Formatter("%(levelname)s - %(message)s")
            formatter = logging.Formatter(record_format)
            handler.setFormatter(formatter)
            #logger.addHandler(handler)
            #MpLogger.queue_listener.addHandler(handler)
            self.queue_listener.addHandler(handler)

            # create debug file handler and set level to debug
            handler = TimedSizedRotatingHandler(filename=os.path.join(
                self.logdir, "debug.log"), )
            handler.setLevel(logging.DEBUG)
            formatter = logging.Formatter(record_format)
            handler.setFormatter(formatter)
            #MpLogger.queue_listener.addHandler(handler)
            self.queue_listener.addHandler(handler)

        #MpLogger.queue_listener.start()
        self.queue_listener.start()
Example no. 23
class GUIClient(TunnelClient):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.options = 10
        self.logs = []
        self.configure_logging()

    def configure_logging(self) -> None:
        """ Reconfigure the logging to catch the messages for the GUI """
        self.log_queue = queue.Queue()
        self.log_handler = QueueHandler(self.log_queue)
        self.log_handler.setFormatter(logging.Formatter(LOG_FORMAT, style="{"))
        logging.getLogger().handlers = [self.log_handler]

    def get_dimension(self) -> None:
        """ Get the dimensions of the current window """
        self.height, self.width = self.scr.getmaxyx()

    def _draw(self) -> None:
        """ Draw all GUI elements """
        self.scr.clear()
        self._draw_config()
        self._draw_info()
        self._draw_log()

    def _draw_info(self):
        """ Draw a box with main information about the current status """
        win = self.scr.subwin(self.options, self.width // 2, 0, 0)
        win.box()
        win.border(0)
        bytes_in = sum(cl.bytes_in for cl in self.clients.values())
        bytes_out = sum(cl.bytes_out for cl in self.clients.values())
        total = self.tunnel.bytes_in + self.tunnel.bytes_out
        win.addstr(0, 2, "Info")

        if self.last_ping and self.last_pong:
            ping_time = f"{1000 * (self.last_pong - self.last_ping):.0f}"
        else:
            ping_time = "-"

        overhead = total / (bytes_in + bytes_out) - 1 if bytes_in + bytes_out else 0

        self._draw_lines(
            win,
            [
                f"Clients: {len(self.clients)}",
                f"Domain: {self.domain}",
                f"Overhead: {100 * overhead:.2f} %",
                f"Ping: {ping_time}" f"Transfer In: {format_transfer(bytes_out)}",
                f"Transfer Out: {format_transfer(bytes_in)}",
                f"Transfer Total: {format_transfer(bytes_in + bytes_out)}",
            ],
        )
        win.refresh()
        return win

    def _draw_config(self):
        """ Draw a box with the current tunnel configuration """
        mx, my = self.width // 2, self.options
        win = self.scr.subwin(my, self.width - mx, 0, mx)
        win.box()
        win.border(0)
        win.addstr(0, 2, "Configuration")

        networks = self.networks if self.networks else ["0.0.0.0/0", "::/0"]
        self._draw_lines(
            win,
            [
                f"Allowed networks: {', '.join(map(str, networks))}",
                f"Ban time: {self.bantime or 'off'}",
                f"Clients: {self.max_clients or '-'}",
                f"Connections per IP: {self.max_connects or '-'}",
                f"Idle timeout: {self.idle_timeout or 'off'}",
                f"Ping: {'on' if self.ping_enabled else 'off'}",
                f"Protocol: {self.protocol.name}",
            ],
        )
        win.refresh()
        return win

    def _draw_log(self):
        """ Draw a box with the latest logs """
        h = self.height - self.options - 4
        w = self.width - 4

        win = self.scr.subwin(h + 4, w + 4, self.options, 0)
        win.box()
        win.border(0)
        win.addstr(0, 2, "Log")

        while not self.log_queue.empty():
            self.logs.append(self.log_queue.get().msg)

        self.logs = self.logs[-self.height :]

        self._draw_lines(win, self.logs)

        win.refresh()
        return win

    def _draw_lines(self, win, lines: List[str]) -> None:
        """ Draw multiple lines in a window with some border """
        h, w = [k - 4 for k in win.getmaxyx()]
        for y, line in enumerate(lines[:h]):
            win.addstr(y + 2, 2, line[:w])

    async def _handle(self) -> bool:
        """ Handle the drawing after each package """
        self.get_dimension()
        self._draw()
        return await super()._handle()

    def _gui(self, scr) -> None:
        """ Configure the main screen """
        self.scr = scr
        curses.noecho()
        curses.curs_set(0)

        super().start()

    def start(self) -> None:
        curses.wrapper(self._gui)
Example no. 24
def main(argv=None):
    parser = argparse.ArgumentParser(
        description="Tool for managing server hardware via the Redfish API."
    )
    parser.add_argument("-H", help="iDRAC host address")
    parser.add_argument("-u", help="iDRAC username", required=True)
    parser.add_argument("-p", help="iDRAC password", required=True)
    parser.add_argument("-i", help="Path to iDRAC interfaces yaml", default=None)
    parser.add_argument("-t", help="Type of host. Accepts: foreman, director")
    parser.add_argument(
        "-l", "--log", help="Optional argument for logging results to a file"
    )
    parser.add_argument(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        help="Optional argument for forced clear-jobs",
    )
    parser.add_argument(
        "--host-list",
        help="Path to a plain text file with a list of hosts.",
        default=None,
    )
    parser.add_argument(
        "--pxe", help="Set next boot to one-shot boot PXE", action="store_true"
    )
    parser.add_argument(
        "--boot-to", help="Set next boot to one-shot boot to a specific device"
    )
    parser.add_argument(
        "--boot-to-type",
        help="Set next boot to one-shot boot to either director or foreman",
    )
    parser.add_argument(
        "--boot-to-mac",
        help="Set next boot to one-shot boot to a specific MAC address on the target",
    )
    parser.add_argument(
        "--reboot-only", help="Flag for only rebooting the host", action="store_true"
    )
    parser.add_argument(
        "--power-cycle",
        help="Flag for sending ForceOff instruction to the host",
        action="store_true",
    )
    parser.add_argument(
        "--power-state", help="Get power state", action="store_true",
    )
    parser.add_argument(
        "--power-on", help="Power on host", action="store_true",
    )
    parser.add_argument(
        "--power-off", help="Power off host", action="store_true",
    )
    parser.add_argument("--racreset", help="Flag for iDRAC reset", action="store_true")
    parser.add_argument(
        "--factory-reset",
        help="Reset BIOS to default factory settings",
        action="store_true",
    )
    parser.add_argument(
        "--check-boot",
        help="Flag for checking the host boot order",
        action="store_true",
    )
    parser.add_argument(
        "--firmware-inventory", help="Get firmware inventory", action="store_true"
    )
    parser.add_argument(
        "--clear-jobs",
        help="Clear any scheduled jobs from the queue",
        action="store_true",
    )
    parser.add_argument(
        "--ls-jobs", help="List any scheduled jobs in queue", action="store_true",
    )
    parser.add_argument("-v", "--verbose", help="Verbose output", action="store_true")
    parser.add_argument(
        "-r",
        "--retries",
        help="Number of retries for executing actions.",
        default=RETRIES,
    )
    _args = vars(parser.parse_args(argv))

    log_level = DEBUG if _args["verbose"] else INFO

    host_list = _args["host_list"]
    host = _args["H"]
    result = True

    if host_list:
        FMT = "[%(name)s] - %(levelname)-8s - %(message)s"
        FILEFMT = "%(asctime)-12s: [%(name)s] - %(levelname)-8s - %(message)s"
    else:
        FMT = "- %(levelname)-8s - %(message)s"
        FILEFMT = "%(asctime)-12s: %(levelname)-8s - %(message)s"

    _queue = Queue()
    _stream_handler = StreamHandler()
    _stream_handler.setFormatter(Formatter(FMT))
    _queue_listener = QueueListener(_queue, _stream_handler)
    _logger = getLogger(__name__)
    _queue_handler = QueueHandler(_queue)
    _logger.addHandler(_queue_handler)
    _logger.setLevel(log_level)

    _queue_listener.start()

    if _args["log"]:
        file_handler = FileHandler(_args["log"])
        file_handler.setFormatter(Formatter(FILEFMT))
        file_handler.setLevel(log_level)
        _queue_listener.handlers = _queue_listener.handlers + (file_handler,)

    loop = asyncio.get_event_loop()
    tasks = []
    if host_list:
        try:
            with open(host_list, "r") as _file:
                for _host in _file.readlines():
                    logger = getLogger(_host.split(".")[0])
                    logger.addHandler(_queue_handler)
                    logger.setLevel(log_level)
                    fn = functools.partial(
                        execute_badfish, _host.strip(), _args, logger
                    )
                    tasks.append(fn)
        except IOError as ex:
            _logger.debug(ex)
            _logger.error("There was something wrong reading from %s" % host_list)
        results = []
        try:
            results = loop.run_until_complete(
                asyncio.gather(*[task() for task in tasks], return_exceptions=True)
            )
        except KeyboardInterrupt:
            _logger.warning("\nBadfish terminated.")
            result = False
        except (asyncio.CancelledError, BadfishException) as ex:
            _logger.warning("There was something wrong executing Badfish.")
            _logger.debug(ex)
            result = False
        if results:
            result = True
            _logger.info("RESULTS:")
            for res in results:
                if len(res) > 1 and res[1]:
                    _logger.info(f"{res[0]}: SUCCESSFUL")
                else:
                    _logger.info(f"{res[0]}: FAILED")
                    result = False
    elif not host:
        _logger.error(
            "You must specify at least either a host (-H) or a host list (--host-list)."
        )
    else:
        try:
            _host, result = loop.run_until_complete(
                execute_badfish(host, _args, _logger)
            )
        except KeyboardInterrupt:
            _logger.warning("Badfish terminated.")
        except BadfishException as ex:
            _logger.warning("There was something wrong executing Badfish.")
            _logger.debug(ex)
            result = False
    _queue_listener.stop()

    if result:
        return 0
    return 1
Example no. 25
    def run(self):
        print("starting DB Worker")

        qh = QueueHandler(self.errq)
        f = logging.Formatter('SW formatter: %(asctime)s: %(name)s|%(processName)s|%(process)d|%(levelname)s -- %(message)s')
        qh.setFormatter(f)
        qh.setLevel(logging.INFO)
        self.log = logging.getLogger(__name__)
        self.log.setLevel(logging.INFO)
        self.log.propagate = False
        self.log.addHandler(qh)

        self.log.info('DBWorker {} starting'.format(self.pid))

        self.tweet_lookup_queue = {}
        self.tweet_text_lookup_queue = []

        while True:
            
            try:
                d = self.tweet_queue.get(block=True)
            except Empty:
                time.sleep(SLEEP_TIME)
            else:
                try:
                    code, data = d
                except ValueError:
                    self.log.exception('Code, data assignment failed with:\n{}\n'.format(d))
                else:
                    if code==TWEET_MESSAGE:
                        # Set streamsession_id from data
                        ssid = data.pop('streamsession_id', None)
                        if ssid is not None:
                            self.streamsession_id = ssid
                        
                        retries = 0
                        retry_limit = 5
                        while retries < retry_limit:
                            
                            try:
                                self.process_tweet(data)
    
                            except (IntegrityError, OperationalError) as e:
                                msg =  '\n\n' + '*'*70
                                msg += '\n\nDB integrity error. Retrying ({}).'.format(retries)
                                msg += 'Exception: {}\n'.format(e)
                                msg += '-'*70 + '\n'
                                msg += traceback.format_exc()
                                msg += '\n\n{0}\nTweet Data:\n{1}\n{0}'.format('*'*70, json_dumps(data, indent=4))
                                
                                self.log.warning(msg)
                                retries += 1
    
                            else:
                                retries = retry_limit
    
                        # Check/dump lookup queues
                        if (len(self.tweet_lookup_queue) >=
                            self.lookup_queue_limit):
                            self.dump_tweet_lookup_queue()
                        if (len(self.tweet_text_lookup_queue) >=
                            self.lookup_queue_limit):
                            self.dump_tweet_text_lookup_queue()
                            
                        self.countq.put(1)
                        self.count += 1
    
                    elif code == START_MESSAGE: # stream started, time passed as data
                        self.log.debug('received START_MESSAGE')
                        session = get_session(self.db_path)
                        ss = session.merge(StreamSession(starttime=data))
                        self.commit_transaction(session)
                        self.streamsession_id = ss.id
                        session.close()
    
    
                    elif code == STOP_STREAM_MESSAGE: # stream stopped, time passed as data
                        self.log.debug('received STOP_STREAM_MESSAGE')
                        session = get_session(self.db_path)
                        ss = session.merge(StreamSession(id=self.streamsession_id))
                        ss.endtime=data
                        self.commit_transaction(session)
                        session.close()
    
                    elif code == STOP_MESSAGE: # process stopped by parent
                        # replace message for other workers
                        
                        self.tweet_queue.put((code, data))
                        print('stopping DB worker')
                        print('    dumping tweet lookup queue...')
                        self.dump_tweet_lookup_queue()
                        print('    DONE.')
                        print('    dumping tweet text lookup queue...')
                        self.dump_tweet_text_lookup_queue()
                        print('    DONE.')
                        print('Recording session stop time')
                        print('    DONE.')
                        break
                    
        print('{}: Process {} (id={}) finished.'.format(str(dt.now()),
                                                        self.name,
                                                        self.pid))
Example no. 26
def get_handler(q: Optional[multiprocessing.Queue] = None) -> Handler:
    if q is None and __queue is None:
        raise ValueError("No queue defined")

    return QueueHandler(__queue if q is None else q)
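The module-level `__queue` is assumed to be populated elsewhere; a hedged sketch of a matching setter (the name `set_queue` is hypothetical and not part of the original module):

import multiprocessing
from typing import Optional

__queue: Optional[multiprocessing.Queue] = None

def set_queue(q: multiprocessing.Queue) -> None:
    """Record the queue that get_handler() falls back to when none is given."""
    global __queue
    __queue = q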
Example no. 27
def main() -> None:

    config_fp = pathlib.Path(sys.argv[0]).parent / "config.json"
    with config_fp.open("r") as fh:
        config = json.load(fh)

    engine = sa.create_engine(config["db_uri"])
    letl.db.create_tables(engine=engine, recreate=True)

    jobs = [
        letl.Job(
            job_name=f"Job4",
            timeout_seconds=20,
            dependencies=frozenset({"Job1"}),
            retries=1,
            run=job4,
            config=Config("job4_payload"),
            schedule=frozenset({letl.Schedule.every_x_seconds(seconds=30)}),
        ),
        letl.Job(
            job_name=f"Job1",
            timeout_seconds=20,
            dependencies=frozenset(),
            retries=1,
            run=job1,
            config=Config("job1_payload"),
            schedule=frozenset({letl.Schedule.every_x_seconds(seconds=30)}),
        ),
        letl.Job(
            job_name=f"Job2",
            timeout_seconds=5,
            dependencies=frozenset(),
            retries=1,
            run=job2,
            config=Config("job2_payload"),
            schedule=frozenset({letl.Schedule.every_x_seconds(seconds=30)}),
        ),
        letl.Job(
            job_name=f"Job3",
            timeout_seconds=20,
            dependencies=frozenset(),
            retries=1,
            run=job3,
            config=Config("job3_payload"),
            schedule=frozenset({letl.Schedule.every_x_seconds(seconds=30)}),
        ),
    ]

    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    file_handler = RotatingFileHandler("error.log", maxBytes=2000, backupCount=0)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.ERROR)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)

    log_queue = mp.Queue(-1)  # type: ignore
    queue_handler = QueueHandler(log_queue)
    log_listener = QueueListener(log_queue, console_handler, file_handler)

    letl.root_logger.setLevel(logging.INFO)
    letl.root_logger.addHandler(queue_handler)

    log_listener.start()

    try:
        letl.start(
            jobs=jobs,
            etl_db_uri=config["db_uri"],
            max_job_runners=3,
            log_sql_to_console=False,
            log_level=letl.LogLevel.Debug,
        )
    finally:
        log_listener.stop()
Example no. 28
    def _configure_logger(self):
        handler = QueueHandler(self.logging_queue)
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
Example no. 29
File: game.py Project: unRARed/mpf
    def __init__(self, mpf_path, machine_path, args):
        """Run mpf game."""
        signal.signal(signal.SIGINT, self.exit)

        parser = argparse.ArgumentParser(
            description='Starts the MPF game engine')

        parser.add_argument("-a",
                            action="store_true",
                            dest="no_load_cache",
                            help="Forces the config to be loaded from files "
                            "and not cache")

        parser.add_argument("-A",
                            action="store_false",
                            dest="create_config_cache",
                            help="Does not create the cache config files")

        parser.add_argument("-b",
                            action="store_false",
                            dest="bcp",
                            default=True,
                            help="Runs MPF without making a connection "
                            "attempt to a "
                            "BCP Server")

        parser.add_argument("-c",
                            action="store",
                            dest="configfile",
                            default="config.yaml",
                            metavar='config_file',
                            help="The name of a config file to load. Default "
                            "is "
                            "config.yaml. Multiple files can be used "
                            "via a comma-"
                            "separated list (no spaces between)")

        parser.add_argument("-C",
                            action="store",
                            dest="mpfconfigfile",
                            default=os.path.join(mpf_path, "mpfconfig.yaml"),
                            metavar='config_file',
                            help="The MPF framework default config file. "
                            "Default is "
                            "mpf/mpfconfig.yaml")

        parser.add_argument("-f",
                            action="store_true",
                            dest="force_assets_load",
                            default=False,
                            help="Load all assets upon startup.  Useful for "
                            "ensuring all assets are set up properly "
                            "during development.")

        parser.add_argument(
            "-l",
            action="store",
            dest="logfile",
            metavar='file_name',
            default=os.path.join(
                "logs",
                datetime.now().strftime("%Y-%m-%d-%H-%M-%S-mpf-" +
                                        socket.gethostname() + ".log")),
            help="The name (and path) of the log file")

        parser.add_argument("-p",
                            action="store_true",
                            dest="pause",
                            default=False,
                            help="Pause the terminal window on exit. Useful "
                            "when launching in a separate window so you can "
                            "see any errors before the window closes.")

        parser.add_argument(
            "-P",
            action="store_true",
            dest="production",
            default=False,
            help=
            "Production mode. Will suppress errors, wait for hardware on start and "
            "try to exit when startup fails. Run this inside a loop.")

        parser.add_argument("-t",
                            action="store_false",
                            dest='text_ui',
                            default=True,
                            help="Use the ASCII test-based UI")

        parser.add_argument("-v",
                            action="store_const",
                            dest="loglevel",
                            const=logging.DEBUG,
                            default=15,
                            help="Enables verbose logging to the"
                            " log file")

        parser.add_argument(
            "-V",
            action="store_const",
            dest="consoleloglevel",
            const=logging.DEBUG,
            default=logging.INFO,
            help="Enables verbose logging to the console. DO "
            "NOTE: On Windows platforms you must also use -v for "
            "this to work.")

        parser.add_argument("-x",
                            action="store_const",
                            dest="force_platform",
                            const='virtual',
                            help="Forces the virtual platform to be "
                            "used for all devices")

        parser.add_argument("--syslog_address",
                            action="store",
                            dest="syslog_address",
                            help="Log to the specified syslog address. This "
                            "can be a domain socket such as /dev/og on "
                            "Linux or /var/run/syslog on Mac. "
                            "Alternatively, you an specify host:port for "
                            "remote logging over UDP.")

        parser.add_argument("-X",
                            action="store_const",
                            dest="force_platform",
                            const='smart_virtual',
                            help="Forces the smart virtual platform to be "
                            "used for all"
                            " devices")

        # The following are just included for full compatibility with mc
        # which is needed when using "mpf both".

        parser.add_argument("-L",
                            action="store",
                            dest="mc_file_name",
                            metavar='mc_file_name',
                            default=None,
                            help=argparse.SUPPRESS)

        self.args = parser.parse_args(args)
        self.args.configfile = Util.string_to_list(self.args.configfile)

        # Configure logging. Creates a logfile and logs to the console.
        # Formatting options are documented here:
        # https://docs.python.org/3/library/logging.html#logrecord-attributes

        try:
            os.makedirs(os.path.join(machine_path, 'logs'))
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        full_logfile_path = os.path.join(machine_path, self.args.logfile)

        try:
            os.remove(full_logfile_path)
        except OSError:
            pass

        if self.args.text_ui:
            # the text UI owns the terminal, so console output is discarded
            console_log = logging.NullHandler()
            console_log.setLevel(logging.ERROR)
        else:
            console_log = logging.StreamHandler()
            console_log.setLevel(self.args.consoleloglevel)

        # tell the handler to use this format
        console_log.setFormatter(
            logging.Formatter('%(levelname)s : %(name)s : %(message)s'))

        # initialise async handler for console
        console_log_queue = Queue()
        console_queue_handler = QueueHandler(console_log_queue)
        self.console_queue_listener = logging.handlers.QueueListener(
            console_log_queue, console_log)
        self.console_queue_listener.start()

        # initialise file log
        file_log = logging.FileHandler(full_logfile_path)
        file_log.setFormatter(
            logging.Formatter(
                '%(asctime)s : %(levelname)s : %(name)s : %(message)s'))

        # initialise async handler for file log
        file_log_queue = Queue()
        file_queue_handler = QueueHandler(file_log_queue)
        self.file_queue_listener = logging.handlers.QueueListener(
            file_log_queue, file_log)
        self.file_queue_listener.start()

        # attach the queue handlers to the root logger
        logger = logging.getLogger()
        logger.addHandler(console_queue_handler)
        logger.addHandler(file_queue_handler)
        logger.setLevel(self.args.loglevel)

        if self.args.syslog_address:
            try:
                host, port = self.args.syslog_address.split(":")
            except ValueError:
                syslog_logger = SysLogHandler(self.args.syslog_address)
            else:
                syslog_logger = SysLogHandler((host, int(port)))

            logger.addHandler(syslog_logger)

        try:
            MachineController(mpf_path, machine_path, vars(self.args)).run()
            logging.info("MPF run loop ended.")
            self.exit()

        # pylint: disable-msg=broad-except
        except Exception as e:
            self.exit(exception=e)
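
The two QueueListeners started above are never stopped in this excerpt. A minimal teardown sketch, assuming an exit hook with access to the same attributes (the real MPF exit() is not shown here), would stop each listener so queued records are flushed before the process ends:

import logging

# Hypothetical teardown sketch -- names mirror the attributes created above,
# but the actual exit() implementation is not part of this excerpt.
def _stop_log_listeners(self):
    for listener in (self.console_queue_listener, self.file_queue_listener):
        listener.stop()      # drains any queued records, then joins the thread
    logging.shutdown()       # flushes and closes the underlying handlers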
Esempio n. 30
0
    def test_snapshot(self):
        width = 100
        height = 100
        frame_buffer = FrameBuffer(10, width, height)
        frame_queue = Queue(1)

        log_queue = Queue()
        getLogger().addHandler(QueueHandler(log_queue))

        stop_process_event = Event()

        effect = MockEffect()
        effect.draw_rect = MagicMock()

        snapshot = Snapshot("snapshot", stop_process_event, log_queue,
                            frame_queue, frame_buffer,
                            self._create_detect_config(width,
                                                       height), [effect])
        processes = [
            snapshot,
            LogHandler(Thread,
                       "logger",
                       stop_process_event,
                       log_queue,
                       filename=None)
        ]

        for process in processes:
            process.start()

        try:
            frame_index = 0
            frame = frame_buffer.frames[frame_index]

            frame.header.detections[0].label = COCO_CLASSES.index('book')
            frame.header.detections[0].bounding_box.x_min = 1
            frame.header.detections[0].bounding_box.y_min = 2
            frame.header.detections[0].bounding_box.x_max = 3
            frame.header.detections[0].bounding_box.y_max = 4
            frame.header.epoch = time()

            frame.latch.next()
            frame.latch.next()

            payload = Payload(None, frame_index)
            frame_queue.put(payload)

            self.assertTrue(
                frame.latch.wait_for(State.READY, stop_process_event.is_set,
                                     10))

            with self.assertRaises(AssertionError):
                snapshot.get('person')

            self.assertIsNotNone(snapshot.get('book'))

            effect.draw_rect.assert_called_with(1, 2, 3, 4)
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)
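
LogHandler above is project code that evidently drains log_queue so records emitted inside the snapshot process reach this test's handlers. A stdlib-only sketch of such a drain loop, purely illustrative (the project's actual LogHandler is not shown here):

import logging
from queue import Empty

def drain_logs(stop_event, log_queue):
    # Replay LogRecords queued by QueueHandler in other processes through
    # the logging configuration of the current process.
    while not stop_event.is_set():
        try:
            record = log_queue.get(timeout=0.5)
        except Empty:
            continue
        logging.getLogger(record.name).handle(record)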
Esempio n. 31
0
def init_worker(q):
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(QueueHandler(q))
    return logger
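
A usage sketch for an initializer like this: the parent creates the shared queue, starts a QueueListener that forwards worker records to a normal handler, and passes the queue to each pool worker. The work function and pool size below are illustrative, not from the snippet above.

import logging
import multiprocessing as mp
from logging.handlers import QueueListener

def work(item):
    logging.getLogger(__name__).info("processing %s", item)
    return item * 2

if __name__ == "__main__":
    q = mp.Queue()
    listener = QueueListener(q, logging.StreamHandler())
    listener.start()
    # init_worker is the initializer defined in the example above
    with mp.Pool(processes=4, initializer=init_worker, initargs=(q,)) as pool:
        results = pool.map(work, range(10))
    listener.stop()
    print(results)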
Esempio n. 32
0
def log_client_configurer(queue):
    """Run this in child processes to configure sending logs to parent."""
    h = QueueHandler(queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(logging.INFO)
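
The parent-side counterpart of this configurer, following the logging-cookbook pattern it mirrors, is a listener loop that pulls records off the queue and dispatches them through locally configured handlers until a None sentinel arrives; a minimal sketch:

import logging

def log_listener(queue):
    # Runs in the parent (or a dedicated listener process): configure real
    # handlers here, then replay records sent by the child processes.
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())
    while True:
        record = queue.get()
        if record is None:      # sentinel pushed on shutdown
            break
        logging.getLogger(record.name).handle(record)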
Esempio n. 33
0
    def run(self):
        # we have to fix up the logger before we can start
        _root = logging.getLogger()
        _root.handlers = []
        _root.addHandler(QueueHandler(self.logging_queue))

        logger = logging.getLogger(self.name)
        logger.setLevel(self.log_level)
        logger.debug(f'Download worker reporting for duty!')

        empty = False
        while True:
            try:
                job = self.q.get(timeout=10.0)
                empty = False
            except Empty:
                if not empty:
                    logger.debug(f'Queue Empty, waiting for more...')
                empty = True
                continue

            if job.kill:  # let worker die
                logger.debug(f'Worker received kill signal, shutting down...')
                break

            tries = 0
            dl_start = dl_end = 0
            compressed = 0
            chunk = None

            try:
                while tries < self.max_retries:
                    tries += 1  # count the attempt so the retry cap can actually trigger
                    # print('Downloading', job.url)
                    logger.debug(f'Downloading {job.url}')
                    dl_start = time.time()

                    try:
                        r = self.session.get(job.url, timeout=self.dl_timeout)
                        r.raise_for_status()
                    except Exception as e:
                        logger.warning(f'Chunk download for {job.guid} failed: ({e!r}), retrying...')
                        continue

                    dl_end = time.time()
                    if r.status_code != 200:
                        logger.warning(f'Chunk download for {job.guid} failed: status {r.status_code}, retrying...')
                        continue
                    else:
                        compressed = len(r.content)
                        chunk = Chunk.read_buffer(r.content)
                        break
                else:
                    raise TimeoutError('Max retries reached')
            except Exception as e:
                logger.error(f'Job for {job.guid} failed with: {e!r}, fetching next one...')
                # add failed job to result queue to be requeued
                self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))
            except KeyboardInterrupt:
                logger.warning('Immediate exit requested, quitting...')
                break

            if not chunk:
                logger.warning(f'Chunk somehow None?')
                self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))
                continue

            # decompress stuff
            try:
                size = len(chunk.data)
                if size > job.shm.size:
                    logger.fatal(f'Downloaded chunk is longer than SharedMemorySegment!')

                self.shm.buf[job.shm.offset:job.shm.offset + size] = bytes(chunk.data)
                del chunk
                self.o_q.put(DownloaderTaskResult(success=True, chunk_guid=job.guid, shm=job.shm,
                                                  url=job.url, size=size, compressed_size=compressed,
                                                  time_delta=dl_end - dl_start))
            except Exception as e:
                logger.warning(f'Job for {job.guid} failed with: {e!r}, fetching next one...')
                self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))
                continue
            except KeyboardInterrupt:
                logger.warning('Immediate exit requested, quitting...')
                break

        self.shm.close()
Esempio n. 34
0
def test_subprocess_trace(datadog_tracer: ddtrace.Tracer,
                          caplog: LogCaptureFixture):
    """Verify that spans created in subprocesses are written to the queue and then flushed to the server,
    when wrapped in the SubprocessTracer"""

    # Enable log output for this logger for duration of this test
    caplog.set_level(logging.DEBUG, DatadogLoggingTraceFilter._log.name)
    test = f"{inspect.stack()[0][3]}"
    # And also send its output through a multiprocessing queue to surface logs from the subprocess
    log_queue = mp.Queue()
    DatadogLoggingTraceFilter._log.addHandler(QueueHandler(log_queue))
    DatadogLoggingTraceFilter.activate()

    subproc_test_msg = f"a test message was logged in a subprocess of {test}"
    state = mp.Queue()
    stop_sentinel = "-->STOP<--"

    with ddtrace.tracer.trace(
            name=f"{test}_operation",
            service=f"{test}_service",
            resource=f"{test}_resource",
            span_type=SpanTypes.TEST,
    ) as span:
        trace_id = span.trace_id
        logger = logging.getLogger(f"{test}_logger")
        test_msg = f"a test message was logged during {test}"
        logger.warning(test_msg)
        ctx = mp.get_context("fork")
        worker = ctx.Process(
            name=f"{test}_subproc",
            target=_do_things_in_subproc,
            args=(
                subproc_test_msg,
                state,
            ),
        )
        worker.start()
        worker.join(timeout=10)
        DatadogLoggingTraceFilter._log.warning(stop_sentinel)

    subproc_trace_id, subproc_span_id = state.get(block=True, timeout=10)
    assert test_msg in caplog.text, "caplog.text did not seem to capture logging output during test"
    assert f"SPAN#{trace_id}" in caplog.text, "span marker not found in logging output"
    assert f"TRACE#{trace_id}" in caplog.text, "trace marker not found in logging output"
    assert f"resource {test}_resource" in caplog.text, "traced resource not found in logging output"
    assert subproc_trace_id == trace_id  # subprocess tracing should be a continuation of the trace in parent process

    # Drain the queue and redirect DatadogLoggingTraceFilter log output to the caplog handler
    log_records = []
    draining = True
    while draining:
        while not log_queue.empty():
            log_record = log_queue.get(block=True, timeout=5)
            log_records.append(log_record)
        log_msgs = [r.getMessage() for r in log_records]
        if stop_sentinel in log_msgs:  # check for sentinel, signaling end of queued records
            draining = False
    for log_record in log_records:
        if log_record.getMessage() != stop_sentinel:
            caplog.handler.handle(log_record)

    assert f"{subproc_span_id}" in caplog.text, "subproc span id not found in logging output"
    assert (
        f"resource {_do_things_in_subproc.__name__}_resource"
        in caplog.text), "subproc traced resource not found in logging output"
Esempio n. 35
0
def worker_init(q):
    qh = QueueHandler(q)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    #logger.setLevel(logging.DEBUG)
    logger.addHandler(qh)
def process(conf):
    """
    """
    jump_host = conf['host']['jump_host']
    remote_addr = conf['host']['addr']
    remote_port = conf['host']['port']
    username = conf['host']['username']
    password = conf['host']['password']
    lime_module = conf['host']['module']
    filename = conf['host']['filename']
    key = conf['host']['key']
    bucket = conf['aws']['bucket']
    progressbar = conf['host']['progressbar']
    tunnel_addr = '127.0.0.1'
    tunnel_port = random.randint(10000, 30000)
    remote_module_path = '/tmp/lime.ko'

    repository_enabled = conf['repository']['enabled']
    repository_url = conf['repository']['url']
    repository_manifest = conf['repository']['manifest']
    repository_gpg_verify = conf['repository']['gpg_verify']

    queue_handler = QueueHandler(log_queue)
    logger = logging.getLogger('margaritashotgun')
    logger.addHandler(queue_handler)

    if bucket is not None:
        dest = OutputDestinations.s3
    else:
        dest = OutputDestinations.local

    if filename is None:
        tm = int(time.time())
        dt = datetime.utcfromtimestamp(tm).isoformat()
        filename = "{0}-{1}-mem.lime".format(remote_addr, dt)

    try:
        host = Host()
        host.connect(username, password, key, remote_addr, remote_port,
                     jump_host)
        host.start_tunnel(tunnel_port, tunnel_addr, tunnel_port)
        if lime_module is None:
            kernel_version = host.kernel_version()
            if repository_enabled:
                repo = Repository(repository_url, repository_gpg_verify)
                repo.init_gpg()
                lime_module = repo.fetch(kernel_version, repository_manifest)
                host.upload_module(lime_module)
            else:
                raise KernelModuleNotProvidedError(kernel_version)
        else:
            host.upload_module(lime_module, remote_module_path)

        host.load_lime(remote_module_path, tunnel_port)
        lime_loaded = host.wait_for_lime(tunnel_port)

        if lime_loaded:
            result = host.capture_memory(dest, filename, bucket, progressbar)
        else:
            logger.debug("lime failed to load on {0}".format(remote_addr))
            result = False

        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()

        return (remote_addr, result)
    except SSHConnectionError as ex:
        logger.error(ex)
        logger.removeHandler(queue_handler)
        queue_handler.close()
        return (remote_addr, False)
    except KeyboardInterrupt as ex:
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        return (remote_addr, False)
    except (SSHCommandError, Exception) as ex:
        logger.error(ex)
        logger.removeHandler(queue_handler)
        queue_handler.close()
        host.cleanup()
        return (remote_addr, False)
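
Each exit path above detaches and closes the queue handler by hand. A small context-manager sketch that performs the same cleanup exactly once, however the body exits (the logger name and queue match the function above):

import logging
from contextlib import contextmanager
from logging.handlers import QueueHandler

@contextmanager
def queued_logging(queue, name='margaritashotgun'):
    # Attach a QueueHandler for the duration of the block and always
    # remove and close it afterwards, even on error or interrupt.
    handler = QueueHandler(queue)
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    try:
        yield logger
    finally:
        logger.removeHandler(handler)
        handler.close()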