Example #1
0
def run(args):
    """Run beaver: tail files with a Worker in this process and push
    events onto a multiprocessing queue consumed by a child process.

    Blocks forever; exits only through the installed signal handlers.
    """
    logger = setup_custom_logger('beaver', args)

    beaver_config = BeaverConfig(args, logger=logger)
    # Bounded hand-off queue between the worker and the consumer process.
    queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))

    worker = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    def cleanup(signalnum, frame):
        # Reverse-map the signal number to its name (e.g. 15 -> 'SIGTERM')
        # by scanning the signal module namespace (Python 2 iteritems).
        sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]

        logger.info('{0} detected'.format(sig_name))
        logger.info('Shutting down. Please wait...')

        if worker is not None:
            logger.info('Closing worker...')
            try:
                worker.close()
            except RuntimeError:
                pass

        # Ask the consumer process to stop; ignore a full queue since we
        # are exiting anyway.
        try:
            queue.put_nowait(('exit', ()))
        except Queue.Full:
            pass

        if ssh_tunnel is not None:
            logger.info('Closing ssh tunnel...')
            ssh_tunnel.close()

        logger.info('Shutdown complete.')
        # Hard exit (skips atexit handlers); exit status is the raw signal
        # number. NOTE(review): 128+signum is the usual convention.
        return os._exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info('Starting queue consumer')
        proc.start()
        return proc

    while 1:
        try:
            if REOPEN_FILES:
                logger.debug('Detected non-linux platform. Files will be reopened for tailing')

            logger.info('Starting worker...')
            worker = Worker(beaver_config, queue_consumer_function=create_queue_consumer, callback=queue.put, logger=logger)

            logger.info('Working...')
            worker.loop()

        except KeyboardInterrupt:
            # Ctrl-C is handled by the SIGINT handler above; swallow the
            # exception here and restart the worker.
            pass
Example #2
0
def run(args):
    """Run beaver: tail files with a Worker in this process and push
    events onto a multiprocessing queue consumed by a child process.

    Blocks forever; shutdown happens via the installed signal handlers,
    which exit with sys.exit (SystemExit, normal interpreter cleanup).
    """
    logger = setup_custom_logger('beaver', args)

    beaver_config = BeaverConfig(args, logger=logger)
    # Bounded hand-off queue between the worker and the consumer process.
    queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))

    worker = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    def cleanup(signalnum, frame):
        # Reverse-map the signal number to its name (e.g. 15 -> 'SIGTERM')
        # by scanning the signal module namespace (Python 2 iteritems).
        sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]

        logger.info('{0} detected'.format(sig_name))
        logger.info('Shutting down. Please wait...')

        if worker is not None:
            logger.info('Closing worker...')
            try:
                worker.close()
            except RuntimeError:
                pass

        # Ask the consumer process to stop; ignore a full queue since we
        # are exiting anyway.
        try:
            queue.put_nowait(('exit', ()))
        except Queue.Full:
            pass

        if ssh_tunnel is not None:
            logger.info('Closing ssh tunnel...')
            ssh_tunnel.close()

        logger.info('Shutdown complete.')
        # Raises SystemExit with the signal number as the exit status.
        return sys.exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info('Starting queue consumer')
        proc.start()
        return proc

    while 1:
        try:
            if REOPEN_FILES:
                logger.debug('Detected non-linux platform. Files will be reopened for tailing')

            logger.info('Starting worker...')
            worker = Worker(beaver_config, queue_consumer_function=create_queue_consumer, callback=queue.put, logger=logger)

            logger.info('Working...')
            worker.loop()

        except KeyboardInterrupt:
            # Ctrl-C is handled by the SIGINT handler above; swallow the
            # exception here and restart the worker.
            pass
Example #3
0
    def __init__(self, args):
        """Bootstrap the beaver app: logging, configuration, the event
        queue, the ssh tunnel and the shutdown signal handlers."""
        self.logger = setup_custom_logger('beaver', args)
        self.beaver_config = BeaverConfig(args, logger=self.logger)

        # Re-create the logger so settings from the config file win.
        self.logger = setup_custom_logger('beaver', args, config=self.beaver_config)

        if self.beaver_config.get('logstash_version') not in (0, 1):
            raise LookupError("Invalid logstash_version")

        self.queue = multiprocessing.Queue(self.beaver_config.get('max_queue_size'))

        self.manager_proc = None
        self.ssh_tunnel = create_ssh_tunnel(self.beaver_config)

        # Windows has no SIGQUIT, so only install that handler elsewhere.
        for signum in (signal.SIGTERM, signal.SIGINT):
            signal.signal(signum, self.cleanup)
        if os.name != 'nt':
            signal.signal(signal.SIGQUIT, self.cleanup)
Example #4
0
def run(args):
    """Run beaver with a separate FileConfig: loop a Worker in this
    process and shut down cleanly on Ctrl-C (KeyboardInterrupt)."""
    logger = setup_custom_logger('beaver', args)

    file_config = FileConfig(args, logger=logger)
    beaver_config = BeaverConfig(args, file_config=file_config, logger=logger)
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    # Bounded hand-off queue between the worker and the consumer process.
    queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, file_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info("Starting queue consumer")
        proc.start()
        return proc

    while 1:
        try:
            if REOPEN_FILES:
                logger.debug("Detected non-linux platform. Files will be reopened for tailing")

            logger.info("Starting worker...")
            worker = Worker(beaver_config, file_config, queue_consumer_function=create_queue_consumer, callback=queue.put, logger=logger)

            logger.info("Working...")
            worker.loop()

        except KeyboardInterrupt:
            # NOTE(review): if interrupted before Worker(...) is assigned,
            # 'worker' is unbound here and worker.close() would NameError.
            logger.info("Shutting down. Please wait.")
            worker.close()
            if ssh_tunnel is not None:
                logger.info("Closing ssh tunnel.")
                ssh_tunnel.close()

            logger.info("Shutdown complete.")
            sys.exit(0)
Example #5
0
def run_queue(queue, beaver_config, logger_name=None):
    """Consumer loop: pull (command, data) tuples off the queue and ship
    'callback' payloads through the configured transport.

    Intended to run in a child process; exits on transport failure, a
    queue timeout, an explicit ('exit', ()) message, or Ctrl-C.
    """

    # Restore default signal handling so this child process does not
    # inherit the parent's shutdown handlers.
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    if os.name != 'nt':
        # SIGQUIT does not exist on Windows.
        signal.signal(signal.SIGQUIT, signal.SIG_DFL)

    last_update_time = int(time.time())
    queue_timeout = beaver_config.get('queue_timeout')
    wait_timeout = beaver_config.get('wait_timeout')
    count = 0  # successful transport callbacks since the last qsize log

    transport = None
    try:
        logger = setup_custom_logger(logger_name)
        logger.debug('Logging using the {0} transport'.format(
            beaver_config.get('transport')))
        transport = create_transport(beaver_config, logger=logger)
        failure_count = 0
        while True:
            if not transport.valid():
                logger.info('Transport connection issues, stopping queue')
                break

            command = None
            try:
                if queue.full():
                    logger.error("Queue is full")

                else:
                    # Periodically log the backlog size.
                    if count == 1000:
                        logger.debug("Main consumer queue Size is: " +
                                     str(queue.qsize()))
                        count = 0
                command, data = queue.get(block=True, timeout=wait_timeout)
                if command == "callback":
                    # Any delivered batch counts as activity for the
                    # queue_timeout watchdog below.
                    last_update_time = int(time.time())
                    logger.debug(
                        'Last update time now {0}'.format(last_update_time))
            except Queue.Empty:
                # get() timed out; if the queue claims it is non-empty yet
                # we could not read from it, give up rather than spin.
                if not queue.empty():
                    logger.error(
                        'Recieved timeout from main consumer queue - stopping queue'
                    )
                    break
                else:
                    logger.debug('No data')

            # Watchdog: stop if nothing has been delivered for too long.
            if int(time.time()) - last_update_time > queue_timeout:
                logger.info(
                    'Queue timeout of "{0}" seconds exceeded, stopping queue'.
                    format(queue_timeout))
                break

            if command == 'callback':
                if data.get('ignore_empty', False):
                    logger.debug('removing empty lines')
                    lines = data['lines']
                    new_lines = []
                    for line in lines:
                        message = unicode_dammit(line)
                        if len(message) == 0:
                            continue
                        new_lines.append(message)
                    data['lines'] = new_lines

                if len(data['lines']) == 0:
                    logger.debug('0 active lines sent from worker')
                    continue
                # Retry until the transport accepts the batch or the user
                # interrupts; back off exponentially between attempts.
                while True:
                    try:
                        transport.callback(**data)
                        count += 1
                        logger.debug("Number of transports: " + str(count))
                        break
                    except TransportException as e:
                        failure_count = failure_count + 1
                        # Cap the exponent so the delay stops growing.
                        if failure_count > beaver_config.get('max_failure'):
                            failure_count = beaver_config.get('max_failure')

                        sleep_time = beaver_config.get(
                            'respawn_delay')**failure_count
                        logger.info('Caught transport exception: %s', e)
                        logger.info('Reconnecting in %d seconds' % sleep_time)

                        try:
                            transport.invalidate()
                            time.sleep(sleep_time)
                            transport.reconnect()
                            if transport.valid():
                                failure_count = 0
                                logger.info('Reconnected successfully')
                        except KeyboardInterrupt:
                            logger.info('User cancelled respawn.')
                            transport.interrupt()
                            sys.exit(0)
            elif command == 'addglob':
                # Propagate a new file glob to both config and transport.
                beaver_config.addglob(*data)
                transport.addglob(*data)
            elif command == 'exit':
                break
    except KeyboardInterrupt:
        logger.debug('Queue Interruped')
        if transport is not None:
            transport.interrupt()

        logger.debug('Queue Shutdown')
Example #6
0
def run(args=None):
    """Supervise a worker process that tails files and feeds the queue,
    restarting it if it dies or exceeds its refresh interval.

    Blocks forever; exits only through the installed signal handlers.
    """
    logger = setup_custom_logger('beaver', args)

    beaver_config = BeaverConfig(args, logger=logger)
    if beaver_config.get('logstash_version') not in [0, 1]:
        raise LookupError("Invalid logstash_version")

    # Bounded hand-off queue between the producer and consumer processes.
    queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))

    worker_proc = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    def cleanup(signalnum, frame):
        # Called both as a signal handler (signalnum set) and directly
        # with (None, None) when the worker is being refreshed.
        if signalnum is not None:
            # Reverse-map signal number -> name (Python 2 iteritems).
            sig_name = tuple((v) for v, k in signal.__dict__.iteritems()
                             if k == signalnum)[0]
            logger.info('{0} detected'.format(sig_name))
            logger.info('Shutting down. Please wait...')
        else:
            logger.info('Worker process cleanup in progress...')

        # Ask the consumer to stop; ignore a full queue.
        try:
            queue.put_nowait(('exit', ()))
        except Queue.Full:
            pass

        if worker_proc is not None:
            try:
                worker_proc.terminate()
                worker_proc.join()
            except RuntimeError:
                pass

        if ssh_tunnel is not None:
            # NOTE(review): on a refresh (signalnum None) the tunnel is
            # closed here and not visibly reopened for the next worker.
            logger.info('Closing ssh tunnel...')
            ssh_tunnel.close()

        if signalnum is not None:
            logger.info('Shutdown complete.')
            # Hard exit; status is the raw signal number.
            return os._exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info('Starting queue consumer')
        proc.start()
        return proc

    def create_queue_producer():
        # Runs in a child process: tail files and feed the queue.
        worker = Worker(beaver_config,
                        queue_consumer_function=create_queue_consumer,
                        callback=queue.put,
                        logger=logger)
        worker.loop()

    while 1:

        try:
            if REOPEN_FILES:
                logger.debug(
                    'Detected non-linux platform. Files will be reopened for tailing'
                )

            t = time.time()
            while True:
                # Respawn the worker whenever it is missing or has died
                # (including after a refresh terminated it).
                if worker_proc is None or not worker_proc.is_alive():
                    logger.info('Starting worker...')
                    t = time.time()
                    worker_proc = multiprocessing.Process(
                        target=create_queue_producer)
                    worker_proc.start()
                    logger.info('Working...')
                worker_proc.join(10)

                if beaver_config.get('refresh_worker_process'):
                    if beaver_config.get(
                            'refresh_worker_process') < time.time() - t:
                        logger.info(
                            'Worker has exceeded refresh limit. Terminating process...'
                        )
                        cleanup(None, None)

        except KeyboardInterrupt:
            # Ctrl-C is handled by the SIGINT handler above.
            pass
def run(args=None):
    """Supervise a worker process that tails files and feeds the queue,
    restarting it if it dies or exceeds its refresh interval.

    Blocks forever; exits only through the installed signal handlers.
    """

    logger = setup_custom_logger('beaver', args)
    beaver_config = BeaverConfig(args, logger=logger)
    # so the config file can override the logger
    logger = setup_custom_logger('beaver', args, config=beaver_config)

    if beaver_config.get('logstash_version') not in [0, 1]:
        raise LookupError("Invalid logstash_version")

    # Bounded hand-off queue between the producer and consumer processes.
    queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))

    worker_proc = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    def cleanup(signalnum, frame):
        # Called both as a signal handler (signalnum set) and directly
        # with (None, None) when the worker is being refreshed.
        if signalnum is not None:
            # Reverse-map signal number -> name (Python 2 iteritems).
            sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]
            logger.info('{0} detected'.format(sig_name))
            logger.info('Shutting down. Please wait...')
        else:
            logger.info('Worker process cleanup in progress...')

        # Ask the consumer to stop; ignore a full queue.
        try:
            queue.put_nowait(('exit', ()))
        except Queue.Full:
            pass

        if worker_proc is not None:
            try:
                worker_proc.terminate()
                worker_proc.join()
            except RuntimeError:
                pass

        if ssh_tunnel is not None:
            # NOTE(review): on a refresh (signalnum None) the tunnel is
            # closed here and not visibly reopened for the next worker.
            logger.info('Closing ssh tunnel...')
            ssh_tunnel.close()

        if signalnum is not None:
            logger.info('Shutdown complete.')
            # Hard exit; status is the raw signal number.
            return os._exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info('Starting queue consumer')
        proc.start()
        return proc

    def create_queue_producer():
        # Runs in a child process: tail files and feed the queue.
        worker = Worker(beaver_config, queue_consumer_function=create_queue_consumer, callback=queue.put, logger=logger)
        worker.loop()

    while 1:

        try:
            if REOPEN_FILES:
                logger.debug('Detected non-linux platform. Files will be reopened for tailing')

            t = time.time()
            while True:
                # Respawn the worker whenever it is missing or has died
                # (including after a refresh terminated it).
                if worker_proc is None or not worker_proc.is_alive():
                    logger.info('Starting worker...')
                    t = time.time()
                    worker_proc = multiprocessing.Process(target=create_queue_producer)
                    worker_proc.start()
                    logger.info('Working...')
                worker_proc.join(10)

                if beaver_config.get('refresh_worker_process'):
                    if beaver_config.get('refresh_worker_process') < time.time() - t:
                        logger.info('Worker has exceeded refresh limit. Terminating process...')
                        cleanup(None, None)

        except KeyboardInterrupt:
            # Ctrl-C is handled by the SIGINT handler above.
            pass
 def __init__(self, beaver_config, file_config, logger=None):
     """Stdout transport: initialize the base transport plus a bare
     (formatter-less) logger that writes to the configured output."""
     base = super(StdoutTransport, self)
     base.__init__(beaver_config, file_config, logger=logger)
     self._stdout = setup_custom_logger(
         'stdout', formatter=False, output=beaver_config.get('output'))
Example #9
0
def run(args=None):
    """Run beaver with a TailManager in the current process.

    Sets up the queue, ssh tunnel and signal handlers, then hands
    control to TailManager.run(); shutdown happens via the handlers.
    """
    logger = setup_custom_logger('beaver', args)

    beaver_config = BeaverConfig(args, logger=logger)
    # Joinable so the consumer can acknowledge processed items.
    queue = multiprocessing.JoinableQueue(beaver_config.get('max_queue_size'))

    manager = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    # Thin wrappers forwarding to the queue; used as callbacks below.
    def queue_put(*args):
        return queue.put(*args)

    def queue_put_nowait(*args):
        return queue.put_nowait(*args)

    def cleanup(signalnum, frame):
        # Reverse-map signal number -> name (Python 2 iteritems).
        sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]
        logger.info("{0} detected".format(sig_name))
        logger.info("Shutting down. Please wait...")

        if manager is not None:
            logger.info("Closing worker...")
            try:
                manager.close()
            except RuntimeError:
                pass

        # Ask the consumer process to stop; ignore a full queue.
        try:
            queue_put_nowait(("exit", ()))
        except Queue.Full:
            pass

        if ssh_tunnel is not None:
            logger.info("Closing ssh tunnel...")
            ssh_tunnel.close()

        logger.info("Shutdown complete.")
        # Raises SystemExit with the signal number as the exit status.
        return sys.exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info("Starting queue consumer")
        proc.start()
        return proc

    if REOPEN_FILES:
        logger.debug("Detected non-linux platform. Files will be reopened for tailing")

    logger.info("Starting worker...")
    manager = TailManager(
        beaver_config=beaver_config,
        queue_consumer_function=create_queue_consumer,
        callback=queue_put,
        logger=logger
    )

    logger.info("Working...")
    manager.run()
Example #10
0
def run(args=None):
    """Run beaver with a TailManager in the current process.

    Sets up the queue, ssh tunnel and signal handlers, then hands
    control to TailManager.run(); shutdown happens via the handlers.
    """
    logger = setup_custom_logger('beaver', args)

    beaver_config = BeaverConfig(args, logger=logger)
    # Joinable so the consumer can acknowledge processed items.
    queue = multiprocessing.JoinableQueue(beaver_config.get('max_queue_size'))

    manager = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    # Thin wrappers forwarding to the queue; used as callbacks below.
    def queue_put(*args):
        return queue.put(*args)

    def queue_put_nowait(*args):
        return queue.put_nowait(*args)

    def cleanup(signalnum, frame):
        # Reverse-map signal number -> name (Python 2 iteritems).
        sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]
        logger.info("{0} detected".format(sig_name))
        logger.info("Shutting down. Please wait...")

        if manager is not None:
            logger.info("Closing worker...")
            try:
                manager.close()
            except RuntimeError:
                pass

        # Ask the consumer process to stop; ignore a full queue.
        try:
            queue_put_nowait(("exit", ()))
        except Queue.Full:
            pass

        if ssh_tunnel is not None:
            logger.info("Closing ssh tunnel...")
            ssh_tunnel.close()

        logger.info("Shutdown complete.")
        # Raises SystemExit with the signal number as the exit status.
        return sys.exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info("Starting queue consumer")
        proc.start()
        return proc

    if REOPEN_FILES:
        logger.debug("Detected non-linux platform. Files will be reopened for tailing")

    logger.info("Starting worker...")
    # NOTE(review): paths is hard-coded to /var/log/system.log here,
    # bypassing whatever the config specifies - confirm this is intended.
    manager = TailManager(
        paths=["/var/log/system.log"],
        beaver_config=beaver_config,
        queue_consumer_function=create_queue_consumer,
        callback=queue_put,
        logger=logger
    )

    logger.info("Working...")
    manager.run()
Example #11
0
 def __setstate__(self, orig_dict):
     """Rebuild the instance from its pickled dict; loggers do not
     pickle, so the stored logger name is turned back into a logger."""
     self.__dict__.update(orig_dict)
     logger_name = orig_dict['logger']
     self.logger = setup_custom_logger(logger_name)
Example #12
0
 def __init__(self, beaver_config, logger=None):
     """Stdout transport: initialize the base transport plus a bare
     (formatter-less) logger that writes to the configured output."""
     super(StdoutTransport, self).__init__(beaver_config, logger=logger)
     output = beaver_config.get('output')
     self._stdout = setup_custom_logger('stdout', formatter=False, output=output)
Example #13
0
def run(args=None):
    """Supervise a TailManager producer, restarting it when it dies or
    outlives the configured refresh interval.

    A multiprocessing.Event set by the signal handlers requests
    shutdown; the main loop notices it, exits, and runs cleanup().
    """

    logger = setup_custom_logger('beaver', args)
    beaver_config = BeaverConfig(args, logger=logger)

    if beaver_config.get('logstash_version') not in [0, 1]:
        raise LookupError("Invalid logstash_version")

    # Joinable so the consumer can acknowledge processed items.
    queue = multiprocessing.JoinableQueue(beaver_config.get('max_queue_size'))

    manager_proc = None
    # Set by request_shutdown(); polled by the supervision loop below.
    termination_requested = multiprocessing.Event()
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    # Thin wrappers forwarding to the queue; used as callbacks below.
    def queue_put(*args):
        return queue.put(*args)

    def queue_put_nowait(*args):
        return queue.put_nowait(*args)

    def request_shutdown(signalnum, frame):
        # Signal handler: only flags the request; actual teardown runs
        # in cleanup() on the main loop's way out.
        termination_requested.set()
        if signalnum is not None:
            # Reverse-map signal number -> name (Python 2 iteritems).
            sig_name = tuple((v) for v, k in signal.__dict__.iteritems()
                             if k == signalnum)[0]
            logger.info("{0} detected".format(sig_name))
            logger.info("Shutting down. Please wait...")
        else:
            logger.info('Worker process cleanup in progress...')

    def cleanup():
        # Tear down queue consumer, producer and ssh tunnel.
        try:
            queue_put_nowait(("exit", ()))
        except Queue.Full:
            pass

        if manager_proc is not None:
            try:
                manager_proc.close()
                manager_proc.join()
            except RuntimeError:
                pass

        if ssh_tunnel is not None:
            logger.info("Closing ssh tunnel...")
            ssh_tunnel.close()

    signal.signal(signal.SIGTERM, request_shutdown)
    signal.signal(signal.SIGINT, request_shutdown)
    signal.signal(signal.SIGQUIT, request_shutdown)

    def create_queue_consumer():
        # Spawn the consumer side of the queue in its own process.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)

        logger.info("Starting queue consumer")
        proc.start()
        return proc

    def create_queue_producer():
        # NOTE(review): the result is used via .start()/.is_alive()/.join()
        # below, so TailManager is presumably process-like - confirm.
        return TailManager(beaver_config=beaver_config,
                           queue_consumer_function=create_queue_consumer,
                           callback=queue_put,
                           logger=logger)

    last_start = None
    while not termination_requested.is_set():

        try:

            if REOPEN_FILES:
                logger.debug(
                    "Detected non-linux platform. Files will be reopened for tailing"
                )

            # Respawn the producer whenever it is missing or has died.
            if manager_proc is None or not manager_proc.is_alive():
                logger.info('Starting worker...')
                manager_proc = create_queue_producer()
                manager_proc.start()
                last_start = time.time()
                logger.info('Working...')

            if beaver_config.get(
                    'refresh_worker_process') and manager_proc.is_alive():
                if last_start and beaver_config.get(
                        'refresh_worker_process') < time.time() - last_start:
                    logger.info(
                        'Worker has exceeded refresh limit. Terminating process...'
                    )
                    cleanup()
            else:
                # Workaround for fact that multiprocessing.Event.wait() deadlocks on main thread
                # And blocks SIGINT signals from getting through.
                while not termination_requested.is_set():
                    time.sleep(0.5)

        except KeyboardInterrupt:
            # Shutdown is driven by the termination_requested flag.
            pass
    # Final teardown once shutdown has been requested.
    cleanup()