Ejemplo n.º 1
0
async def _handle_zfs_events(middleware, event_type, args):
    """Handle kernel ZFS events: track scrub/resilver scans and mail on scrub completion.

    Starts a ScanWatch daemon thread when a scan begins, tears it down when the
    scan ends, and sends a notification e-mail when a scrub finishes.
    """
    data = args['data']
    event = data.get('type')
    pool = data.get('pool_name')

    if event in ('misc.fs.zfs.resilver_start', 'misc.fs.zfs.scrub_start'):
        # Ignore events without a pool, and pools we are already watching.
        if not pool or pool in SCAN_THREADS:
            return
        watcher = ScanWatch(middleware, pool)
        SCAN_THREADS[pool] = watcher
        start_daemon_thread(target=watcher.run)

    elif event in (
        'misc.fs.zfs.resilver_finish', 'misc.fs.zfs.scrub_finish', 'misc.fs.zfs.scrub_abort',
    ):
        if not pool:
            return
        watcher = SCAN_THREADS.pop(pool, None)
        if watcher is None:
            return
        await middleware.run_in_thread(watcher.cancel)

        # Send the last event with SCRUB/RESILVER as FINISHED
        await middleware.run_in_thread(watcher.send_scan)

    if event == 'misc.fs.zfs.scrub_finish':
        await middleware.call('mail.send', {
            'subject': f'{socket.gethostname()}: scrub finished',
            'text': f"scrub of pool '{data.get('pool_name')}' finished",
        })
Ejemplo n.º 2
0
    def start(self):
        """Generate the zettarepl definition and start the worker process.

        A failure while generating the definition is recorded via
        ``zettarepl.set_error`` and re-raised as a ``CallError``; a successful
        generation clears any previously recorded error.
        """
        try:
            definition, hold_tasks = self.middleware.call_sync(
                "zettarepl.get_definition")
        except Exception as e:
            self.logger.error("Error generating zettarepl definition",
                              exc_info=True)
            # Persist the failure state so callers of zettarepl state APIs
            # can see why the service is not running.
            self.middleware.call_sync(
                "zettarepl.set_error", {
                    "state": "ERROR",
                    "datetime": datetime.utcnow(),
                    "error": make_sentence(str(e)),
                })
            raise CallError(f"Internal error: {e!r}")
        else:
            # Definition generated successfully: clear any stored error.
            self.middleware.call_sync("zettarepl.set_error", None)

        with self.lock:
            # Only spawn a new worker if one is not already running.
            if not self.is_running():
                self.queue = multiprocessing.Queue()
                # ZettareplProcess is a callable object used as the child
                # process entry point.
                self.process = multiprocessing.Process(
                    name="zettarepl",
                    target=ZettareplProcess(definition,
                                            self.middleware.debug_level,
                                            self.middleware.log_handler,
                                            self.queue, self.observer_queue))
                self.process.start()
                # Background thread that joins/reaps the child when it exits.
                start_daemon_thread(target=self._join, args=(self.process, ))

                # The observer queue reader is shared across restarts; start
                # it only once.
                if self.observer_queue_reader is None:
                    self.observer_queue_reader = start_daemon_thread(
                        target=self._observer_queue_reader)

                self.middleware.call_sync("zettarepl.notify_definition",
                                          definition, hold_tasks)
Ejemplo n.º 3
0
async def devd_zfs_hook(middleware, data):
    """React to devd ZFS events: manage scan watchers and the ScrubFinished alert."""
    etype = data.get('type')

    if etype in ('misc.fs.zfs.resilver_start',
                 'misc.fs.zfs.scrub_start'):
        pool = data.get('pool_name')
        # Skip events without a pool name or pools already being watched.
        if not pool or pool in SCAN_THREADS:
            return
        watch = ScanWatch(middleware, pool)
        SCAN_THREADS[pool] = watch
        start_daemon_thread(target=watch.run)

    elif etype in (
            'misc.fs.zfs.resilver_finish',
            'misc.fs.zfs.scrub_finish',
            'misc.fs.zfs.scrub_abort',
    ):
        pool = data.get('pool_name')
        if not pool:
            return
        watch = SCAN_THREADS.pop(pool, None)
        if watch is None:
            return
        await middleware.run_in_thread(watch.cancel)

        # Push one final event reporting the SCRUB/RESILVER as FINISHED.
        await middleware.run_in_thread(watch.send_scan)

    if etype == 'misc.fs.zfs.scrub_finish':
        # Recreate the one-shot alert so it reflects the latest finish.
        await middleware.call('alert.oneshot_delete', 'ScrubFinished',
                              data.get('pool_name'))
        await middleware.call('alert.oneshot_create', 'ScrubFinished',
                              data.get('pool_name'))
Ejemplo n.º 4
0
async def devd_zfs_hook(middleware, data):
    """Track ZFS scrub/resilver scans and maintain the ScrubFinished one-shot alert."""
    kind = data.get('type')

    if kind in ('misc.fs.zfs.resilver_start', 'misc.fs.zfs.scrub_start'):
        name = data.get('pool_name')
        if not name or name in SCAN_THREADS:
            # No pool in the event, or a watcher already exists for it.
            return
        watcher = ScanWatch(middleware, name)
        SCAN_THREADS[name] = watcher
        start_daemon_thread(target=watcher.run)

    elif kind in (
        'misc.fs.zfs.resilver_finish', 'misc.fs.zfs.scrub_finish', 'misc.fs.zfs.scrub_abort',
    ):
        name = data.get('pool_name')
        if not name:
            return
        watcher = SCAN_THREADS.pop(name, None)
        if not watcher:
            return
        await middleware.run_in_thread(watcher.cancel)

        # Emit the final event with SCRUB/RESILVER marked as FINISHED.
        await middleware.run_in_thread(watcher.send_scan)

    if kind == 'misc.fs.zfs.scrub_finish':
        # Delete-then-create refreshes the alert's timestamp for this pool.
        await middleware.call('alert.oneshot_delete', 'ScrubFinished', data.get('pool_name'))
        await middleware.call('alert.oneshot_create', 'ScrubFinished', data.get('pool_name'))
Ejemplo n.º 5
0
    def __call__(self):
        """Entry point for the zettarepl worker: set up logging, build the
        scheduler/shell machinery, and run zettarepl forever.
        """
        # NOTE(review): logging.TRACE is not a stdlib level; presumably a
        # custom level registered by the middleware logger — confirm.
        if logging.getLevelName(self.debug_level) == logging.TRACE:
            # If we want TRACE then we want all debug from zettarepl
            debug_level = "DEBUG"
        elif logging.getLevelName(self.debug_level) == logging.DEBUG:
            # Regular development level. We don't need verbose debug from zettarepl
            debug_level = "INFO"
        else:
            debug_level = self.debug_level
        setup_logging("zettarepl", debug_level, self.log_handler)

        definition = Definition.from_data(self.definition)

        # Wall clock plus a timezone-aware clock derived from the definition.
        clock = Clock()
        tz_clock = TzClock(definition.timezone, clock.now)

        scheduler = Scheduler(clock, tz_clock)
        local_shell = LocalShell()

        self.zettarepl = Zettarepl(scheduler, local_shell)
        self.zettarepl.set_observer(self._observer)
        self.zettarepl.set_tasks(definition.tasks)

        # Commands arrive on a queue; service them on a daemon thread.
        start_daemon_thread(target=self._process_command_queue)

        # Keep zettarepl alive: log unhandled exceptions and retry after a
        # 10-second back-off instead of letting the process die.
        while True:
            try:
                self.zettarepl.run()
            except Exception:
                logging.getLogger("zettarepl").error("Unhandled exception",
                                                     exc_info=True)
                time.sleep(10)
Ejemplo n.º 6
0
async def _handle_zfs_events(middleware, event_type, args):
    """Watch scrub/resilver scans for ZFS events and e-mail when a scrub finishes."""
    data = args['data']
    kind = data.get('type')

    if kind in ('misc.fs.zfs.resilver_start', 'misc.fs.zfs.scrub_start'):
        name = data.get('pool_name')
        if not name:
            return
        if name in SCAN_THREADS:
            # Already watching this pool.
            return
        watcher = ScanWatch(middleware, name)
        SCAN_THREADS[name] = watcher
        start_daemon_thread(target=watcher.run)

    elif kind in (
        'misc.fs.zfs.resilver_finish', 'misc.fs.zfs.scrub_finish', 'misc.fs.zfs.scrub_abort',
    ):
        name = data.get('pool_name')
        if not name:
            return
        watcher = SCAN_THREADS.pop(name, None)
        if watcher is None:
            return
        await middleware.run_in_thread(watcher.cancel)

        # One last event reporting the SCRUB/RESILVER as FINISHED.
        await middleware.run_in_thread(watcher.send_scan)

    if kind == 'misc.fs.zfs.scrub_finish':
        await middleware.call('mail.send', {
            'subject': f'{socket.gethostname()}: scrub finished',
            'text': f"scrub of pool '{data.get('pool_name')}' finished",
        })
Ejemplo n.º 7
0
async def resilver_scrub_start(middleware, pool_name):
    """Start watching a resilver/scrub on *pool_name* unless one is already tracked."""
    # Nothing to do without a pool name or when a watcher already exists.
    if not pool_name or pool_name in SCAN_THREADS:
        return
    watcher = ScanWatch(middleware, pool_name)
    SCAN_THREADS[pool_name] = watcher
    start_daemon_thread(target=watcher.run)
Ejemplo n.º 8
0
 async def ensure_remote_client(self):
     """Lazily configure and start the shared remote client.

     Best-effort: if ``failover.remote_ip`` raises ``CallError`` the client
     is left unconfigured so a later call can retry.
     """
     if self.CLIENT.remote_ip is not None:
         # Already configured; the client thread was started previously.
         return
     try:
         self.CLIENT.remote_ip = await self.middleware.call('failover.remote_ip')
         self.CLIENT.middleware = self.middleware
         start_daemon_thread(target=self.CLIENT.run)
     except CallError:
         # Remote IP not available yet; silently retry on a future call.
         pass
Ejemplo n.º 9
0
    def __call__(self):
        """Zettarepl child-process entry point.

        Sets the process title, configures logging (including forwarding log
        records to the observer queue), parses the replication definition,
        wires up the scheduler, and then runs zettarepl forever.
        """
        setproctitle.setproctitle('middlewared (zettarepl)')
        # Exit automatically if the parent middleware process dies.
        osc.die_with_parent()
        # NOTE(review): logging.TRACE is not a stdlib level; presumably
        # registered by the middleware logging setup — confirm.
        if logging.getLevelName(self.debug_level) == logging.TRACE:
            # If we want TRACE then we want all debug from zettarepl
            default_level = logging.DEBUG
        elif logging.getLevelName(self.debug_level) == logging.DEBUG:
            # Regular development level. We don't need verbose debug from zettarepl
            default_level = logging.INFO
        else:
            default_level = logging.getLevelName(self.debug_level)
        setup_logging("", "DEBUG", self.log_handler)
        # Mirror zettarepl log records onto the observer queue so the parent
        # process can persist/display them.
        oqlh = ObserverQueueLoggingHandler(self.observer_queue)
        oqlh.setFormatter(
            logging.Formatter(
                '[%(asctime)s] %(levelname)-8s [%(threadName)s] [%(name)s] %(message)s',
                '%Y/%m/%d %H:%M:%S'))
        logging.getLogger("zettarepl").addHandler(oqlh)
        for handler in logging.getLogger("zettarepl").handlers:
            handler.addFilter(LongStringsFilter())
            handler.addFilter(ReplicationTaskLoggingLevelFilter(default_level))

        # Subscribe to middleware logging reconfiguration over the internal
        # unix socket so this process follows runtime logging changes.
        c = Client('ws+unix:///var/run/middlewared-internal.sock',
                   py_exceptions=True)
        c.subscribe(
            'core.reconfigure_logging',
            lambda *args, **kwargs: reconfigure_logging('zettarepl_file'))

        # Parse leniently and report definition errors through the observer
        # queue rather than crashing.
        definition = Definition.from_data(self.definition,
                                          raise_on_error=False)
        self.observer_queue.put(DefinitionErrors(definition.errors))

        clock = Clock()
        tz_clock = TzClock(definition.timezone, clock.now)

        scheduler = Scheduler(clock, tz_clock)
        local_shell = LocalShell()

        self.zettarepl = Zettarepl(scheduler, local_shell)
        self.zettarepl.set_observer(self._observer)
        self.zettarepl.set_tasks(definition.tasks)

        # Service incoming commands on a daemon thread.
        start_daemon_thread(target=self._process_command_queue)

        # Keep running: log unhandled exceptions and retry after 10 seconds.
        while True:
            try:
                self.zettarepl.run()
            except Exception:
                logging.getLogger("zettarepl").error("Unhandled exception",
                                                     exc_info=True)
                time.sleep(10)
Ejemplo n.º 10
0
async def setup(middleware):
    """Start the VRRP FIFO listener thread, once, on licensed (HA) systems."""
    global VRRP_THREAD

    # only run on licensed systems
    if not await middleware.call('failover.licensed'):
        return

    if VRRP_THREAD is None:
        VRRP_THREAD = start_daemon_thread(
            target=vrrp_fifo_listen, args=(middleware,),
        )
Ejemplo n.º 11
0
    def stop(self):
        """Terminate the zettarepl child process, escalating to SIGKILL after 5s."""
        with self.lock:
            if not self.process:
                return

            self.process.terminate()
            reaped = threading.Event()

            def reap():
                # Collect the child's exit status; it may already be gone.
                try:
                    os.waitpid(self.process.pid, 0)
                except ChildProcessError:
                    pass
                reaped.set()

            # Reap on a daemon thread so we can bound the wait.
            start_daemon_thread(target=reap)
            if not reaped.wait(5):
                self.logger.warning("Zettarepl was not joined in time, sending SIGKILL")
                os.kill(self.process.pid, signal.SIGKILL)

            self.process = None
Ejemplo n.º 12
0
    async def register_graphite_queue(self, queue):
        """Register a consumer queue, lazily starting the internal Graphite server."""
        async with self.lock:
            # A pending shutdown is obsolete now that we have a consumer.
            if self.server_shutdown_timer is not None:
                self.middleware.logger.debug(
                    "Canceling internal Graphite server shutdown")
                self.server_shutdown_timer.cancel()
                self.server_shutdown_timer = None

            self.queues.append(queue)

            if self.server is not None:
                return

            self.middleware.logger.debug("Starting internal Graphite server")
            GraphiteHandler.middleware = self.middleware
            self.server = GraphiteServer(("127.0.0.1", 2003), GraphiteHandler)
            start_daemon_thread(target=self.server.serve_forever)
            self.has_server = True
            # Point collectd at the freshly started server.
            await self.middleware.call("service.restart", "collectd")
Ejemplo n.º 13
0
    def start(self, definition=None):
        """Start the zettarepl worker process if it is not already running.

        When *definition* is None, it is generated via
        ``zettarepl.get_definition``; generation failures are logged and
        re-raised as ``CallError``.
        """
        if definition is None:
            try:
                definition = self.middleware.call_sync("zettarepl.get_definition")
            except Exception as e:
                self.logger.error("Error generating zettarepl definition", exc_info=True)
                raise CallError(f"Internal error: {e!r}")

        with self.lock:
            if self.is_running():
                return

            self.queue = multiprocessing.Queue()
            # ZettareplProcess is a callable object serving as the child
            # process entry point.
            self.process = multiprocessing.Process(
                name="zettarepl",
                target=ZettareplProcess(
                    definition,
                    self.middleware.debug_level,
                    self.middleware.log_handler,
                    self.queue,
                    self.observer_queue,
                ),
            )
            self.process.start()

            # The observer queue reader survives restarts; start it once.
            if self.observer_queue_reader is None:
                self.observer_queue_reader = start_daemon_thread(target=self._observer_queue_reader)
Ejemplo n.º 14
0
def setup(middleware):
    """Spawn the daemon thread that forwards udev events to the middleware."""
    start_daemon_thread(target=udev_events, args=(middleware,))
Ejemplo n.º 15
0
 def __init__(self, *args, **kwargs):
     """Initialize the parent and kick off a background update check."""
     super().__init__(*args, **kwargs)
     # Cached update-check result; presumably populated by check_update —
     # verify against that method.
     self._check_update = None
     start_daemon_thread(target=self.check_update)
Ejemplo n.º 16
0
def setup(middleware):
    """Start the collectd→graphite forwarder and register reporting event sources."""
    start_daemon_thread(target=collectd_graphite, args=[middleware])
    for name, source in (
        ('reporting.get_data', ReportingEventSource),
        ('reporting.realtime', RealtimeEventSource),
    ):
        middleware.register_event_source(name, source)
Ejemplo n.º 17
0
async def setup(middleware):
    """Launch the background worker that listens for ZFS kernel events."""
    start_daemon_thread(target=setup_zfs_events_process, args=(middleware,))
Ejemplo n.º 18
0
 def __init__(self, *args, **kwargs):
     """Initialize the parent and start the update check in the background."""
     super().__init__(*args, **kwargs)
     # Holds the update-check result; presumably set by check_update —
     # confirm against that method.
     self._check_update = None
     start_daemon_thread(target=self.check_update)
Ejemplo n.º 19
0
def setup(middleware):
    """Wire up reporting: collectd→graphite bridge plus the two event sources."""
    start_daemon_thread(target=collectd_graphite, args=[middleware])

    event_sources = {
        'reporting.get_data': ReportingEventSource,
        'reporting.realtime': RealtimeEventSource,
    }
    for name, source in event_sources.items():
        middleware.register_event_source(name, source)
"""
Will read integers from stdin and submit tasks that execute specified number of seconds
In parallel, will print pool state every second
"""

import time

from middlewared.logger import setup_logging
from middlewared.utils.io_thread_pool_executor import IoThreadPoolExecutor
from middlewared.utils import start_daemon_thread


def monitor_executor(executor):
    """Print the repr of every worker in *executor* once per second, forever."""
    while True:
        line = " ".join(map(repr, executor.workers))
        print(line)
        time.sleep(1)


if __name__ == "__main__":
    setup_logging("middleware", "TRACE", "console")

    executor = IoThreadPoolExecutor("IoThread", 5)
    start_daemon_thread(target=monitor_executor, args=(executor, ))

    while True:
        sleep = int(input())
        print(f"Starting task {sleep} seconds long")
        executor.submit(time.sleep, sleep)