def main(name, *args):
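    # Connect to the local dispatcher, log in as the 'ups' service, emit a
    # 'service.ups.signal' event carrying the UPS name and the NOTIFYTYPE
    # environment variable, then disconnect. Assumes os and the dispatcher
    # Client are imported at module level.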
    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('ups')

    connection.emit_event('service.ups.signal', {
        'name': name,
        'type': os.environ['NOTIFYTYPE'],
    })

    connection.disconnect()
Example #2
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
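        # Persist scheduled jobs in MongoDB (APScheduler's MongoDBJobStore),
        # reusing the datastore's Mongo client, and start the scheduler in a
        # background thread.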
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
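        # Register the JSON schemas describing calendar tasks and their status
        # objects on the dispatcher client.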
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
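        # Submit the task, block until it completes, raise a CRITICAL alert if
        # it did not finish cleanly, and record the run in the datastore.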
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
Example #3
class Context(object):
    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management',
                                           ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
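        # Record newly provided dependency targets after a short delay and
        # start any stopped jobs whose requirements are now satisfied.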
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(
                ', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
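        # Consume kqueue PROC events: dispatch them to known jobs, and adopt
        # forked children that stay in their parent job's process group as
        # anonymous jobs.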
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(
                                    job.label))

    def track_pid(self, pid):
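        # Watch the pid for exit, exec and fork; KQ_NOTE_TRACK tells the
        # kernel to automatically attach the same watch to forked children.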
        ev = select.kevent(
            pid, select.KQ_FILTER_PROC, select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK
            | select.KQ_NOTE_TRACK, 0, 0)

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0,
                           0, 0)

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def bootstrap(self):
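        # Load and register the one-shot bootstrap job from a background
        # thread.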
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s',
                            metavar='SOCKET',
                            default=DEFAULT_SOCKET_ADDRESS,
                            help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Example #4
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        self.managed_files = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
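        # Walk the directory tree and register every file whose extension has
        # a matching template renderer as a managed file.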
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
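        # Render the template backing a managed file; on renderer failure,
        # return a placeholder comment instead of propagating the error.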
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warning('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
Example #5
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        self.managed_files = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation',
                                             FileGenerationService(self))
                self.client.register_service('etcd.management',
                                             ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warning('Cannot generate file {0}: {1}'.format(
                file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f',
                            action='store_true',
                            default=False,
                            help='Run in foreground')
        parser.add_argument('mountpoint',
                            metavar='MOUNTPOINT',
                            default='/etc',
                            help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
Example #6
class Context(object):
    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Example #7
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = MongoDBJobStore(database='freenas',
                                collection='calendar_tasks',
                                client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store},
                                             timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management',
                                             ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
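        # Variant of run_job that submits the task through
        # 'task.submit_with_env' so the task environment records the
        # originating calendar task name.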
        tid = self.client.call_sync(
            'task.submit_with_env', args[0], args[1:], {
                'RUN_AS_USER': '******',
                'CALENDAR_TASK_NAME': kwargs.get('name')
            })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync(
                    'alert.emit', {
                        'name':
                        'scheduler.task.failed',
                        'severity':
                        'CRITICAL',
                        'description':
                        'Task {0} has failed: {1}'.format(
                            kwargs.get('name', tid),
                            result['error']['message']),
                    })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f',
                            action='store_true',
                            default=False,
                            help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
Example #8
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
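        # Use the FreeNAS-specific job store as the default and an in-memory
        # store for temporary ('temp') jobs.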
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()