# Example #1
class Context(object):
    """Collectd plugin context.

    Holds the dispatcher client connection and the entity subscribers the
    plugin uses to stream entity updates (e.g. disk SMART status).
    """

    def __init__(self, *args, **kwargs):
        # Dispatcher client; assigned by init_dispatcher().
        self.client = None
        # Maps entity name -> running EntitySubscriber instance.
        self.entity_subscribers = {}

    def start_entity_subscribers(self):
        """(Re)start an EntitySubscriber for every configured entity name.

        Any existing subscriber is stopped and discarded first so a
        reconnect always starts from fresh subscriber state.
        """
        for i in ENTITY_SUBSCRIBERS:
            if i in self.entity_subscribers:
                self.entity_subscribers[i].stop()
                del self.entity_subscribers[i]

            e = EntitySubscriber(self.client, i)
            e.start()
            self.entity_subscribers[i] = e

    def wait_entity_subscribers(self):
        """Block until every entity subscriber reports it is ready."""
        for i in self.entity_subscribers.values():
            i.wait_ready()

    def connect(self):
        """Connect and log in to the dispatcher, retrying forever.

        Retries every 5 seconds on OSError/RpcException; returns only once
        connection, login and all entity subscribers are up.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('collectd_{0}'.format(PLUGIN_NAME))
                # enable streaming responses as they are needed by the
                # entity subscribers for reliable performance
                self.client.call_sync('management.enable_features',
                                      ['streaming_responses'])
                self.start_entity_subscribers()
                self.wait_entity_subscribers()
                return
            except (OSError, RpcException) as err:
                # Include the actual failure reason in the log line
                # (previously `err` was caught but never used).
                collectd.warning(
                    "{0} collectd plugin could not connect to server ({1}) retrying in 5 seconds"
                    .format(PLUGIN_NAME, err))
                time.sleep(5)

    def connection_error(self, event, **kwargs):
        """Client error callback: reconnect on connection loss or logout."""
        if event in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            collectd.info(
                '{0} collectd plugin connection to dispatcher lost'.format(
                    PLUGIN_NAME))
            self.connect()

    def init_dispatcher(self):
        """Create the dispatcher client, hook error handling and connect."""
        self.client = Client()
        self.client.on_error(self.connection_error)
        self.connect()

    def disk_temps(self):
        """Yield (disk name, temperature) for disks reporting a SMART temp."""
        for disk in self.entity_subscribers['disk'].query(
            ('status.smart_info.temperature', '!=', None)):
            yield (disk['name'], disk['status']['smart_info']['temperature'])
class Context(object):
    """Holds the dispatcher client and the entity subscribers the collectd
    plugin relies on for streaming entity updates.
    """

    def __init__(self, *args, **kwargs):
        # Assigned by init_dispatcher(); None until then.
        self.client = None
        # entity name -> EntitySubscriber
        self.entity_subscribers = {}

    def start_entity_subscribers(self):
        """Drop any stale subscriber and start a fresh one per entity."""
        for name in ENTITY_SUBSCRIBERS:
            stale = self.entity_subscribers.pop(name, None)
            if stale is not None:
                stale.stop()

            subscriber = EntitySubscriber(self.client, name)
            subscriber.start()
            self.entity_subscribers[name] = subscriber

    def wait_entity_subscribers(self):
        """Wait until each subscriber signals readiness."""
        for subscriber in self.entity_subscribers.values():
            subscriber.wait_ready()

    def connect(self):
        """Keep trying to connect and log in to the dispatcher until it works."""
        while True:
            try:
                self.client.connect("unix:")
                self.client.login_service("collectd_{0}".format(PLUGIN_NAME))
                # streaming responses are needed by the entity subscribers
                # for reliable performance
                self.client.call_sync("management.enable_features", ["streaming_responses"])
                self.start_entity_subscribers()
                self.wait_entity_subscribers()
            except (OSError, RpcException) as err:
                collectd.warning(
                    "{0} collectd plugin could not connect to server retrying in 5 seconds".format(PLUGIN_NAME)
                )
                time.sleep(5)
            else:
                return

    def connection_error(self, event, **kwargs):
        """on_error hook: re-establish the connection when it drops."""
        if event not in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
            return

        collectd.info("{0} collectd plugin connection to dispatcher lost".format(PLUGIN_NAME))
        self.connect()

    def init_dispatcher(self):
        """Build the client, register the error handler and connect."""
        self.client = Client()
        self.client.on_error(self.connection_error)
        self.connect()

    def disk_temps(self):
        """Generate (name, temperature) pairs for disks with SMART temps."""
        temp_filter = ("status.smart_info.temperature", "!=", None)
        for disk in self.entity_subscribers["disk"].query(temp_filter):
            smart = disk["status"]["smart_info"]
            yield (disk["name"], smart["temperature"])
# Example #3
class Context(object):
    """schedulerd daemon context.

    Wires together the datastore, the dispatcher client and an APScheduler
    BackgroundScheduler; registers the calendar-task JSON schemas and runs
    scheduled jobs through the dispatcher task subsystem.
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None        # middleware config file path (set in main())
        self.datastore = None     # set by init_datastore()
        self.configstore = None   # ConfigStore wrapper over the datastore
        self.client = None        # dispatcher Client, set by init_dispatcher()
        self.scheduler = None     # APScheduler instance, set by init_scheduler()
        self.active_tasks = {}    # calendar job id -> dispatcher task id, while running

    def init_datastore(self):
        """Open the datastore (exiting the process on failure) and build the
        ConfigStore on top of it."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect automatically
        whenever the connection closes or the session is logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start a background scheduler backed by the 'calendar_tasks'
        MongoDB collection, pinned to UTC."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        """Register the 'calendar-task' and 'calendar-task-status' schemas
        with the dispatcher."""
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        """Connect, log in and expose the scheduler RPC services, retrying
        every second until the dispatcher is reachable."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Submit a dispatcher task for a calendar job and wait for it.

        Expects 'id' and 'name' keys in kwargs. Emits a CRITICAL alert if
        the task did not finish successfully, then records the run in the
        'schedulerd.runs' collection.
        """
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                # Best effort: failure to emit the alert is logged, not fatal.
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Entry point: parse arguments, initialize subsystems and block
        forever serving dispatcher requests."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        # NOTE(review): '-f' (foreground) is parsed but not referenced in this method.
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
# Example #4
class Context(object):
    """debugd daemon context.

    Maintains an msock connection to the support proxy, a local control
    server and a dispatcher connection, and tracks connection state and
    the list of running debug jobs.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()       # link to the support proxy
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1             # msock channel fd used for RPC traffic
        self.connection_id = None    # UUID identifying this support session
        self.jobs = []               # running debug jobs (see run_job)
        self.state = ConnectionState.OFFLINE
        self.config = None           # parsed JSON config (read_config)
        self.keepalive = None        # background connect/keepalive thread
        self.connected_at = None     # datetime of last successful connect
        self.cv = Condition()        # guards transitions of self.state
        self.rpc = RpcContext()
        self.client = Client()       # replaced per-connection in connect_keepalive
        self.server = Server()
        self.middleware_endpoint = None  # dispatcher Client (init_dispatcher)

    def start(self, configpath, sockpath):
        """Read config, start the local control server and serve it on a
        daemon thread. SIGUSR2 triggers a (re)connect to the support proxy."""
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control',
                                                  ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever,
                         name='server thread',
                         daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect automatically
        when the connection closes or the session is logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect and expose the debugd management service on the
        dispatcher, retrying every second until it succeeds."""
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service(
                    'debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file; exit the process if it is missing or
        malformed."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        """Spawn the keepalive thread that maintains the support connection.

        With discard=True the current connection id is dropped so a new
        session UUID is generated on the next connect.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive,
                                          daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: establish the support-proxy connection, then wait
        until the state leaves CONNECTED; retry every 10 seconds on failure.

        Returns only when the state becomes OFFLINE (explicit disconnect).
        """
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info(
                    'Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                # Fresh RPC client over the msock channel for this session.
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(
                    self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id),
                                      socket.gethostname(), get_version(),
                                      'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning(
                    'Failed to initiate support connection: {0}'.format(err),
                    exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block until the connection is lost or shut down.
                    self.cv.wait_for(lambda: self.state in (
                        ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning(
                'Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and clear all jobs."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        """msock close callback: mark the connection LOST so the keepalive
        loop retries."""
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Register and start a debug job under this context."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Set connection state and wake anyone waiting on the condition."""
        with self.cv:
            self.state = state
            self.cv.notify_all()
# Example #5
class Context(object):
    """serviced daemon context.

    Tracks service jobs, watches their processes through a kqueue event
    loop, serves a local RPC server and keeps a dispatcher connection.
    """

    def __init__(self):
        self.server = None           # local RPC server (init_server())
        self.client = None           # dispatcher Client (init_dispatcher())
        self.jobs = {}               # job id -> Job
        self.provides = set()        # dependency targets currently provided
        self.lock = RLock()          # guards jobs/provides mutation
        self.kq = select.kqueue()    # kernel queue for process events
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management',
                                           ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Create the dispatcher client and connect (no-op if already
        connected); reconnect automatically on connection loss/logout."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local streaming RPC server on a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Add dependency targets and, after a 2 second delay, start any
        stopped job whose requirements are now satisfied.

        NOTE(review): `self.provides |= targets` assumes `targets` is a set
        (or set-like) — confirm against callers.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(
                ', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the job owning `pid`, or None if no job matches."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop: dispatch process events to jobs and adopt
        forked children of tracked processes as anonymous jobs."""
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        # Event for a pid we already track -> forward to job.
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            # Child that exited before we adopted it.
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            # ev.data holds the parent pid for child notes.
                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(
                                    job.label))

    def track_pid(self, pid):
        """Register kqueue process tracking (exit/exec/fork, following
        children) for `pid`."""
        ev = select.kevent(
            pid, select.KQ_FILTER_PROC, select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK
            | select.KQ_NOTE_TRACK, 0, 0)

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove kqueue tracking for `pid`; ignore if already gone."""
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0,
                           0, 0)

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and, if connected, to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Connect and resume the serviced RPC services on the dispatcher,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Load and register the one-shot bootstrap job on a helper thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect, close the server and exit the process."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """Entry point: parse arguments, start the server, bootstrap and
        enter the kqueue event loop (never returns)."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s',
                            metavar='SOCKET',
                            default=DEFAULT_SOCKET_ADDRESS,
                            help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
# Example #6
class Main(object):
    """dscached daemon: caching directory-service lookup server.

    Owns the datastore/configstore handles, the dispatcher client, the
    local RPC server and the directory plugins; answers account, group and
    host lookups through the registered RPC services.
    """

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None           # config path in main(), then parsed dict
        self.datastore = None        # set by init_datastore()
        self.configstore = None      # ConfigStore over the datastore
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None           # dispatcher Client (init_dispatcher())
        self.server = None           # local RPC server (init_server())
        self.plugin_dirs = []        # plugin directories from the config file
        self.plugins = {}            # plugin name -> plugin class
        self.directories = []        # configured Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        """Return configured Directory objects in search order, skipping
        names that resolve to no directory."""
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        """Return the lookup order: 'local' and 'system' always first."""
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory serving `domain_name`, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory called `name`, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose uid/gid range contains the given id.

        uid/gid 0 always map to the local directory. Returns None when no
        directory claims the id, or when neither uid nor gid is given.
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        """Open the datastore (exiting on failure) and build the ConfigStore."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect automatically
        on connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON config file and pick up plugin directories; exit
        the process if the file is unreadable or not valid JSON."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err): Python 3 exceptions have no '.message' attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect and resume the dscached RPC services on the dispatcher,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in `dir` as a plugin and call its _init hook.

        A failing plugin is logged and skipped; it must not take the daemon
        down.
        """
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt still propagate.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Register a plugin class under `name` (called by plugins)."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Register a JSON schema with the dispatcher (called by plugins)."""
        self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure every directory from the datastore,
        logging and skipping any that fail to initialize."""
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException as err:
                # Previously a silent 'continue' — log why the directory
                # could not be brought up.
                self.logger.error('Cannot initialize directory: {0}'.format(err), exc_info=True)
                continue

    def load_config(self):
        """Pull runtime tunables from the configstore."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        """Entry point: parse arguments, initialize subsystems and block
        forever serving dispatcher requests."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
# Example #7
class Main(object):
    """alertd daemon: routes alerts from the datastore to emitters.

    Owns the datastore/configstore handles, the dispatcher client and the
    registered emitter plugins, and periodically re-emits active alerts
    according to the per-severity reminder schedule.
    """

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None       # config path in main(), then parsed dict
        self.datastore = None    # set by init_datastore()
        self.configstore = None  # ConfigStore over the datastore
        self.client = None       # dispatcher Client (init_dispatcher())
        self.plugin_dirs = []    # plugin directories from the config file
        self.emitters = {}       # emitter name -> emitter instance

    def init_datastore(self):
        """Open the datastore (exiting on failure) and build the ConfigStore."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect automatically
        on connection loss or logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the alert-reminder loop on a daemon thread."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load the JSON config file and pick up plugin directories; exit
        the process if the file is unreadable or not valid JSON."""
        try:
            # 'with' guarantees the file is closed even if json.load raises
            # (the old open/close pair leaked the handle on a parse error).
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err): Python 3 exceptions have no '.message' attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect and resume the alertd RPC services on the dispatcher,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in `dir` as a plugin and call its _init hook.

        A failing plugin is logged and skipped; it must not take the daemon
        down.
        """
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt still propagate.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        """Send `alert` through every matching alert filter's emitter.

        A filter matches when all of its predicates hold (the for/else
        fires only if no predicate broke out of the loop). Updates the
        alert's send count / timestamps and persists it.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'],
                        i['emitter'],
                        str(err))
                    )

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()

        if alert['one_shot']:
            alert['active'] = False

        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Mark `alert` inactive/cancelled and persist it."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))

        alert.update({
            'active': False,
            'cancelled': datetime.utcnow()
        })

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Instantiate emitter class `cls` and register it under `name`
        (called by plugins)."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        """Periodically re-emit active, undismissed alerts whose severity
        schedule says they are due again."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        """Perform the daemon check-in."""
        checkin()

    def main(self):
        """Entry point: parse arguments, initialize subsystems and block
        forever serving dispatcher requests."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/alertd.log', 'DEBUG')

        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """fnstatd daemon: collects statistics into an HDF5 database (PyTables)
    and exposes them over the dispatcher RPC bus."""
    def __init__(self):
        self.client = None
        self.server = None
        self.datastore = None
        self.hdf = None
        self.hdf_group = None
        self.config = None
        self.logger = logging.getLogger('statd')
        self.data_sources = {}

    def init_datastore(self):
        """Open the middleware datastore; exit the process on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        """Open (or create) the HDF5 stats database and the '/stats' group."""
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync('system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)
        # mode='a' preserves previously collected data points across restarts.
        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return the HDF5 table *name*, creating it on first use.

        Returns None (after logging) if PyTables raises.
        """
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def init_alert_config(self, name):
        """Fetch the per-source alert config (falling back to 'default') and
        register the too_high/too_low alert classes for *name*."""
        config_name = name if self.datastore.exists('statd.alerts', ('id', '=', name)) else 'default'
        alert_config = self.datastore.get_by_id('statd.alerts', config_name)

        self.client.call_sync(
            'alert.register_alert',
            'stat.{0}.too_high'.format(name),
            '{0} statistic value is too high'.format(name)
        )

        self.client.call_sync(
            'alert.register_alert',
            'stat.{0}.too_low'.format(name),
            '{0} statistic value is too low'.format(name)
        )

        return alert_config

    def get_data_source(self, name):
        """Return the DataSource for *name*, lazily creating and registering
        its event type on first access."""
        if name not in list(self.data_sources.keys()):
            config = DataSourceConfig(self.datastore, name)
            alert_config = self.init_alert_config(name)
            ds = DataSource(self, name, config, alert_config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def register_schemas(self):
        """Publish the 'stat' and 'stat-alert' JSON schemas on the bus."""
        self.client.register_schema('stat', {
            'type': 'object',
            'additionalProperties': True,
            'properties': {
                'name': {'type': 'string'},
                'last_value': {'type': ['integer', 'number', 'null']},
                'alerts': {'$ref': 'stat-alert'},
            }
        })
        self.client.register_schema('stat-alert', {
            'type': 'object',
            'additionalProperties': True,
            'properties': {
                'alert_high': {'type': ['integer', 'number', 'null']},
                'alert_high_enabled': {'type': 'boolean'},
                'alert_low': {'type': ['integer', 'number', 'null']},
                'alert_low_enabled': {'type': 'boolean'}
            }
        })

    def connect(self):
        """Connect and log in to the dispatcher, retrying forever.

        Registers and resumes all statd services, then re-registers the pulse
        event types for every known data source (needed after a reconnect).
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('statd')
                self.client.enable_server()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.alert', AlertService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.alert')
                self.client.resume_service('statd.debug')
                for i in list(self.data_sources.keys()):
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(i))

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the first connection;
        reconnects automatically when the connection is lost."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Graceful shutdown: stop the input server, disconnect and exit."""
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        """Fatal dispatcher error handler — shut the daemon down."""
        self.die()

    def main(self):
        """fnstatd entry point: parse args, install signal handlers, start the
        input server and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle.setproctitle('fnstatd')

        # Signal handlers
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.register_schemas()
        self.server.start()
        self.logger.info('Started')
        self.client.wait_forever()
# Beispiel #9
# 0
class Main(object):
    """networkd daemon: tracks physical NICs in the datastore, manages
    per-interface DHCP clients and serves the networkd.* RPC services."""

    def __init__(self):
        self.config = None
        self.client = None
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None
        self.dhcp_clients = {}
        self.dhcp_lock = threading.RLock()
        self.logger = logging.getLogger('networkd')
        self.default_interface = None
        self.cv = Condition()

    def dhclient_pid(self, interface):
        """Return the PID recorded in dhclient's pidfile for *interface*,
        or None if the pidfile is missing or unreadable."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """Check whether the dhclient recorded for *interface* is alive."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            # Signal 0 only probes for process existence.
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface, block=False, timeout=None):
        """Start a DHCP client on *interface*.

        With block=True, wait up to *timeout* seconds for a lease and return
        whether one was obtained; otherwise return True immediately.
        """
        if interface in self.dhcp_clients:
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def bind(old_lease, lease):
            # BUG FIX: the format string has four placeholders {0}-{3}; the
            # stray fifth argument (interface repeated) has been removed.
            self.logger.info('{0} DHCP lease on {1} from {2}, valid for {3} seconds'.format(
                'Renewed' if old_lease else 'Acquired',
                interface,
                client.server_address,
                lease.lifetime
            ))

            if old_lease is None or lease.client_ip != old_lease.client_ip:
                self.logger.info('Assigning IP address {0} to interface {1}'.format(lease.client_ip, interface))
                alias = lease.client_interface
                iface = netif.get_interface(interface)

                # Drop the address belonging to the previous lease, if any.
                if old_lease:
                    try:
                        addr = first_or_default(lambda a: a.address == old_lease.client_ip, iface.addresses)
                        if addr:
                            iface.remove_address(addr)
                    except OSError as err:
                        self.logger.error('Cannot remove alias {0}: {1}'.format(old_lease.client_ip, err.strerror))

                try:
                    iface.add_address(netif.InterfaceAddress(netif.AddressFamily.INET, alias))
                except OSError as err:
                    self.logger.error('Cannot add alias to {0}: {1}'.format(interface, err.strerror))

            if lease.router and self.configstore.get('network.dhcp.assign_gateway'):
                try:
                    rtable = netif.RoutingTable()
                    newroute = default_route(lease.router)
                    if rtable.default_route_ipv4 != newroute:
                        if rtable.default_route_ipv4:
                            self.logger.info('DHCP default route changed from {0} to {1}'.format(
                                rtable.default_route_ipv4,
                                newroute
                            ))
                            rtable.delete(rtable.default_route_ipv4)
                            # Reuse the route object computed above instead of
                            # building an identical one a second time.
                            rtable.add(newroute)
                        else:
                            self.logger.info('Adding default route via {0}'.format(lease.router))
                            rtable.add(newroute)
                except OSError as err:
                    self.logger.error('Cannot configure default route: {0}'.format(err.strerror))

            if lease.dns_addresses and self.configstore.get('network.dhcp.assign_dns'):
                inp = []
                addrs = []
                proc = subprocess.Popen(
                    ['/sbin/resolvconf', '-a', interface],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE
                )

                for i in lease.dns_addresses:
                    # Filter out bogus DNS server addresses
                    if str(i) in ('127.0.0.1', '0.0.0.0', '255.255.255.255'):
                        continue

                    inp.append('nameserver {0}'.format(i))
                    addrs.append(i)

                if lease.domain_name:
                    inp.append('search {0}'.format(lease.domain_name))

                proc.communicate('\n'.join(inp).encode('ascii'))
                proc.wait()
                self.client.emit_event('network.dns.configured', {
                    'addresses': addrs,
                })
                self.logger.info('Updated DNS configuration')
            else:
                # No usable DNS info in the lease — withdraw our entries.
                subprocess.call(['/sbin/resolvconf', '-d', interface])
                self.client.emit_event('network.dns.configured', {
                    'addresses': [],
                })
                self.logger.info('Deleted DNS configuration')

        def reject(reason):
            self.logger.info('DHCP request rejected on {0}: {1}'.format(interface, reason))
            self.deconfigure_dhcp(interface)
            if not block:
                # Retry in the background unless a caller is blocked waiting.
                t = threading.Timer(60, self.configure_dhcp, args=(interface,))
                t.start()

        def unbind(lease, reason):
            reasons = {
                dhcp.client.UnbindReason.EXPIRE: 'expired',
                dhcp.client.UnbindReason.REVOKE: 'revoked'
            }

            self.logger.info('DHCP lease on {0}: {1}'.format(interface, reasons.get(reason, 'revoked')))

        def state_change(state):
            # Any DHCP state transition is surfaced as an interface update.
            self.client.emit_event('network.interface.changed', {
                'operation': 'update',
                'ids': [interface]
            })

            self.client.emit_event('network.changed', {
                'operation': 'update'
            })

        with self.dhcp_lock:
            client = dhcp.client.Client(interface, lambda: socket.gethostname().split('.')[0])
            client.on_bind = bind
            client.on_unbind = unbind
            client.on_reject = reject
            client.on_state_change = state_change
            client.start()
            self.dhcp_clients[interface] = client

        if block:
            ret = client.wait_for_bind(timeout)
            if ret is None:
                # No lease within the timeout — tear the client down again.
                client.stop()
                del self.dhcp_clients[interface]

            return ret is not None

        return True

    def deconfigure_dhcp(self, interface):
        """Release the lease and stop the DHCP client on *interface*."""
        with self.dhcp_lock:
            client = self.dhcp_clients[interface]
            client.release()
            client.stop()
            del self.dhcp_clients[interface]

    def renew_dhcp(self, interface):
        """Force a lease renewal; raises RpcException when not possible."""
        if interface not in self.dhcp_clients:
            raise RpcException(errno.ENXIO, 'Interface {0} is not configured for DHCP'.format(interface))

        if not self.dhcp_clients[interface].lease:
            raise RpcException(errno.ENOENT, 'Cannot renew without a lease')

        self.dhcp_clients[interface].request(renew=True, timeout=30)

    def interface_detached(self, name):
        """Routing-socket callback: a NIC disappeared from the system."""
        # logger.warn is a deprecated alias — use warning().
        self.logger.warning('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Routing-socket callback: a NIC appeared in the system."""
        self.logger.warning('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """True if any configured interface uses DHCP and DHCP is allowed to
        assign the default gateway."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Sync the datastore with the physical NICs currently present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            if i.name in ('mgmt0', 'nat0'):
                continue

            if i.name.startswith(('tap', 'brg')):
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'name': None,
                    'cloned': False,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': i.mtu,
                    'media': None,
                    'mediaopts': [],
                    'aliases': [],
                    'capabilities': {
                        'add': [],
                        'del': []
                    }
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open the datastore and config store; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect/log in to the dispatcher, retrying forever.

        With resume=True the registered services are resumed (reconnect path).
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the first connection;
        automatically reconnects when the connection is lost."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing-socket listener thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Publish all networkd JSON schemas on the dispatcher bus."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-lagg-port-flags', {
            'type': 'array',
            'items': {'$ref': 'network-lagg-port-flags-items'}
        })

        self.client.register_schema('network-lagg-port-flags-items', {
            'type': 'string',
            'enum': list(netif.LaggPortFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {'$ref': 'network-interface-flags-items'}
        })

        self.client.register_schema('network-interface-flags-items', {
            'type': 'string',
            'enum': list(netif.InterfaceFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {'$ref': 'network-interface-capabilities-items'}
        })

        self.client.register_schema('network-interface-capabilities-items', {
            'type': 'string',
            'enum': list(netif.InterfaceCapability.__members__.keys())
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {'$ref': 'network-interface-mediaopts-items'}
        })

        self.client.register_schema('network-interface-mediaopts-items', {
            'type': 'string',
            'enum': list(netif.InterfaceMediaOption.__members__.keys())
        })

        self.client.register_schema('network-interface-nd6-flag', {
            'type': 'array',
            'items': {'$ref': 'network-interface-nd6-flag-items'}
        })

        self.client.register_schema('network-interface-nd6-flag-items', {
            'type': 'string',
            'enum': list(netif.NeighborDiscoveryFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-dhcp-state', {
            'type': 'string',
            'enum': [
                'INIT',
                'SELECTING',
                'REQUESTING',
                'INIT_REBOOT',
                'REBOOTING',
                'BOUND',
                'RENEWING',
                'REBINDING'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'$ref': 'network-interface-status-linkstate'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'active_media_type': {'type': 'string'},
                'active_media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'cloned': {'type': 'boolean'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'dhcp': {
                    'type': 'object',
                    'properties': {
                        'state': {'$ref': 'network-interface-dhcp-state'},
                        'server_address': {'type': 'string'},
                        'server_name': {'type': 'string'},
                        'lease_starts_at': {'type': 'datetime'},
                        'lease_ends_at': {'type': 'datetime'}
                    }
                },
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                },
                'nd6_flags': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-nd6-flag'}
                },
                'ports': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {
                                'type': 'object',
                                'properties': {
                                    'name': {'type': 'string'},
                                    'flags': {'$ref': 'network-lagg-port-flags'}
                                }
                            }
                        }
                    ]
                },
                'members': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {'type': 'string'}
                        }
                    ]
                },
                'parent': {'type': ['string', 'null']},
                'tag': {'type': ['integer', 'null']}
            }
        })

        self.client.register_schema('network-interface-status-linkstate', {
            'type': 'string',
            'enum': list(netif.InterfaceLinkState.__members__.keys())
        })

    def main(self):
        """networkd entry point."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')
        self.logger.info('Started')
        self.client.wait_forever()
class Main:
    """Older/simpler networkd variant: delegates DHCP to the external
    /sbin/dhclient binary instead of an in-process client."""
    def __init__(self):
        self.config = None
        self.client = None
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None
        self.logger = logging.getLogger('networkd')

    def dhclient_pid(self, interface):
        """Return the PID from dhclient's pidfile for *interface*, or None if
        the pidfile is missing or unreadable."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """Check whether the dhclient recorded for *interface* is alive."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            # Signal 0 only probes for process existence.
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface):
        """Spawn /sbin/dhclient on *interface*; return True on success or if
        a dhclient is already running for it."""
        # Check if dhclient is running
        if self.dhclient_running(interface):
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def unblock_signals():
            # The daemon blocks these; the child must receive them normally.
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM, signal.SIGINT])

        ret = subprocess.call(['/sbin/dhclient', interface], close_fds=True, preexec_fn=unblock_signals)
        return ret == 0

    def interface_detached(self, name):
        """Routing-socket callback: a NIC disappeared from the system."""
        self.logger.warn('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Routing-socket callback: a NIC appeared in the system."""
        self.logger.warn('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """True if any interface uses DHCP and DHCP may assign the gateway."""
        for i in self.datastore.query('network.interfaces'):
            # NOTE(review): the extra indentation below is cosmetic only —
            # the return is simply inside the if.
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                    return True

        return False

    def scan_interfaces(self):
        """Sync the datastore with the physical NICs currently present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': None,
                    'media': None,
                    'aliases': []
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open the datastore and config store; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect/log in to the dispatcher, retrying forever; with
        resume=True the registered services are resumed (reconnect path)."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the first connection;
        automatically reconnects when the connection is lost."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing-socket listener thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Publish the networkd JSON schemas (inline item schemas in this
        variant, unlike the $ref-based newer one)."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceFlags.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceCapability.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceMediaOption.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {
                    'type': 'string',
                    'enum': list(netif.InterfaceLinkState.__members__.keys())
                },
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                }
            }
        })

    def main(self):
        """networkd entry point."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')
        self.logger.info('Started')
        self.client.wait_forever()
# Beispiel #11
# 0
class Main(object):
    """dsd daemon entry point: loads configuration, connects the datastore and
    the dispatcher, and serves the dsd.* RPC services."""

    def __init__(self):
        self.config = None
        self.client = None
        self.datastore = None
        self.configstore = None
        self.logger = logging.getLogger('dsd')

    def parse_config(self, filename):
        """Read the JSON middleware config into self.config; exit(1) on error."""
        try:
            # Context manager ensures the handle is closed even on parse errors.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUG FIX: exceptions have no .message attribute on Python 3 —
            # format the exception itself instead.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self, resume=False):
        """Open the datastore named in the config; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'],
                self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect/log in to the dispatcher, retrying forever.

        With resume=True the registered services are resumed (reconnect path).
        """
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('dsd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('dsd.configuration', DSDConfigurationService(self))
                self.client.register_service('dsd.debug', DebugService())
                if resume:
                    self.client.resume_service('dsd.configuration')
                    self.client.resume_service('dsd.debug')

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the first connection;
        automatically reconnects when the connection is lost."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def register_schemas(self):
        # XXX do stuff here? To be determined ...
        pass

    def report_error(self, message, exception):
        """Best-effort crash report to /var/tmp/crash; never raises."""
        if not os.path.isdir('/var/tmp/crash'):
            try:
                os.mkdir('/var/tmp/crash')
            except OSError:
                # Cannot create the crash directory — give up silently.
                return

        report = {
            'timestamp': str(datetime.datetime.now()),
            'type': 'exception',
            'application': 'dsd',
            'message': message,
            'exception': str(exception),
            'traceback': traceback.format_exc()
        }

        try:
            # BUG FIX: NamedTemporaryFile defaults to binary mode ('w+b') but
            # json.dump() writes str — open the report file in text mode.
            with tempfile.NamedTemporaryFile(
                mode='w', dir='/var/tmp/crash', suffix='.json', prefix='report-', delete=False
            ) as f:
                json.dump(report, f, indent=4)
        except Exception:
            # Crash reporting must never take the daemon down.
            pass

    def main(self):
        """dsd entry point."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dsd.log', 'DEBUG')
        setproctitle.setproctitle('dsd')
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.client.resume_service('dsd.configuration')
        self.logger.info('Started')
        self.client.wait_forever()
# Beispiel #12
# 0
class Main(object):
    """clid daemon: hosts a server-side CLI execution context and exposes
    it to the dispatcher as the ``clid.*`` RPC services."""

    def __init__(self):
        # Fixed: the original assigned self.logger and self.config twice;
        # the second logger assignment (root logger) clobbered the intended
        # 'clid' logger.
        self.logger = logging.getLogger('clid')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client connection
        self.plugin_dirs = []
        self.ml = None              # CLI MainLoop instance
        self.context = None         # CLI Context instance

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting whenever
        the connection is closed or the service is logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI Context, load its plugins and create the main loop."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect to the dispatcher (retrying forever), then register and
        resume the clid RPC services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Entry point: parse arguments, connect and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('clid', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
Beispiel #13
0
class Main:
    """networkd daemon.

    Keeps the 'network.interfaces' datastore collection in sync with the
    NICs present in the system, starts DHCP where requested and exposes the
    'networkd.configuration' RPC service over the dispatcher.
    """

    def __init__(self):
        self.config = None          # parsed middleware config (dict)
        self.client = None          # dispatcher Client connection
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None   # routing-socket event source thread
        self.logger = logging.getLogger('networkd')

    def configure_dhcp(self, interface):
        """Start dhclient on *interface*; return True on success or when a
        dhclient pidfile for it already exists."""
        # Check if dhclient is running
        if os.path.exists(os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))):
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        # XXX: start dhclient through launchd in the future
        ret = subprocess.call(['/sbin/dhclient', interface])
        return ret == 0

    def interface_detached(self, name):
        """Log removal of a NIC from the system."""
        # Logger.warning: .warn is a deprecated alias.
        self.logger.warning('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Log arrival of a NIC."""
        self.logger.warning('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """Return True when some interface uses DHCP and DHCP-assigned
        gateways are enabled in the config store."""
        for i in self.datastore.query('network.interfaces'):
            # (fixed stray over-indentation of the return statement; the
            # configstore check stays in the loop so an empty interface list
            # still yields False, as before)
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Reconcile the datastore with the physical NICs present now."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in netif.list_interfaces().values():
            # We want only physical NICs
            if i.cloned:
                continue

            existing.append(i.name)
            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'type': i.type.name
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing)):
            self.datastore.delete('network.interfaces', i['id'])

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit the daemon on error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err) instead of err.message: exceptions have no .message
            # attribute on Python 3, so the original error path itself raised.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore and config store; exit on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher (retrying forever) and register the
        networkd services; resume them when reconnecting."""
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting (with
        service resume) whenever the connection is lost."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the thread watching the OS routing socket for events."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register the network-related JSON schemas with the dispatcher."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceFlags.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceCapability.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceMediaOption.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'type': 'string'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                # NOTE(review): this '$ref' names
                # 'network-interface-media-options' but the schema registered
                # above is 'network-interface-mediaopts' -- confirm which
                # name consumers expect before renaming either.
                'media_options': {'$ref': 'network-interface-media-options'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                }
            }
        })

    def main(self):
        """Entry point: parse args, initialize subsystems and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.logger.info('Started')
        self.client.wait_forever()
Beispiel #14
0
class Main(object):
    """containerd daemon: manages containers and serves their consoles
    over WebSockets (IPv4 and IPv6)."""

    def __init__(self):
        self.client = None          # dispatcher Client connection
        self.datastore = None
        self.config = None          # parsed middleware config (dict)
        self.containers = {}        # container state, keyed by id
        self.tokens = {}            # console access tokens
        self.logger = logging.getLogger('containerd')

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit the daemon on error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err) instead of err.message: exceptions have no .message
            # attribute on Python 3, so the original error path itself raised.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore; exit on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def connect(self):
        """Connect to the dispatcher (retrying forever) and register the
        containerd services."""
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Disconnect from the dispatcher and terminate the process."""
        self.logger.warning('Exiting')
        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        """Return a random 32-character alphanumeric token.

        NOTE(review): uses the non-cryptographic `random` module; if these
        ids guard console access, consider the `secrets` module instead.
        """
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(32))

    def dispatcher_error(self, error):
        """Any dispatcher-level error is fatal for containerd."""
        self.die()

    def main(self):
        """Entry point: initialize and serve WebSocket consoles forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.logger.info('Started')

        # WebSockets server, bound on both the IPv4 and IPv6 wildcard address
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
Beispiel #15
0
class Main(object):
    """dscached daemon: caching layer in front of the configured directory
    services (users, groups, hosts, id mapping), exposed over a local RPC
    server and the dispatcher."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        # Local RPC context with streaming enabled for large result sets.
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None          # dispatcher Client connection
        self.server = None          # local RPC server
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        # TTL caches for resolved users / groups / hosts.
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account', self.account_service)
        self.rpc.register_service_instance('dscached.group', self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap', IdmapService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        """Return every directory currently in the BOUND state."""
        return [d for d in self.directories if d and d.state == DirectoryState.BOUND]

    def get_searched_directories(self):
        """Return BOUND directories, in the configured search order."""
        return [
            d for d in (self.get_directory_by_name(n) for n in self.get_search_order())
            if d and d.state == DirectoryState.BOUND
        ]

    def get_search_order(self):
        """Return the configured directory search order (list of names)."""
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory whose domain name matches, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory with the given name, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose uid/gid range covers the given id.

        uid/gid 0 are special-cased to the 'local' directory. Returns None
        when no directory matches (or neither uid nor gid was given).
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def get_home_directory(self, directory, username):
        """Return the home directory path for *username* in *directory*,
        or '/nonexistent' when no home root is configured."""
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root, f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        """Block until the 'etcd.generation' service becomes available."""
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync('discovery.get_services')
        )

    def init_datastore(self):
        """Open the datastore and config store; exit on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local streaming RPC server on *address* in a
        background daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit the daemon on error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err) instead of err.message: exceptions have no .message
            # attribute on Python 3, so the original error path itself raised.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher (retrying forever) and resume all
        dscached services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in *dir* and run its _init(self) hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Deliberately broad: one broken plugin must not stop the
                # scan; the full traceback is logged. (Was a bare except:.)
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Record a plugin class registered by a scanned plugin module."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Register a single JSON schema with the dispatcher."""
        self.client.register_schema(name, schema)

    def register_schemas(self):
        """Register all locally declared JSON schema objects with the
        dispatcher."""
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__() for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure every directory from the datastore."""
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except Exception:
                # Skip directories that fail to configure; keep the rest.
                # (Was a bare except:.)
                continue

    def load_config(self):
        """Pull cache and search-order settings from the config store."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get('system.home_directory_root')

    def checkin(self):
        """Notify the service manager that startup has completed."""
        checkin()

    def main(self):
        """Entry point: bring up all subsystems, then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('dscached', 'DEBUG')

        setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
Beispiel #16
0
class Main(object):
    """statd daemon: collects statistics from data sources and persists
    them into an HDF5 database, exposing them over the dispatcher."""

    def __init__(self):
        self.client = None          # dispatcher Client connection
        self.server = None          # InputServer instance
        self.datastore = None
        self.hdf = None             # HDF5 file handle (pytables)
        self.hdf_group = None       # the /stats group inside the HDF5 file
        self.config = None          # parsed middleware config (dict)
        self.logger = logging.getLogger('statd')
        self.data_sources = {}      # name -> DataSource

    def parse_config(self, filename):
        """Load the JSON middleware config file; exit the daemon on error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # str(err) instead of err.message: exceptions have no .message
            # attribute on Python 3, so the original error path itself raised.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore; exit on failure."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        """Open (or create) the HDF5 stats database and the /stats group."""
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync('system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)
        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE), mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return (creating if needed) the HDF table *name*.

        Returns None when table access fails; callers must handle that.
        """
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def get_data_source(self, name):
        """Return the DataSource for *name*, creating and announcing its
        event type on first use."""
        if name not in self.data_sources:
            config = DataSourceConfig(self.datastore, name)
            ds = DataSource(self, name, config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def connect(self):
        """Connect to the dispatcher (retrying forever), register the statd
        services and re-announce event types of known data sources."""
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('statd')
                self.client.enable_server()
                self.client.register_service('statd.output', OutputService(self))
                self.client.register_service('statd.debug', DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.debug')
                for i in list(self.data_sources.keys()):
                    self.client.call_sync('plugin.register_event_type', 'statd.output', 'statd.{0}.pulse'.format(i))

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Stop the input server, disconnect and terminate the process."""
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        """Any dispatcher-level error is fatal for statd."""
        self.die()

    def main(self):
        """Entry point: wire signal handlers and subsystems, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle.setproctitle('fnstatd')

        # Signal handlers. The original registered SIGQUIT twice; the
        # duplicate is removed. NOTE(review): the duplicate may have been
        # intended as SIGTERM -- confirm before adding it.
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.server.start()
        self.logger.info('Started')
        self.client.wait_forever()
Beispiel #17
0
class Context(object):
    """schedulerd context: runs calendar tasks via APScheduler and submits
    them to the dispatcher as tasks, recording each run."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client connection
        self.scheduler = None       # APScheduler BackgroundScheduler
        self.active_tasks = {}      # scheduler job id -> dispatcher task id

    def init_datastore(self):
        """Open the datastore and config store; exit on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start APScheduler with a persistent and an in-memory job store."""
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect to the dispatcher (retrying forever) and register the
        scheduler services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Submit a calendar task to the dispatcher, wait for completion and
        record the run; emit a CRITICAL alert when the task did not finish.

        args[0] is the task name, the remaining args are task arguments;
        kwargs must carry the scheduler job 'id' and may carry a display
        'name'.
        """
        # NOTE(review): '******' looks like a redacted value in this copy of
        # the source -- confirm the real RUN_AS_USER value upstream.
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException:
                # exc_info=True captures the exception; the unused 'as e'
                # binding was removed.
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def checkin(self):
        """Notify the service manager that startup has completed."""
        checkin()

    def main(self):
        """Entry point: parse args, start the scheduler and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        # NOTE(review): '-f' is accepted but never consulted in this method.
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
Beispiel #18
0
class Main(object):
    """networkd daemon: tracks physical/cloned network interfaces, runs
    per-interface DHCP clients, reacts to routing-socket events and exposes
    the ``networkd.configuration`` / ``networkd.debug`` RPC services over a
    dispatcher connection."""

    def __init__(self):
        self.config = None                  # path of middleware config file (-c)
        self.client = None                  # dispatcher Client connection
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None           # RoutingSocketEventSource instance
        self.dhcp_clients = {}              # interface name -> dhcp.client.Client
        self.dhcp_lock = threading.RLock()  # guards dhcp_clients mutations
        self.configure_network = None
        self.logger = logging.getLogger('networkd')
        self.default_interface = None
        self.cv = Condition()

    def dhclient_pid(self, interface):
        """Return the PID from dhclient's pidfile for *interface*, or None
        if the pidfile is missing or unparsable."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """Return True if a dhclient process for *interface* is alive."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            # Signal 0 probes for process existence without delivering anything.
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface, block=False, timeout=None):
        """Start a DHCP client on *interface*.

        If *block* is True, wait up to *timeout* seconds for a lease and
        return whether one was obtained; otherwise return True immediately
        after starting the client (or if one is already running).
        The nested callbacks close over ``interface`` and over ``client``,
        which is assigned below before the client is started.
        """
        if interface in self.dhcp_clients:
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def bind(old_lease, lease):
            # Called by the DHCP client on initial bind and on renewal.
            # NOTE(review): the format string has four placeholders but five
            # arguments — the trailing `interface` is ignored by str.format.
            self.logger.info('{0} DHCP lease on {1} from {2}, valid for {3} seconds'.format(
                'Renewed' if old_lease else 'Acquired',
                interface,
                client.server_address,
                lease.lifetime,
                interface
            ))

            if old_lease is None or lease.client_ip != old_lease.client_ip:
                self.logger.info('Assigning IP address {0} to interface {1}'.format(lease.client_ip, interface))
                # NOTE(review): `client_interface` is passed where an address
                # object appears to be expected — confirm against the dhcp
                # client library's lease attributes.
                alias = lease.client_interface
                iface = netif.get_interface(interface)

                if old_lease:
                    # Drop the address from the previous lease before adding
                    # the new one.
                    try:
                        addr = first_or_default(lambda a: a.address == old_lease.client_ip, iface.addresses)
                        if addr:
                            iface.remove_address(addr)
                    except OSError as err:
                        self.logger.error('Cannot remove alias {0}: {1}'.format(old_lease.client_ip, err.strerror))

                try:
                    iface.add_address(netif.InterfaceAddress(netif.AddressFamily.INET, alias))
                except OSError as err:
                    self.logger.error('Cannot add alias to {0}: {1}'.format(interface, err.strerror))

            # Install the DHCP-supplied default route if configured to do so.
            if lease.router and self.configstore.get('network.dhcp.assign_gateway'):
                try:
                    rtable = netif.RoutingTable()
                    newroute = default_route(lease.router)
                    if rtable.default_route_ipv4 != newroute:
                        if rtable.default_route_ipv4:
                            self.logger.info('DHCP default route changed from {0} to {1}'.format(
                                rtable.default_route_ipv4,
                                newroute
                            ))
                            rtable.delete(rtable.default_route_ipv4)
                            rtable.add(default_route(lease.router))
                        else:
                            self.logger.info('Adding default route via {0}'.format(lease.router))
                            rtable.add(default_route(lease.router))
                except OSError as err:
                    self.logger.error('Cannot configure default route: {0}'.format(err.strerror))

            # Push DHCP-supplied DNS servers through resolvconf if configured.
            if lease.dns_addresses and self.configstore.get('network.dhcp.assign_dns'):
                inp = []
                addrs = []
                proc = subprocess.Popen(
                    ['/sbin/resolvconf', '-a', interface],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE
                )

                for i in lease.dns_addresses:
                    # Filter out bogus DNS server addresses
                    if str(i) in ('127.0.0.1', '0.0.0.0', '255.255.255.255'):
                        continue

                    inp.append('nameserver {0}'.format(i))
                    addrs.append(i)

                if lease.domain_name:
                    inp.append('search {0}'.format(lease.domain_name))

                proc.communicate('\n'.join(inp).encode('ascii'))
                proc.wait()
                self.client.emit_event('network.dns.configured', {
                    'addresses': addrs,
                })
                self.logger.info('Updated DNS configuration')
            else:
                # No usable DNS info: remove this interface's resolvconf entry.
                subprocess.call(['/sbin/resolvconf', '-d', interface])
                self.client.emit_event('network.dns.configured', {
                    'addresses': [],
                })
                self.logger.info('Deleted DNS configuration')

        def reject(reason):
            # DHCP server rejected us: tear down and, in non-blocking mode,
            # retry in 60 seconds.
            self.logger.info('DHCP request rejected on {0}: {1}'.format(interface, reason))
            self.deconfigure_dhcp(interface)
            if not block:
                t = threading.Timer(60, self.configure_dhcp, args=(interface,))
                t.start()

        def unbind(lease, reason):
            # Log lease loss; reason maps to a human-readable word.
            reasons = {
                dhcp.client.UnbindReason.EXPIRE: 'expired',
                dhcp.client.UnbindReason.REVOKE: 'revoked'
            }

            self.logger.info('DHCP lease on {0}: {1}'.format(interface, reasons.get(reason, 'revoked')))

        def state_change(state):
            # Propagate DHCP state transitions as interface-change events.
            self.client.emit_event('network.interface.changed', {
                'operation': 'update',
                'ids': [interface]
            })

            self.client.emit_event('network.changed', {
                'operation': 'update'
            })

        with self.dhcp_lock:
            # Hostname (short form) is offered to the server via the client.
            client = dhcp.client.Client(interface, lambda: socket.gethostname().split('.')[0])
            client.on_bind = bind
            client.on_unbind = unbind
            client.on_reject = reject
            client.on_state_change = state_change
            client.start()
            self.dhcp_clients[interface] = client

        if block:
            ret = client.wait_for_bind(timeout)
            if ret is None:
                # Timed out waiting for a lease: clean up the client.
                client.stop()
                del self.dhcp_clients[interface]

            return ret is not None

        return True

    def deconfigure_dhcp(self, interface):
        """Release the lease, stop the DHCP client and forget *interface*."""
        with self.dhcp_lock:
            client = self.dhcp_clients[interface]
            client.release()
            client.stop()
            del self.dhcp_clients[interface]

    def renew_dhcp(self, interface):
        """Ask the running DHCP client on *interface* to renew its lease.

        Raises RpcException (ENXIO) if the interface has no DHCP client,
        or (ENOENT) if the client holds no lease yet.
        """
        if interface not in self.dhcp_clients:
            raise RpcException(errno.ENXIO, 'Interface {0} is not configured for DHCP'.format(interface))

        if not self.dhcp_clients[interface].lease:
            raise RpcException(errno.ENOENT, 'Cannot renew without a lease')

        self.dhcp_clients[interface].request(renew=True, timeout=30)

    def interface_detached(self, name):
        """Log detachment of interface *name* from the system."""
        self.logger.warn('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Log attachment of interface *name* to the system."""
        self.logger.warn('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """Return True if any DHCP-enabled interface may assign the default
        gateway (per the network.dhcp.assign_gateway config flag)."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Synchronize the network.interfaces collection with the physical
        NICs currently present: insert newly plugged ones with defaults and
        delete records for unplugged non-cloned ones."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            # Management/NAT helper interfaces are handled elsewhere.
            if i.name in ('mgmt0', 'nat0'):
                continue

            # VM tap/bridge interfaces are transient; skip them.
            if i.name.startswith(('tap', 'brg')):
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'name': None,
                    'cloned': False,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': i.mtu,
                    'media': None,
                    'mediaopts': [],
                    'aliases': [],
                    'capabilities': {
                        'add': [],
                        'del': []
                    }
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open the datastore from self.config and build the ConfigStore;
        exit the process if the datastore is unreachable."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher over the unix socket and register our
        RPC services, retrying forever on failure. With resume=True the
        services are also resumed (used after a reconnect)."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client, install a reconnect-on-drop error
        handler and establish the initial connection."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the background thread that listens on the routing socket."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register all networkd JSON schemas with the dispatcher. Enum
        schemas are generated from the corresponding netif enumerations so
        they stay in sync with the bindings."""
        self.client.register_schema('NetworkAggregationProtocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('NetworkLaggPortFlags', {
            'type': 'array',
            'items': {'$ref': 'NetworkLaggPortFlagsItems'}
        })

        self.client.register_schema('NetworkLaggPortFlagsItems', {
            'type': 'string',
            'enum': list(netif.LaggPortFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceFlags', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceFlagsItems'}
        })

        self.client.register_schema('NetworkInterfaceFlagsItems', {
            'type': 'string',
            'enum': list(netif.InterfaceFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceCapabilities', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceCapabilitiesItems'}
        })

        self.client.register_schema('NetworkInterfaceCapabilitiesItems', {
            'type': 'string',
            'enum': list(netif.InterfaceCapability.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceMediaopts', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceMediaoptsItems'}
        })

        self.client.register_schema('NetworkInterfaceMediaoptsItems', {
            'type': 'string',
            'enum': list(netif.InterfaceMediaOption.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceNd6Flag', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceNd6FlagItems'}
        })

        self.client.register_schema('NetworkInterfaceNd6FlagItems', {
            'type': 'string',
            'enum': list(netif.NeighborDiscoveryFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceType', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('NetworkInterfaceDhcpState', {
            'type': 'string',
            'enum': [
                'INIT',
                'SELECTING',
                'REQUESTING',
                'INIT_REBOOT',
                'REBOOTING',
                'BOUND',
                'RENEWING',
                'REBINDING'
            ]
        })

        self.client.register_schema('NetworkInterfaceStatus', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'$ref': 'NetworkInterfaceStatusLinkstate'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'active_media_type': {'type': 'string'},
                'active_media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'NetworkInterfaceMediaopts'},
                'supported_media': {
                    'type': 'array',
                    'items': {'type': 'string'}
                },
                'cloned': {'type': 'boolean'},
                'capabilities': {'$ref': 'NetworkInterfaceCapabilities'},
                'flags': {'$ref': 'NetworkInterfaceFlags'},
                'dhcp': {
                    'type': 'object',
                    'properties': {
                        'state': {'$ref': 'NetworkInterfaceDhcpState'},
                        'server_address': {'type': 'string'},
                        'server_name': {'type': 'string'},
                        'lease_starts_at': {'type': 'datetime'},
                        'lease_ends_at': {'type': 'datetime'}
                    }
                },
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'NetworkInterfaceAlias'}
                },
                'nd6_flags': {
                    'type': 'array',
                    'items': {'$ref': 'NetworkInterfaceNd6Flag'}
                },
                'ports': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {
                                'type': 'object',
                                'properties': {
                                    'name': {'type': 'string'},
                                    'flags': {'$ref': 'NetworkLaggPortFlags'}
                                }
                            }
                        }
                    ]
                },
                'members': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {'type': 'string'}
                        }
                    ]
                },
                'parent': {'type': ['string', 'null']},
                'tag': {'type': ['integer', 'null']}
            }
        })

        self.client.register_schema('NetworkInterfaceStatusLinkstate', {
            'type': 'string',
            'enum': list(netif.InterfaceLinkState.__members__.keys())
        })

    def checkin(self):
        """Delegate to the module-level checkin() helper (name shadowing is
        intentional; the bare call resolves through module globals)."""
        checkin()

    def main(self):
        """Daemon entry point: parse options, configure logging, bring up
        datastore, dispatcher and routing socket, apply the initial network
        configuration and then serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('networkd', 'DEBUG')
        setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')

        # NOTE(review): configure_network is initialized to None in __init__
        # and must be assigned elsewhere before main() runs — confirm.
        for i in self.configure_network():
            self.logger.info('Initial network configuration: {0}, {1}'.format(*i))

        self.checkin()
        self.logger.info('Started')
        self.client.wait_forever()
# Beispiel #19
# 0
class Context(object):
    """CLI application context: owns the dispatcher connection, plugin
    registry, root namespace tree, task callbacks and the event queue used
    to divert events while a blocking task is in progress."""

    def __init__(self):
        self.hostname = None
        self.connection = Client()
        self.ml = None                      # MainLoop / readline wrapper, set externally
        self.logger = logging.getLogger('cli')
        self.plugin_dirs = []
        self.task_callbacks = {}            # task id -> callable(state)
        self.plugins = {}                   # plugin path -> loaded module
        self.variables = VariableStore()
        self.root_ns = RootNamespace('')
        self.event_masks = ['*']
        self.event_divert = False           # True while submit_task shows progress
        self.event_queue = six.moves.queue.Queue()
        self.keepalive_timer = None
        self.argparse_parser = None
        # Publish this context as the process-wide singleton.
        config.instance = self

    @property
    def is_interactive(self):
        """True when stdout is a TTY (interactive session)."""
        return os.isatty(sys.stdout.fileno())

    def start(self):
        """Discover plugins, then connect to the configured host."""
        self.discover_plugins()
        self.connect()

    def connect(self):
        """Open the dispatcher connection to self.hostname; on socket errors
        print help and exit(1)."""
        try:
            self.connection.connect(self.hostname)
        except socket_error as err:
            output_msg(_(
                "Could not connect to host: {0} due to error: {1}".format(self.hostname, err)
            ))
            self.argparse_parser.print_help()
            sys.exit(1)

    def login(self, user, password):
        """Authenticate, subscribe to events and wire up handlers; exits(1)
        on wrong credentials. Finally gives plugins a login hook."""
        try:
            self.connection.login_user(user, password)
            self.connection.subscribe_events(*EVENT_MASKS)
            self.connection.on_event(self.handle_event)
            self.connection.on_error(self.connection_error)

        except RpcException as e:
            if e.code == errno.EACCES:
                self.connection.disconnect()
                output_msg(_("Wrong username or password"))
                sys.exit(1)

        self.login_plugins()

    def keepalive(self):
        """Ping the server if the connection is open (keepalive timer body)."""
        if self.connection.opened:
            self.connection.call_sync('management.ping')

    def read_middleware_config_file(self, file):
        """
        If there is a cli['plugin-dirs'] in middleware.conf use that,
        otherwise use the default plugins dir within cli namespace
        """
        # NOTE(review): plug_dirs is never assigned in the config branch, so
        # the default plugin dir below is ALWAYS appended, even when the
        # config provides plugin-dirs. That contradicts the docstring's
        # "otherwise" — confirm whether merging is intended.
        plug_dirs = None
        if file:
            with open(file, 'r') as f:
                data = json.load(f)

            if 'cli' in data and 'plugin-dirs' in data['cli']:

                if type(data['cli']['plugin-dirs']) != list:
                    return

                self.plugin_dirs += data['cli']['plugin-dirs']

        if plug_dirs is None:
            plug_dirs = os.path.dirname(os.path.realpath(__file__))
            plug_dirs = os.path.join(plug_dirs, 'plugins')
            self.plugin_dirs += [plug_dirs]


    def discover_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for dir in self.plugin_dirs:
            self.logger.debug(_("Searching for plugins in %s"), dir)
            self.__discover_plugin_dir(dir)

    def login_plugins(self):
        """Invoke the optional _login hook on every loaded plugin."""
        for i in list(self.plugins.values()):
            if hasattr(i, '_login'):
                i._login(self)

    def __discover_plugin_dir(self, dir):
        # Load every *.py file directly inside dir (non-recursive).
        for i in glob.glob1(dir, "*.py"):
            self.__try_load_plugin(os.path.join(dir, i))

    def __try_load_plugin(self, path):
        # Import the file as a module and register it if it exposes _init.
        # NOTE(review): imp.load_source is deprecated (removed in 3.12) —
        # importlib is the modern replacement; confirm target Python version.
        if path in self.plugins:
            return

        self.logger.debug(_("Loading plugin from %s"), path)
        name, ext = os.path.splitext(os.path.basename(path))
        plugin = imp.load_source(name, path)

        if hasattr(plugin, '_init'):
            plugin._init(self)
            self.plugins[path] = plugin

    def __try_reconnect(self):
        # Loop until the connection is re-established; re-auth either with
        # the local user (loopback) or the previously issued token.
        output_lock.acquire()
        self.ml.blank_readline()

        output_msg(_('Connection lost! Trying to reconnect...'))
        retries = 0
        while True:
            retries += 1
            try:
                time.sleep(2)
                self.connect()
                try:
                    if self.hostname == '127.0.0.1':
                        self.connection.login_user(getpass.getuser(), '')
                    else:
                        self.connection.login_token(self.connection.token)

                    self.connection.subscribe_events(*EVENT_MASKS)
                except RpcException:
                    output_msg(_("Reauthentication failed (most likely token expired or server was restarted)"))
                    sys.exit(1)
                break
            except Exception as e:
                output_msg(_('Cannot reconnect: {0}'.format(str(e))))

        self.ml.restore_readline()
        output_lock.release()

    def attach_namespace(self, path, ns):
        """Register namespace *ns* at slash-separated *path* under root,
        walking intermediate namespaces; warn and bail if a segment is
        missing."""
        splitpath = path.split('/')
        ptr = self.root_ns
        # NOTE(review): namespaces() apparently returns a callable that is
        # invoked again below (ptr_namespaces()) — confirm the Namespace API.
        ptr_namespaces = ptr.namespaces()

        for n in splitpath[1:-1]:

            if n not in list(ptr_namespaces().keys()):
                self.logger.warn(_("Cannot attach to namespace %s"), path)
                return

            ptr = ptr_namespaces()[n]

        ptr.register_namespace(ns)

    def connection_error(self, event, **kwargs):
        """Client error handler: exit on logout, reconnect on closed
        connection."""
        if event == ClientError.LOGOUT:
            output_msg('Logged out from server.')
            self.connection.disconnect()
            sys.exit(0)

        if event == ClientError.CONNECTION_CLOSED:
            time.sleep(1)
            self.__try_reconnect()
            return

    def handle_event(self, event, data):
        """Dispatch task.updated events to registered callbacks, then print
        the event."""
        if event == 'task.updated':
            if data['id'] in self.task_callbacks:
                self.handle_task_callback(data)

        self.print_event(event, data)

    def handle_task_callback(self, data):
        """Fire the stored callback once the task reaches a terminal state."""
        if data['state'] in ('FINISHED', 'CANCELLED', 'ABORTED', 'FAILED'):
            self.task_callbacks[data['id']](data['state'])

    def print_event(self, event, data):
        """Print a translated event to the terminal, queueing it instead
        while event_divert is active; task.progress events are suppressed."""
        if self.event_divert:
            self.event_queue.put((event, data))
            return

        if event == 'task.progress':
            return

        output_lock.acquire()
        self.ml.blank_readline()

        translation = events.translate(self, event, data)
        if translation:
            output_msg(translation)
            if 'state' in data:
                if data['state'] == 'FAILED':
                    # Fetch and show the failure details for failed tasks.
                    status = self.connection.call_sync('task.status', data['id'])
                    output_msg(_(
                        "Task #{0} error: {1}".format(
                            data['id'],
                            status['error'].get('message', '') if status.get('error') else ''
                        )
                    ))

        sys.stdout.flush()
        self.ml.restore_readline()
        output_lock.release()

    def call_sync(self, name, *args, **kwargs):
        """Synchronous RPC call; result wrapped for CLI consumption."""
        return wrap(self.connection.call_sync(name, *args, **kwargs))

    def call_task_sync(self, name, *args, **kwargs):
        """Submit a task and wait for it, suppressing the prompt meanwhile.
        Note: kwargs are accepted but not forwarded."""
        self.ml.skip_prompt_print = True
        wrapped_result = wrap(self.connection.call_task_sync(name, *args))
        self.ml.skip_prompt_print = False
        return wrapped_result

    def submit_task(self, name, *args, **kwargs):
        """Submit a task. Non-blocking mode registers an optional callback
        and returns the task id immediately; blocking mode diverts events to
        render a progress bar until the task finishes or fails, and Ctrl+C
        sends an abort."""
        callback = kwargs.pop('callback', None)
        message_formatter = kwargs.pop('message_formatter', None)

        if not self.variables.get('tasks_blocking'):
            tid = self.connection.call_sync('task.submit', name, args)
            if callback:
                self.task_callbacks[tid] = callback

            return tid
        else:
            output_msg(_("Hit Ctrl+C to terminate task if needed"))
            self.event_divert = True
            tid = self.connection.call_sync('task.submit', name, args)
            progress = ProgressBar()
            try:
                while True:
                    event, data = self.event_queue.get()

                    if event == 'task.progress' and data['id'] == tid:
                        message = data['message']
                        if isinstance(message_formatter, collections.Callable):
                            message = message_formatter(message)
                        progress.update(percentage=data['percentage'], message=message)

                    if event == 'task.updated' and data['id'] == tid:
                        progress.update(message=data['state'])
                        if data['state'] == 'FINISHED':
                            progress.finish()
                            break

                        if data['state'] == 'FAILED':
                            print()
                            break
            except KeyboardInterrupt:
                print()
                output_msg(_("User requested task termination. Task abort signal sent"))
                self.call_sync('task.abort', tid)

        self.event_divert = False
        return tid
# Beispiel #20
# 0
class Main(object):
    """etcd daemon: discovers configuration-file templates in plugin
    directories and renders them on request via the ``etcd.generation`` /
    ``etcd.management`` RPC services exposed over the dispatcher."""

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None            # mount point for generated files (e.g. /etc)
        self.configfile = None      # path of middleware config file (-c)
        self.config = None          # parsed JSON config
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client connection
        self.plugin_dirs = []       # template search paths from the config
        self.renderers = {}         # extension -> renderer instance
        self.managed_files = {}     # relative name -> absolute template path

    def init_datastore(self):
        """Open the datastore and build the ConfigStore; exit the process
        if the datastore is unreachable."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client, install a reconnect-on-drop error
        handler and establish the initial connection."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect to the dispatcher over the unix socket, register and
        resume our RPC services, retrying forever on failure."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation',
                                             FileGenerationService(self))
                self.client.register_service('etcd.management',
                                             ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template type."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON middleware config from *filename* into self.config
        and take the plugin directory list from its etcd section.

        Exits the process on unreadable or malformed files.
        """
        try:
            # `with` guarantees the file is closed even if json.load raises
            # (the original leaked the handle on a JSON error).
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fix: Python 3 exceptions have no `.message` attribute; the old
            # `err.message` raised AttributeError on this error path.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured plugin directory for templates."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Walk *dir* and register each file whose extension has a renderer;
        the first occurrence of a relative name wins."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the managed template registered under *file_path*.

        Raises RpcException (ENOENT) for unknown files and RuntimeError when
        no renderer matches; rendering errors are converted into a warning
        plus a placeholder comment so /etc generation keeps going.
        """
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warn('Cannot generate file {0}: {1}'.format(
                file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event to the dispatcher over the client connection."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse options, configure logging, load config
        and plugins, bring up renderers/datastore/dispatcher, then serve
        forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f',
                            action='store_true',
                            default=False,
                            help='Run in foreground')
        parser.add_argument('mountpoint',
                            metavar='MOUNTPOINT',
                            default='/etc',
                            help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
# Beispiel #21
# 0
class Main(object):
    """alertd daemon: matches incoming alerts against stored filters and hands
    them to the configured emitters, periodically re-emitting active alerts."""

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.emitters = {}

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the daemon thread that re-emits still-active alerts."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load the JSON configuration and remember the plugin directories.

        Exits the process if the file cannot be read or parsed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: Python 3 exceptions have no .message attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and register alertd RPC services,
        retrying every second until the connection succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management',
                                             ManagementService(self))
                self.client.register_service('alertd.alert',
                                             AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import each .py file in *dir* and call its _init() hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # are not swallowed by a broken plugin.
                self.logger.error('Cannot initialize plugin {0}'.format(f),
                                  exc_info=True)

    def emit_alert(self, alert):
        """Route *alert* through every matching alert filter's emitter and
        update its send bookkeeping in the datastore."""
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(
            alert['id'], alert['clazz']))
        for i in self.datastore.query(
                'alert.filters', ('or', [('clazz', '=', None),
                                         ('clazz', '=', alert['clazz'])])):
            for pr in i.get('predicates', []):
                if pr['operator'] not in operators_table:
                    continue

                try:
                    if not operators_table[pr['operator']](
                            alert.properties.get(pr['property']), pr['value']):
                        break
                except Exception:
                    # A predicate that raises is treated as a non-match;
                    # narrowed from a bare except.
                    continue
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning(
                            'Invalid emitter {0} for alert filter {1}'.format(
                                i['emitter'], i['id']))
                        continue

                    self.logger.debug(
                        'Alert <id:{0}> matched filter {1}'.format(
                            alert['id'], i['id']))
                    # First delivery vs. repeat delivery; one-shot alerts are
                    # never re-emitted.
                    if alert['send_count'] > 0:
                        if not alert['one_shot']:
                            emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error(
                        'Cannot emit alert <id:{0}> using {1}: {2}'.format(
                            alert['id'], i['emitter'], str(err)))

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()
        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Mark *alert* inactive and persist the cancellation timestamp."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(
            alert['id'], alert['clazz']))

        alert.update({'active': False, 'cancelled': datetime.utcnow()})

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Instantiate emitter class *cls* and register it under *name*."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(
            name, cls))

    def reminder_thread(self):
        """Every REMINDER_SECONDS, re-emit active, non-dismissed alerts whose
        severity schedule says they are due again."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                if not interval:
                    continue

                if last_emission + timedelta(
                        seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        """Forward to the global checkin() hook."""
        checkin()

    def main(self):
        """Entry point: parse arguments and bring the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        configure_logging('alertd', 'DEBUG')

        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
Beispiel #22
0
class Main(object):
    """etcd daemon: renders template-driven configuration files for /etc."""

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        self.managed_files = {}

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect to the dispatcher and register etcd RPC services,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template extension."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON configuration and remember the plugin directories.

        Exits the process if the file cannot be read or parsed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: Python 3 exceptions have no .message attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured plugin directory for managed templates."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Walk *dir* and register every file with a known renderer extension
        as a managed file; the first occurrence of a name wins."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files:
                    continue

                if ext in TEMPLATE_RENDERERS:
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the template backing *file_path* and return the output text.

        Raises RpcException(ENOENT) for unmanaged paths; render failures are
        returned as a commented failure marker instead of propagating.
        """
        if file_path not in self.managed_files:
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers:
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            # Bug fix: logger.warn() is a deprecated alias of warning().
            self.logger.warning('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Entry point: parse arguments and bring the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
Beispiel #23
0
class Main(object):
    """neighbord daemon: advertises this host's services via discovery plugins."""

    def __init__(self):
        # Bug fix: 'config' and 'logger' were each assigned twice; the second
        # logger assignment replaced the named 'neighbord' logger with the
        # root logger.
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        """Load the JSON configuration and remember the plugin directories.

        Exits the process if the file cannot be read or parsed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: Python 3 exceptions have no .message attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import each .py file in *dir* and call its _init() hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # are not swallowed by a broken plugin.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate plugin class *cls* and register it under *name*."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service record through every registered plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce the standard service set (web UI, http, ssh, sftp) for this host."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect to the dispatcher and register neighbord RPC services,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def checkin(self):
        """Forward to the global checkin() hook."""
        checkin()

    def main(self):
        """Entry point: parse arguments and bring the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')

        setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.checkin()
        self.client.wait_forever()
Beispiel #24
0
class Main(object):
    """statd daemon: collects data points into an HDF5 database and serves
    statistics over the dispatcher RPC bus (gevent-based)."""

    def __init__(self):
        self.client = None
        self.server = None
        self.datastore = None
        self.hdf = None
        self.hdf_group = None
        self.config = None
        self.logger = logging.getLogger('statd')
        self.data_sources = {}

    def init_datastore(self):
        """Open the datastore; exit the process on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_database(self):
        """Open (or create) the HDF5 stats database and its /stats group."""
        # adding this try/except till system-dataset plugin is added back in in full fidelity
        # just a hack (since that directory's data will not persist)
        # Please remove this when system-dataset plugin is added back in
        try:
            directory = self.client.call_sync(
                'system_dataset.request_directory', 'statd')
        except RpcException:
            directory = '/var/tmp/statd'
            if not os.path.exists(directory):
                os.makedirs(directory)
        self.hdf = tables.open_file(os.path.join(directory, DEFAULT_DBFILE),
                                    mode='a')
        if not hasattr(self.hdf.root, 'stats'):
            self.hdf.create_group('/', 'stats')

        self.hdf_group = self.hdf.root.stats

    def request_table(self, name):
        """Return the HDF5 table *name*, creating it on first use.

        NOTE(review): on error this logs and implicitly returns None —
        callers presumably tolerate a missing table; confirm.
        """
        try:
            if hasattr(self.hdf_group, name):
                return getattr(self.hdf_group, name)

            return self.hdf.create_table(self.hdf_group, name, DataPoint, name)
        except Exception as e:
            self.logger.error(str(e))

    def init_alert_config(self, name):
        """Return the per-source alert config, falling back to 'default'."""
        config_name = name if self.datastore.exists('statd.alerts',
                                                    ('id', '=',
                                                     name)) else 'default'
        alert_config = self.datastore.get_by_id('statd.alerts', config_name)
        return alert_config

    def get_data_source(self, name):
        """Return the DataSource for *name*, creating it and registering its
        pulse event type on first access."""
        if name not in list(self.data_sources.keys()):
            config = DataSourceConfig(self.datastore, name)
            alert_config = self.init_alert_config(name)
            ds = DataSource(self, name, config, alert_config)
            self.data_sources[name] = ds
            self.client.call_sync('plugin.register_event_type', 'statd.output',
                                  'statd.{0}.pulse'.format(name))

        return self.data_sources[name]

    def register_schemas(self):
        """Register the GetStatsParams/GetStatsResult JSON schemas with the
        dispatcher."""
        self.client.register_schema(
            'GetStatsParams', {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'start': {
                        'type': 'datetime'
                    },
                    'end': {
                        'type': 'datetime'
                    },
                    'timespan': {
                        'type': 'integer'
                    },
                    'frequency': {
                        'type': 'string'
                    }
                }
            })

        self.client.register_schema(
            'GetStatsResult', {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'data': {
                        'type': 'array',
                    }
                }
            })

    def connect(self):
        """Connect to the dispatcher, register statd services/schemas and
        re-register event types for known data sources, retrying every
        second until the connection succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('statd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('statd.output',
                                             OutputService(self))
                self.client.register_service('statd.alert', AlertService(self))
                self.client.register_service('statd.debug',
                                             DebugService(gevent=True))
                self.client.resume_service('statd.output')
                self.client.resume_service('statd.alert')
                self.client.resume_service('statd.debug')
                # Re-announce pulse event types after a reconnect.
                for i in list(self.data_sources.keys()):
                    self.client.call_sync('plugin.register_event_type',
                                          'statd.output',
                                          'statd.{0}.pulse'.format(i))

                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def die(self):
        """Stop the input server, disconnect and exit cleanly."""
        self.logger.warning('Exiting')
        self.server.stop()
        self.client.disconnect()
        sys.exit(0)

    def dispatcher_error(self, error):
        """Any dispatcher-level error tears the daemon down."""
        self.die()

    def checkin(self):
        """Forward to the global checkin() hook."""
        checkin()

    def main(self):
        """Entry point: parse arguments, install signal handlers and bring
        the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/fnstatd.log', 'DEBUG')
        setproctitle('fnstatd')

        # Signal handlers
        # NOTE(review): newer gevent versions rename this to
        # gevent.signal_handler — confirm against the pinned gevent.
        gevent.signal(signal.SIGQUIT, self.die)
        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGINT, self.die)

        self.server = InputServer(self)
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_database()
        self.server.start()
        self.logger.info('Started')
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """dscached daemon: caches directory-service account and group lookups."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def parse_config(self, filename):
        """Load the JSON configuration and remember the plugin directories.

        Exits the process if the file cannot be read or parsed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: Python 3 exceptions have no .message attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and register dscached RPC services,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server()
                self.client.register_service('dscached.account', AccountService(self))
                self.client.register_service('dscached.group', GroupService(self))
                self.client.register_service('dscached.debug', DebugService())
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import each .py file in *dir* and call its _init() hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # are not swallowed by a broken plugin.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate plugin class *cls* and register it under *name*."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def main(self):
        """Entry point: parse arguments and bring the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.client.wait_forever()
Beispiel #26
0
class Main(object):
    """neighbord daemon: advertises this host's services via discovery plugins."""

    def __init__(self):
        # Bug fix: 'config' and 'logger' were each assigned twice; the second
        # logger assignment replaced the named 'neighbord' logger with the
        # root logger.
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        """Load the JSON configuration and remember the plugin directories.

        Exits the process if the file cannot be read or parsed.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: Python 3 exceptions have no .message attribute.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore and config store; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect; reconnect when the link drops."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import each .py file in *dir* and call its _init() hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # are not swallowed by a broken plugin.
                self.logger.error('Cannot initialize plugin {0}'.format(f),
                                  exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate plugin class *cls* and register it under *name*."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service record through every registered plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce the standard service set (web UI, http, ssh, sftp) for this host."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(
                str(err)))

    def connect(self):
        """Connect to the dispatcher and register neighbord RPC services,
        retrying every second until successful."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management',
                                             ManagementService(self))
                self.client.register_service('neighbord.discovery',
                                             DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def main(self):
        """Entry point: parse arguments and bring the daemon up."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')

        setproctitle.setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
Beispiel #27
0
class RESTApi(object):
    """Falcon WSGI application exposing dispatcher RPC services over REST.

    Fix over the original: ``load_plugins`` used a bare ``except:``, which
    also intercepted KeyboardInterrupt/SystemExit; narrowed to
    ``except Exception:`` (the error is still re-raised, so failures remain
    fatal either way).
    """

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        # Shut down cleanly on Ctrl-C.
        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Fetch task, schema and per-service RPC method metadata."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync(
                'discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Load every *.py plugin from ../plugins and call its _init hook.

        A plugin that fails to load is logged with traceback and the error
        is re-raised (plugin failures are fatal).
        """
        pluginsdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                # NOTE: SourceFileLoader.load_module() is deprecated in favor
                # of exec_module(); kept for behavior compatibility.
                loader = importlib.machinery.SourceFileLoader(
                    i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except Exception:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise
            mod._init(self)

    def connect(self):
        """Connect to the dispatcher, retrying every second until it succeeds."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        """WSGI entry point; strips the /api/v2.0 prefix behind a proxy."""
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO',
                                               '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate and retain a CRUD resource class."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        """Instantiate a single-item resource class (self-registering)."""
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        """Instantiate a plain resource class (self-registering)."""
        klass(self)

    def run(self):
        """Connect, load metadata and plugins, then serve HTTP forever."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()

        server4 = WSGIServer(('0.0.0.0', 8889),
                             self,
                             handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """Stop all server greenlets and exit."""
        gevent.killall(self._threads)
        sys.exit(0)
class Main(object):
    """containerd daemon context.

    Owns the dispatcher connection, datastore handles, the VM management
    network (bridge + pf NAT + EC2-style metadata server) and the
    WebSockets console endpoint.
    """

    def __init__(self):
        self.client = None
        self.datastore = None
        self.configstore = None
        self.config = None
        self.mgmt = None
        self.vm_started = Event()
        self.containers = {}
        self.tokens = {}
        self.logger = logging.getLogger('containerd')
        self.bridge_interface = None
        # Indices currently handed out by allocate_nmdm().
        self.used_nmdms = []

    def init_datastore(self):
        """Open the datastore named in self.config; exit the daemon on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def allocate_nmdm(self):
        """Reserve and return the first free nmdm index in [0, 255).

        Returns None when all indices are taken (loop falls through).
        """
        # presumably indices of FreeBSD /dev/nmdm null-modem devices used
        # for VM serial consoles — TODO confirm
        for i in range(0, 255):
            if i not in self.used_nmdms:
                self.used_nmdms.append(i)
                return i

    def release_nmdm(self, index):
        """Return a previously allocated nmdm index to the free pool."""
        self.used_nmdms.remove(index)

    def connect(self):
        """Connect to the dispatcher (retrying every second) and register
        the containerd RPC services over that connection."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('containerd')
                self.client.enable_server()
                self.client.register_service('containerd.management', ManagementService(self))
                self.client.register_service('containerd.debug', DebugService(gevent=True, builtins={"context": self}))
                self.client.resume_service('containerd.management')
                self.client.resume_service('containerd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically when the
        connection is closed or logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.use_bursts = True
        self.client.on_error(on_error)
        self.connect()

    def init_mgmt(self):
        """Bring up the management bridge and add the metadata address."""
        self.mgmt = ManagementNetwork(self, MGMT_INTERFACE, MGMT_ADDR)
        self.mgmt.up()
        # 169.254.169.254 is the conventional EC2 metadata service address.
        self.mgmt.bridge_if.add_address(netif.InterfaceAddress(
            netif.AddressFamily.INET,
            ipaddress.ip_interface('169.254.169.254/32')
        ))

    def init_nat(self):
        """Install a pf NAT rule translating the management subnet out of
        the default-route interface; no-op if there is no default route."""
        default_if = self.client.call_sync('networkd.configuration.get_default_interface')
        if not default_if:
            self.logger.warning('No default route interface; not configuring NAT')
            return

        p = pf.PF()

        # Try to find and remove existing NAT rules for the same subnet
        oldrule = first_or_default(
            lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
            p.get_rules('nat')
        )

        if oldrule:
            p.delete_rule('nat', oldrule.index)

        rule = pf.Rule()
        rule.src.address.address = MGMT_ADDR.network.network_address
        rule.src.address.netmask = MGMT_ADDR.netmask
        rule.action = pf.RuleAction.NAT
        rule.af = socket.AF_INET
        rule.ifname = default_if
        rule.redirect_pool.append(pf.Address(ifname=default_if))
        rule.proxy_ports = [50001, 65535]
        p.append_rule('nat', rule)

        try:
            p.enable()
        except OSError as err:
            # pf already being enabled (EEXIST) is fine; anything else is real.
            if err.errno != errno.EEXIST:
                raise err

    def init_ec2(self):
        """Start the EC2-style metadata HTTP server."""
        self.ec2 = EC2MetadataServer(self)
        self.ec2.start()

    def vm_by_mgmt_mac(self, mac):
        """Return the container/VM owning the tap interface with *mac*, or None."""
        for i in self.containers.values():
            for tapmac in i.tap_interfaces.values():
                if tapmac == mac:
                    return i

        return None

    def vm_by_mgmt_ip(self, ip):
        """Return the VM holding DHCP lease *ip* on the management network.

        Returns None when no allocation matches (loop falls through).
        """
        for i in self.mgmt.allocations.values():
            if i.lease.client_ip == ip:
                return i.vm()

    def die(self):
        """Stop all containers, drop the dispatcher connection and exit."""
        self.logger.warning('Exiting')
        for i in self.containers.values():
            i.stop(True)

        self.client.disconnect()
        sys.exit(0)

    def generate_id(self):
        """Return a random 32-character alphanumeric token."""
        # NOTE(review): uses random, not secrets — adequate only if these
        # tokens need no cryptographic strength; confirm.
        return ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])

    def dispatcher_error(self, error):
        """Fatal dispatcher error handler: shut the daemon down."""
        self.die()

    def main(self):
        """Daemon entry point: wire everything up and serve consoles."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)

        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server
        kwargs = {}
        # Separate IPv4 and IPv6 listeners on the same port.
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
Beispiel #29
0
class Context(object):
    """schedulerd daemon context.

    Runs calendar jobs via a MongoDB-backed APScheduler and submits each run
    to the dispatcher as a middleware task.

    Fix over the original: removed an unused ``except ... as e`` binding in
    run_job and reflowed a formatter-mangled dict literal (no behavior change).
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar job id -> dispatcher task id for jobs in flight.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore named in self.config; exit the daemon on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start a background APScheduler backed by the MongoDB job store."""
        store = MongoDBJobStore(database='freenas',
                                collection='calendar_tasks',
                                client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store},
                                             timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect to the dispatcher (retrying every second) and register
        the scheduler RPC services over that connection."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management',
                                             ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit a calendar task and wait for it.

        args[0] is the task name, args[1:] its arguments; kwargs carries the
        calendar job 'id' and 'name'. Emits a CRITICAL alert when the task
        does not finish successfully, then records the run in the datastore.
        """
        # NOTE(review): '******' looks like a scrubbed/placeholder value for
        # RUN_AS_USER — confirm the intended user name.
        tid = self.client.call_sync(
            'task.submit_with_env', args[0], args[1:], {
                'RUN_AS_USER': '******',
                'CALENDAR_TASK_NAME': kwargs.get('name')
            })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']),
                })
            except RpcException:
                # Alerting is best-effort; log with traceback and move on.
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse arguments, set up stores and connect."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f',
                            action='store_true',
                            default=False,
                            help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
Beispiel #30
0
class RESTApi(object):
    """Falcon WSGI application exposing dispatcher RPC services over REST.

    Fix over the original: ``load_plugins`` used a bare ``except:``, which
    also intercepted KeyboardInterrupt/SystemExit; narrowed to
    ``except Exception:`` (the error is still re-raised, so failures remain
    fatal either way).
    """

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        # Shut down cleanly on Ctrl-C.
        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Fetch task, schema and per-service RPC method metadata."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync('discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Load every *.py plugin from ../plugins and call its _init hook.

        A plugin that fails to load is logged with traceback and the error
        is re-raised (plugin failures are fatal).
        """
        pluginsdir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                # NOTE: SourceFileLoader.load_module() is deprecated in favor
                # of exec_module(); kept for behavior compatibility.
                loader = importlib.machinery.SourceFileLoader(i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except Exception:
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise
            mod._init(self)

    def connect(self):
        """Connect to the dispatcher, retrying every second until it succeeds."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        """WSGI entry point; strips the /api/v2.0 prefix behind a proxy."""
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate and retain a CRUD resource class."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        """Instantiate a single-item resource class (self-registering)."""
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        """Instantiate a plain resource class (self-registering)."""
        klass(self)

    def run(self):
        """Connect, load metadata and plugins, then serve HTTP forever."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()

        server4 = WSGIServer(('', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """Stop all server greenlets and exit."""
        gevent.killall(self._threads)
        sys.exit(0)
Beispiel #31
0
class Context(object):
    """serviced daemon context: job supervisor.

    Tracks job processes via kqueue EVFILT_PROC events, serves management
    RPC on a local socket, and mirrors it over the dispatcher connection
    when one is available.
    """

    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Connect to the dispatcher unless a live connection already exists."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on *address* in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        # World-writable socket so unprivileged clients can reach serviced.
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Register newly available dependency *targets*.

        After a 2-second settle delay, starts every stopped job whose
        requirements are now a subset of the provided targets.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the job whose pid is *pid*, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main loop: consume kqueue process events forever.

        Events for known pids go to their job; forked children of a known
        job (in the same process group) are adopted as anonymous jobs,
        anything else gets untracked.
        """
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            # Child that exited immediately: nothing to adopt.
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            # For NOTE_CHILD events, ev.data carries the parent pid.
                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                # Process already gone; nothing to do.
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        """Subscribe to exit/exec/fork kqueue events for *pid*, following
        its forks via KQ_NOTE_TRACK."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Unsubscribe from kqueue events for *pid*; ignore if already gone."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and mirror it to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Connect to the dispatcher (retrying every second) and resume the
        serviced RPC services over that connection."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Load and start the one-shot bootstrap job in a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect from the dispatcher, close the server socket and exit."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """Daemon entry point."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Beispiel #32
0
class Main(object):
    """clid daemon: hosts a CLI instance and exposes it over the dispatcher.

    Fix over the original: __init__ assigned both self.logger and
    self.config twice (the second logger assignment replaced the named
    'clid' logger with the root logger). The redundant assignments are
    removed, keeping the named logger to match the other daemons.
    """

    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI context, load its plugins and prepare the main loop."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect to the dispatcher (retrying every second) and register
        the clid RPC services over that connection."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/clid.log', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
Beispiel #33
0
class Context(object):
    """debugd context: maintains an outbound msock tunnel to the support
    proxy and bridges it to the local dispatcher."""

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None
        self.connected_at = None
        self.cv = Condition()
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        """Read config and start the local control server in a daemon thread.

        SIGUSR2 triggers a (re)connection attempt to the support proxy.
        """
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect to the local dispatcher, retrying every second."""
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file at *path*; exit the daemon if unreadable."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        """Start the background keepalive loop toward the support proxy.

        With discard=True a fresh connection id is generated on next connect.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: (re)establish the support-proxy tunnel.

        Runs until the state is deliberately set to OFFLINE; waits 10
        seconds between attempts after a failure or lost link.
        """
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login", str(self.connection_id), socket.gethostname(), get_version(), "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block until the link drops (LOST) or is closed on purpose.
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning("Support connection lost, retrying in 10 seconds")
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and forget all running jobs."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        """msock callback: mark the connection LOST so keepalive retries."""
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Attach *job* to this context and start it."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically change the connection state and wake all waiters."""
        with self.cv:
            self.state = state
            self.cv.notify_all()