Example 1
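A unit test: it serves a TestService over a Unix domain socket via an RpcContext, waits for the socket to appear, then connects a Client and checks a synchronous test.hello call before tearing everything down.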
    def test_unix_server(self):
        sockpath = os.path.join(os.getcwd(),
                                'test.{0}.sock'.format(os.getpid()))
        sockurl = 'unix://' + sockpath

        context = RpcContext()
        context.register_service('test', TestService)
        server = Server()
        server.rpc = context
        server.start(sockurl)
        threading.Thread(target=server.serve_forever, daemon=True).start()

        # Spin until server is ready
        while not os.path.exists(sockpath):
            time.sleep(0.1)

        client = Client()
        client.connect(sockurl)
        self.assertTrue(client.connected)
        self.assertEqual(client.call_sync('test.hello', 'freenas'),
                         'Hello World, freenas')

        client.disconnect()
        server.close()
        os.unlink(sockpath)
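TestService itself is not shown on this page; below is a minimal sketch consistent with the assertion above, assuming the RpcService base class and its import path from freenas.dispatcher.rpc (both are assumptions, not part of the example):

# Assumed import; the test module that defines TestService is not shown here.
from freenas.dispatcher.rpc import RpcService


class TestService(RpcService):
    # Hypothetical method backing call_sync('test.hello', 'freenas'),
    # which the test expects to return 'Hello World, freenas'.
    def hello(self, name):
        return 'Hello World, {0}'.format(name)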
Example 2
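Constructor of the dscached daemon: it creates a streaming-enabled RpcContext and registers the account, group, host, idmap, management and debug service instances on it.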
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account',
                                           self.account_service)
        self.rpc.register_service_instance('dscached.group',
                                           self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap',
                                           IdmapService(self))
        self.rpc.register_service_instance('dscached.management',
                                           ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())
Example 3
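Constructor of a serviced context: it sets up a kqueue, a null device handle and an RpcContext exposing the management and job services.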
    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))
Example 4
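A variant of the dscached constructor without the idmap service: the account, group, host, management and debug services are registered on a streaming-enabled RpcContext.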
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())
Example 5
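Daemon startup: it installs a SIGUSR2 handler, reads the configuration, attaches a fresh RpcContext exposing a control service to the server, and serves requests on a daemon thread.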
    def start(self, configpath, sockpath):
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control',
                                                  ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever,
                         name='server thread',
                         daemon=True).start()
Example 6
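Constructor of the logd context: it initializes the in-memory log store and registers the logging and debug services on an RpcContext.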
    def __init__(self):
        self.store = collections.deque()
        self.lock = threading.Lock()
        self.seqno = 0
        self.rpc_server = Server(self)
        self.boot_id = str(uuid.uuid4())
        self.exiting = False
        self.server = None
        self.servers = []
        self.klog_reader = None
        self.flush = False
        self.flush_thread = None
        self.datastore = None
        self.started_at = datetime.utcnow()
        self.rpc = RpcContext()
        self.rpc.register_service_instance('logd.logging',
                                           LoggingService(self))
        self.rpc.register_service_instance('logd.debug', DebugService())
        self.cv = threading.Condition()
Example 7
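Server constructor: when no RPC context is supplied, it falls back to a newly created RpcContext.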
    def __init__(self, context=None, connection_class=ServerConnection):
        self.server_transport = None
        self.connection_class = connection_class
        self.parsed_url = None
        self.scheme = None
        self.streaming = False
        self.transport = None
        self.rpc = None
        self.channel_serializer = None
        self.context = context or RpcContext()
        self.connections = []
Example 8
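Constructor of an msock-based connection handler: it tracks connection state and pairs an RpcContext with both a Client and a Server.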
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None
        self.connected_at = None
        self.cv = Condition()
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None
Example 9
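A logd context variant that also keeps a configstore handle and a list of syslog forwarders alongside the RpcContext.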
    def __init__(self):
        self.store = collections.deque()
        self.lock = threading.Lock()
        self.seqno = 0
        self.rpc_server = Server(self)
        self.boot_id = str(uuid.uuid4())
        self.exiting = False
        self.server = None
        self.servers = []
        self.forwarders = []
        self.klog_reader = None
        self.flush = False
        self.flush_thread = None
        self.datastore = None
        self.configstore = None
        self.started_at = datetime.utcnow()
        self.rpc = RpcContext()
        self.rpc.register_service_instance('logd.logging', LoggingService(self))
        self.rpc.register_service_instance('logd.debug', DebugService())
        self.cv = threading.Condition()
Example 10
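The full serviced Context class: its RpcContext is served over a local socket, re-exported through a dispatcher Client, and backed by a kqueue event loop that tracks child processes as anonymous jobs.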
class Context(object):
    def __init__(self):
        self.server = None
        self.client = None
        self.jobs = {}
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at process group boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Example 11
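The full dscached Main class: a streaming RpcContext combined with directory-service plugins, TTL caches, schema registration and dispatcher reconnection logic.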
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account',
                                           self.account_service)
        self.rpc.register_service_instance('dscached.group',
                                           self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap',
                                           IdmapService(self))
        self.rpc.register_service_instance('dscached.management',
                                           ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        return list(
            filter(lambda d: d and d.state == DirectoryState.BOUND,
                   self.directories))

    def get_searched_directories(self):
        return list(
            filter(lambda d: d and d.state == DirectoryState.BOUND,
                   (self.get_directory_by_name(n)
                    for n in self.get_search_order())))

    def get_search_order(self):
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        return first_or_default(lambda d: d.domain_name == domain_name,
                                self.directories)

    def get_directory_by_name(self, name):
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local',
                                        self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories)

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local',
                                        self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories)

    def get_home_directory(self, directory, username):
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root,
                            f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync(
                'discovery.get_services'))

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                self.logger.error('Cannot initialize plugin {0}'.format(f),
                                  exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def register_schemas(self):
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__()
                             for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except BaseException:
                continue

    def load_config(self):
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get(
            'directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get(
            'system.home_directory_root')

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-s',
                            metavar='SOCKET',
                            default=DEFAULT_SOCKET_ADDRESS,
                            help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('dscached', 'DEBUG')

        setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
Example 12
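A dscached Main variant that loads plugins with the older imp.load_source API and omits the idmap service and schema registration.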
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def init_directories(self):
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException:
                continue

    def load_config(self):
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
Example 13
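The full logd Context class: it buffers log records in a deque, broadcasts them as RPC events, and flushes them to the datastore when a flush is requested by signal.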
class Context(object):
    def __init__(self):
        self.store = collections.deque()
        self.lock = threading.Lock()
        self.seqno = 0
        self.rpc_server = Server(self)
        self.boot_id = str(uuid.uuid4())
        self.exiting = False
        self.server = None
        self.servers = []
        self.klog_reader = None
        self.flush = False
        self.flush_thread = None
        self.datastore = None
        self.started_at = datetime.utcnow()
        self.rpc = RpcContext()
        self.rpc.register_service_instance('logd.logging',
                                           LoggingService(self))
        self.rpc.register_service_instance('logd.debug', DebugService())
        self.cv = threading.Condition()

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(log=True)
            self.datastore.insert(
                'boots', {
                    'id': self.boot_id,
                    'booted_at': self.started_at,
                    'hostname': socket.gethostname()
                })
        except datastore.DatastoreException as err:
            logging.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_rpc_server(self):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.server.streaming = True
        self.server.start(DEFAULT_SOCKET_ADDRESS,
                          transport_options={'permissions': 0o666})
        thread = threading.Thread(target=self.server.serve_forever,
                                  name='RPC server thread',
                                  daemon=True)
        thread.start()

    def init_syslog_server(self):
        for path, perm in SYSLOG_SOCKETS.items():
            server = SyslogServer(path, perm, self)
            server.start()
            self.servers.append(server)

    def init_klog(self):
        self.klog_reader = KernelLogReader(self)
        thread = threading.Thread(target=self.klog_reader.process,
                                  name='klog reader',
                                  daemon=True)
        thread.start()

    def init_flush(self):
        self.flush_thread = threading.Thread(target=self.do_flush,
                                             name='Flush thread')
        self.flush_thread.start()

    def push(self, item):
        if not item:
            return

        if 'message' not in item or 'priority' not in item:
            return

        if 'timestamp' not in item:
            item['timestamp'] = datetime.now()

        if 'pid' in item:
            try:
                job = get_job_by_pid(item['pid'], True)
                item['service'] = job['Label']
            except ServicedException:
                pass

        with self.lock:
            priority, facility = parse_priority(item['priority'])
            item.update({
                'id': str(uuid.uuid4()),
                'seqno': self.seqno,
                'boot_id': self.boot_id,
                'priority': priority.name,
                'facility': facility.name if facility else None
            })
            self.store.append(item)
            self.seqno += 1
            self.server.broadcast_event('logd.logging.message', item)

    def do_flush(self):
        logging.debug('Flush thread initialized')
        while True:
            # Flush immediately after getting wakeup or when timeout expires
            with self.cv:
                self.cv.wait(FLUSH_INTERVAL)

                if not self.flush:
                    continue

                if not self.datastore:
                    try:
                        self.init_datastore()
                        logging.info('Datastore initialized')
                    except BaseException as err:
                        logging.warning(
                            'Cannot initialize datastore: {0}'.format(err))
                        logging.warning('Flush skipped')
                        continue

                logging.debug('Attempting to flush logs')
                with self.lock:
                    for i in self.store:
                        self.datastore.insert('syslog', i)

                    self.store.clear()

                if self.exiting:
                    return

    def sigusr1(self, signo, frame):
        with self.cv:
            logging.info('Flushing logs on signal')
            self.flush = True
            self.cv.notify_all()

    def main(self):
        setproctitle('logd')
        self.init_syslog_server()
        self.init_klog()
        self.init_rpc_server()
        self.init_flush()
        checkin()
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)
        while True:
            sig = signal.sigwait([signal.SIGTERM, signal.SIGUSR1])
            if sig == signal.SIGUSR1:
                with self.cv:
                    logging.info('Flushing logs on signal')
                    self.flush = True
                    self.cv.notify_all()

                continue

            if sig == signal.SIGTERM:
                logging.info('Got SIGTERM, exiting')
                with self.cv:
                    self.exiting = True
                    self.cv.notify_all()

                self.flush_thread.join()
                break
Example 14
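A logd Context variant that additionally forwards records to a remote syslog server taken from the configstore and reloads that setting on SIGHUP.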
class Context(object):
    def __init__(self):
        self.store = collections.deque()
        self.lock = threading.Lock()
        self.seqno = 0
        self.rpc_server = Server(self)
        self.boot_id = str(uuid.uuid4())
        self.exiting = False
        self.server = None
        self.servers = []
        self.forwarders = []
        self.klog_reader = None
        self.flush = False
        self.flush_thread = None
        self.datastore = None
        self.configstore = None
        self.started_at = datetime.utcnow()
        self.rpc = RpcContext()
        self.rpc.register_service_instance('logd.logging', LoggingService(self))
        self.rpc.register_service_instance('logd.debug', DebugService())
        self.cv = threading.Condition()

    def init_configstore(self):
        ds = datastore.get_datastore()
        self.configstore = datastore.config.ConfigStore(ds)

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(log=True)
            self.datastore.insert('boots', {
                'id': self.boot_id,
                'booted_at': self.started_at,
                'hostname': socket.gethostname()
            })
        except datastore.DatastoreException as err:
            logging.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

    def init_rpc_server(self):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.server.streaming = True
        self.server.start(DEFAULT_SOCKET_ADDRESS, transport_options={'permissions': 0o666})
        thread = threading.Thread(target=self.server.serve_forever, name='RPC server thread', daemon=True)
        thread.start()

    def init_syslog_server(self):
        for path, perm in SYSLOG_SOCKETS.items():
            server = SyslogServer(path, perm, self)
            server.start()
            self.servers.append(server)

    def init_klog(self):
        self.klog_reader = KernelLogReader(self)
        thread = threading.Thread(target=self.klog_reader.process, name='klog reader', daemon=True)
        thread.start()

    def init_flush(self):
        self.flush_thread = threading.Thread(target=self.do_flush, name='Flush thread')
        self.flush_thread.start()

    def load_configuration(self):
        syslog_server = self.configstore.get('system.syslog_server')

        if not syslog_server:
            if self.forwarders:
                for i in self.forwarders:
                    i.close()

                self.forwarders.clear()

            return

        if ':' in syslog_server:
            host, port = syslog_server.split(':')
            port = int(port)
        else:
            host = syslog_server
            port = 514

        self.forwarders.append(SyslogForwarder(host, port, self))

    def push(self, item):
        if not item:
            return

        if 'message' not in item or 'priority' not in item:
            return

        if 'timestamp' not in item:
            item['timestamp'] = datetime.now()

        if 'pid' in item:
            try:
                job = get_job_by_pid(item['pid'], True)
                item['service'] = job['Label']
            except ServicedException:
                pass

        with self.lock:
            priority, facility = parse_priority(item['priority'])
            item.update({
                'id': str(uuid.uuid4()),
                'seqno': self.seqno,
                'boot_id': self.boot_id,
                'priority': priority.name,
                'facility': facility.name if facility else None
            })
            self.store.append(item)
            self.seqno += 1

        self.server.broadcast_event('logd.logging.message', item)
        self.forward(item)

    def forward(self, item):
        hostname = socket.gethostname()
        prio = SyslogPriority.INFO

        try:
            prio = int(getattr(SyslogPriority, item['priority']))
        except BaseException:
            pass

        msg = f'<{prio}>{item["timestamp"]:%b %d %H:%M:%S} {hostname} {item["identifier"]}: {item["message"]}'
        for i in self.forwarders:
            i.forward(msg.encode('utf-8', 'ignore'))

    def do_flush(self):
        logging.debug('Flush thread initialized')
        while True:
            # Flush immediately after getting wakeup or when timeout expires
            with self.cv:
                self.cv.wait(FLUSH_INTERVAL)

                if not self.flush:
                    continue

                if not self.datastore:
                    try:
                        self.init_datastore()
                        logging.info('Datastore initialized')
                    except BaseException as err:
                        logging.warning('Cannot initialize datastore: {0}'.format(err))
                        logging.warning('Flush skipped')
                        continue

                logging.debug('Attempting to flush logs')
                with self.lock:
                    for i in self.store:
                        self.datastore.insert('syslog', i)

                    self.store.clear()

                if self.exiting:
                    return

    def sigusr1(self, signo, frame):
        with self.cv:
            logging.info('Flushing logs on signal')
            self.flush = True
            self.cv.notify_all()

    def main(self):
        setproctitle('logd')
        self.init_configstore()
        self.init_syslog_server()
        self.init_klog()
        self.init_rpc_server()
        self.init_flush()
        self.load_configuration()
        checkin()
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)
        signal.signal(signal.SIGHUP, signal.SIG_DFL)

        while True:
            sig = signal.sigwait([signal.SIGTERM, signal.SIGUSR1, signal.SIGHUP])
            if sig == signal.SIGUSR1:
                with self.cv:
                    logging.info('Flushing logs on signal')
                    self.flush = True
                    self.cv.notify_all()

                continue

            if sig == signal.SIGHUP:
                logging.info('Reloading configuration on SIGHUP')
                self.load_configuration()
                continue

            if sig == signal.SIGTERM:
                logging.info('Got SIGTERM, exiting')
                with self.cv:
                    self.exiting = True
                    self.cv.notify_all()

                self.flush_thread.join()
                break