Beispiel #1
0
def get_replication_client(dispatcher, remote):
    """Open an authenticated dispatcher Client to the replication peer at *remote*.

    Looks up the peer's stored credentials via 'peer.query', authenticates
    over SSH with the local private key in /etc/replication/key, and logs
    in to the 'replicator' service.

    Raises TaskException (ENOENT / EAUTH / ECONNREFUSED / EINVAL) on the
    various failure modes so callers can surface a uniform task error.
    """
    host = dispatcher.call_sync(
        'peer.query',
        [('address', '=', remote), ('type', '=', 'replication')],
        {'single': True}
    )
    if not host:
        raise TaskException(errno.ENOENT, 'There are no known keys to connect to {0}'.format(remote))

    # Local identity: the replication private key kept on disk.
    with open('/etc/replication/key') as f:
        pkey = RSAKey.from_private_key(f)

    credentials = host['credentials']

    try:
        client = Client()
        # The remote host key must be presented to connect() as a file path,
        # so spill the stored key into a temp file for the duration of the
        # handshake; the file is removed when the 'with' block exits.
        with tempfile.NamedTemporaryFile('w') as host_key_file:
            host_key_file.write(credentials['hostkey'])
            host_key_file.flush()
            client.connect(
                'ws+ssh://replication@{0}'.format(remote),
                port=credentials['port'],
                host_key_file=host_key_file.name,
                pkey=pkey
            )
        client.login_service('replicator')
        return client

    # NOTE(review): errno.EAUTH is BSD-specific (this is FreeNAS/FreeBSD
    # code); it does not exist on Linux builds of Python — confirm target OS.
    except (AuthenticationException, SSHException):
        raise TaskException(errno.EAUTH, 'Cannot connect to {0}'.format(remote))
    except (OSError, ConnectionRefusedError):
        raise TaskException(errno.ECONNREFUSED, 'Cannot connect to {0}'.format(remote))
    except IOError:
        raise TaskException(errno.EINVAL, 'Provided host key is not valid')
def main(name, *args):
    """Forward a UPS NOTIFY event to the local dispatcher.

    Connects to the middleware on localhost as the 'ups' service and emits
    a 'service.ups.signal' event carrying the UPS name and the NOTIFYTYPE
    environment variable set by upsmon.
    """
    conn = Client()
    conn.connect('127.0.0.1')
    conn.login_service('ups')

    payload = {
        'name': name,
        'type': os.environ['NOTIFYTYPE'],
    }
    conn.emit_event('service.ups.signal', payload)

    conn.disconnect()
Beispiel #3
0
def main(*args):
    """Sendmail-compatible shim: read an email message from stdin and hand
    it to the middleware's 'mail.send' RPC.

    Supports sendmail(8)-style flags: -i (ignored dot handling) and -t
    (parse recipients from the message's To: header). Falls back to 'root'
    when no recipient can be determined.
    """
    connection = Client()
    connection.connect('127.0.0.1')
    connection.login_service('smtp')

    parser = argparse.ArgumentParser(description='Process email')
    parser.add_argument('-i', dest='strip_leading_dot', action='store_false',
                        default=True, help='see sendmail(8) -i')
    parser.add_argument('-t', dest='parse_recipients', action='store_true',
                        default=False,
                        help='parse recipients from message')
    parser.usage = ' '.join(parser.format_usage().split(' ')[1:-1])
    parser.usage += ' [email_addr|user] ..'
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())
    msg = sys.stdin.read()

    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: map() is a lazy iterator in Python 3; it must be
        # materialized or the to_addrs[0] subscript below raises TypeError.
        to_addrs = [addr.strip() for addr in em.get('To').split(',')]

    if not to_addrs or not to_addrs[0]:
        to_addrs = ['root']

    margs = {}
    margs['extra_headers'] = dict(em)
    margs['extra_headers'].update({
        'X-Mailer': 'FreeNAS',
        'X-FreeNAS-Host': socket.gethostname(),
    })
    margs['subject'] = em.get('Subject')

    if em.is_multipart():
        # BUG FIX: filter() is lazy in Python 3; materialize the attachment
        # list so it can be serialized into the RPC call below.
        margs['attachments'] = [
            part for part in em.walk()
            if part.get_content_maintype() != 'multipart'
        ]
        margs['message'] = (
            'This is a MIME formatted message.  If you see '
            'this text it means that your email software '
            'does not support MIME formatted messages.')
    else:
        margs['message'] = ''.join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs['to'] = to_addrs

    connection.call_sync('mail.send', margs)
    connection.disconnect()
def main(*args):
    """Sendmail-compatible shim: read an email message from stdin and hand
    it to the middleware's 'mail.send' RPC.

    Supports sendmail(8)-style flags: -i (ignored dot handling) and -t
    (parse recipients from the message's To: header). Falls back to 'root'
    when no recipient can be determined.
    """
    connection = Client()
    connection.connect("127.0.0.1")
    connection.login_service("smtp")

    parser = argparse.ArgumentParser(description="Process email")
    parser.add_argument("-i", dest="strip_leading_dot", action="store_false", default=True, help="see sendmail(8) -i")
    parser.add_argument(
        "-t", dest="parse_recipients", action="store_true", default=False, help="parse recipients from message"
    )
    parser.usage = " ".join(parser.format_usage().split(" ")[1:-1])
    parser.usage += " [email_addr|user] .."
    args, to_addrs = parser.parse_known_args()
    if not to_addrs and not args.parse_recipients:
        parser.exit(message=parser.format_usage())
    msg = sys.stdin.read()

    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if args.parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        # BUG FIX: map() is a lazy iterator in Python 3; it must be
        # materialized or the to_addrs[0] subscript below raises TypeError.
        to_addrs = [addr.strip() for addr in em.get("To").split(",")]

    if not to_addrs or not to_addrs[0]:
        to_addrs = ["root"]

    margs = {}
    margs["extra_headers"] = dict(em)
    margs["extra_headers"].update({"X-Mailer": "FreeNAS", "X-FreeNAS-Host": socket.gethostname()})
    margs["subject"] = em.get("Subject")

    if em.is_multipart():
        # BUG FIX: filter() is lazy in Python 3; materialize the attachment
        # list so it can be serialized into the RPC call below.
        margs["attachments"] = [part for part in em.walk() if part.get_content_maintype() != "multipart"]
        margs["message"] = (
            "This is a MIME formatted message.  If you see "
            "this text it means that your email software "
            "does not support MIME formatted messages."
        )
    else:
        margs["message"] = "".join(email.iterators.body_line_iterator(em))

    if to_addrs:
        margs["to"] = to_addrs

    connection.call_sync("mail.send", margs)
    connection.disconnect()
Beispiel #5
0
def get_freenas_peer_client(parent, remote):
    """Open an authenticated dispatcher Client to the FreeNAS peer *remote*.

    Resolves *remote*, finds the matching 'freenas' peer entry (by name or
    resolved address), authenticates over SSH using the private key stored
    in the configstore, and logs in to the 'replicator' service.

    Raises TaskException (ENOENT / EAUTH / ECONNREFUSED, or the resolver's
    errno) on failure.
    """
    try:
        address = socket.gethostbyname(remote)
    except socket.error as err:
        raise TaskException(err.errno, '{0} is unreachable'.format(remote))

    # Match the peer either by the name the caller gave us or by the
    # resolved IP, since either may be what was stored at pairing time.
    host = parent.dispatcher.call_sync(
        'peer.query', [
            ('or', [
                ('credentials.address', '=', remote),
                ('credentials.address', '=', address),
            ]),
            ('type', '=', 'freenas')
        ],
        {'single': True}
    )
    if not host:
        raise TaskException(errno.ENOENT, 'There are no known keys to connect to {0}'.format(remote))

    # RSAKey.from_private_key() wants a file-like object, so wrap the
    # configstore string in a StringIO.
    with io.StringIO() as f:
        f.write(parent.configstore.get('peer.freenas.key.private'))
        f.seek(0)
        pkey = RSAKey.from_private_key(f)

    credentials = host['credentials']

    try:
        client = Client()
        # Spill the stored host key into a known_hosts-style temp file
        # ("<host> <key>") for the SSH handshake; removed on block exit.
        with tempfile.NamedTemporaryFile('w') as host_key_file:
            host_key_file.write(remote + ' ' + credentials['hostkey'])
            host_key_file.flush()
            client.connect(
                'ws+ssh://freenas@{0}'.format(wrap_address(remote)),
                port=credentials['port'],
                host_key_file=host_key_file.name,
                pkey=pkey
            )
        client.login_service('replicator')
        return client

    except (AuthenticationException, SSHException):
        raise TaskException(errno.EAUTH, 'Cannot connect to {0}'.format(remote))
    except OSError as err:
        raise TaskException(errno.ECONNREFUSED, 'Cannot connect to {0}: {1}'.format(remote, err))
Beispiel #6
0
def get_freenas_peer_client(parent, remote):
    """Open an authenticated dispatcher Client to the FreeNAS peer *remote*.

    Resolves the hostname, locates the matching 'freenas' peer record,
    authenticates over SSH with the configstore private key, and logs in
    to the 'replicator' service. Raises TaskException on any failure.
    """
    try:
        resolved = socket.gethostbyname(remote)
    except socket.error as err:
        raise TaskException(err.errno, '{0} is unreachable'.format(remote))

    # The stored address may be either the given name or its resolved IP.
    query = [
        ('or', [
            ('credentials.address', '=', remote),
            ('credentials.address', '=', resolved),
        ]),
        ('type', '=', 'freenas')
    ]
    peer = parent.dispatcher.call_sync('peer.query', query, {'single': True})
    if not peer:
        raise TaskException(errno.ENOENT, 'There are no known keys to connect to {0}'.format(remote))

    # Wrap the configstore key string in a file-like object for paramiko.
    key_material = parent.configstore.get('peer.freenas.key.private')
    with io.StringIO() as buf:
        buf.write(key_material)
        buf.seek(0)
        pkey = RSAKey.from_private_key(buf)

    creds = peer['credentials']

    try:
        client = Client()
        # Write a known_hosts-style "<host> <key>" line to a temp file for
        # the handshake; it is cleaned up when the 'with' block exits.
        with tempfile.NamedTemporaryFile('w') as keyfile:
            keyfile.write(remote + ' ' + creds['hostkey'])
            keyfile.flush()
            client.connect(
                'ws+ssh://freenas@{0}'.format(wrap_address(remote)),
                port=creds['port'],
                host_key_file=keyfile.name,
                pkey=pkey
            )
        client.login_service('replicator')
        return client

    except (AuthenticationException, SSHException):
        raise TaskException(errno.EAUTH, 'Cannot connect to {0}'.format(remote))
    except OSError as err:
        raise TaskException(errno.ECONNREFUSED, 'Cannot connect to {0}: {1}'.format(remote, err))
Beispiel #7
0
class Main(object):
    """etcd daemon: renders template-managed files under a mount point.

    Scans plugin directories for templates, registers RPC services with the
    dispatcher, and regenerates managed files on request.
    """

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None            # mount point (e.g. /etc) files are generated under
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher connection
        self.plugin_dirs = []
        self.renderers = {}         # ext -> renderer instance
        self.managed_files = {}     # relative name (no ext) -> template abspath

    def init_datastore(self):
        """Connect to the datastore; exit the process on failure."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect to the dispatcher and register services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template type."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON config file; exit on I/O or parse errors."""
        try:
            # Context manager guarantees the file is closed even when
            # json.load raises.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUG FIX: exceptions have no .message attribute in Python 3;
            # the old code raised AttributeError inside this handler.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured plugin directory for templates."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Walk *dir* and register any file whose extension has a renderer.

        The first template found for a given relative name wins; later
        duplicates are skipped.
        """
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                if name in self.managed_files:
                    continue

                if ext in TEMPLATE_RENDERERS:
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the managed file *file_path* and return its content.

        Raises RpcException(ENOENT) for unknown files. Rendering failures
        are logged and returned as a comment so the target file still gets
        written with a diagnostic.
        """
        if file_path not in self.managed_files:
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers:
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            # logger.warn is deprecated; use warning().
            self.logger.warning('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
Beispiel #8
0
class Context(object):
    """debugd daemon context: maintains the msock tunnel to the support
    proxy, a local control socket server, and a dispatcher connection.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1                 # msock channel used for RPC
        self.connection_id = None        # persistent id across reconnects
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None            # background reconnect thread
        self.connected_at = None
        self.cv = Condition()            # guards self.state transitions
        self.rpc = RpcContext()
        self.client = Client()           # RPC client over the msock channel
        self.server = Server()           # local control-socket server
        self.middleware_endpoint = None  # dispatcher connection

    def start(self, configpath, sockpath):
        """Read config, start the local control server, and arm SIGUSR2
        to (re)initiate the support connection."""
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client, reconnecting whenever the
        connection drops or is logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect and register with the dispatcher; retry every second."""
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file; exit the process on any failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        """Start the support-connection keepalive thread.

        With discard=True a fresh connection id is generated instead of
        reusing the existing session.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: connect to the support proxy, log in, then block
        until the connection is LOST (retry) or OFFLINE (stop)."""
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                # Fresh Client per attempt; the old one dies with the channel.
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login", str(self.connection_id), socket.gethostname(), get_version(), "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit in this daemon thread — presumably intentional so
            # the keepalive never dies; confirm.
            except BaseException as err:
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block here while connected; on_msock_close() or
                    # disconnect() flips the state and wakes us.
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning("Support connection lost, retrying in 10 seconds")
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and mark the context OFFLINE."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # Transport died underneath us: flag LOST so keepalive reconnects.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Track and start a debug job bound to this context."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically update connection state and wake state waiters."""
        with self.cv:
            self.state = state
            self.cv.notify_all()
Beispiel #9
0
    def run(self, peer, initial_credentials):
        """Pair this host with a remote FreeNAS peer.

        Two flows, selected by *initial_credentials*:
        - auth_code: ask the remote to initiate pairing back to us and wait
          for the resulting local peer entry to appear.
        - username/password or key_auth: connect to the remote ourselves,
          exchange SSH keys, and create matching peer entries on both sides
          (rolling back the local entry if the remote create fails).

        Returns the id of the locally created peer entry (key/password
        flow). Raises TaskException on connection, auth, or duplicate-peer
        errors.
        """
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        hostname = self.dispatcher.call_sync(
            'system.general.get_config')['hostname']
        remote_peer_name = hostname
        credentials = peer['credentials']
        remote = credentials.get('address')
        port = credentials.get('port', 22)
        username = initial_credentials.get('username')
        password = initial_credentials.get('password')
        auth_code = initial_credentials.get('auth_code')
        key_auth = initial_credentials.get('key_auth')

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        # Refuse to create a duplicate peer entry for the same address.
        if self.datastore.exists('peers', ('credentials.address', '=', remote),
                                 ('type', '=', 'freenas')):
            raise TaskException(
                errno.EEXIST,
                'FreeNAS peer entry for {0} already exists'.format(remote))

        remote_client = Client()

        try:
            if auth_code:
                # --- Auth-code flow: the REMOTE side drives peer creation. ---
                try:
                    remote_client.connect('ws://{0}'.format(
                        wrap_address(remote)))
                except (AuthenticationException, OSError,
                        ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

                try:
                    remote_host_uuid, pubkey = remote_client.call_sync(
                        'peer.freenas.auth_with_code', auth_code, hostname,
                        local_ssh_config['port'])
                except RpcException as err:
                    raise TaskException(err.code, err.message)

                try:
                    # Temporarily trust the remote's pubkey, then wait up to
                    # 30s for the remote-initiated peer entry to appear.
                    self.dispatcher.call_sync('peer.freenas.put_temp_pubkey',
                                              pubkey)
                    if not self.dispatcher.test_or_wait_for_event(
                            'peer.changed',
                            lambda ar: ar['operation'] == 'create' and
                            remote_host_uuid in ar['ids'],
                            lambda: self.datastore.exists(
                                'peers', ('id', '=', remote_host_uuid)),
                            timeout=30):
                        raise TaskException(
                            errno.EAUTH,
                            'FreeNAS peer creation failed. Check connection to host {0}.'
                            .format(remote))
                finally:
                    # Always revoke the temporary key, success or not.
                    self.dispatcher.call_sync(
                        'peer.freenas.remove_temp_pubkey', pubkey)

            else:
                # --- Key/password flow: WE drive peer creation on both ends. ---
                try:
                    if key_auth:
                        with io.StringIO() as f:
                            f.write(
                                self.configstore.get(
                                    'peer.freenas.key.private'))
                            f.seek(0)
                            pkey = RSAKey.from_private_key(f)

                        # The remote may still be installing our key; retry
                        # auth for up to ~50 seconds before giving up.
                        max_tries = 50
                        while True:
                            try:
                                remote_client.connect(
                                    'ws+ssh://freenas@{0}'.format(
                                        wrap_address(remote)),
                                    pkey=pkey,
                                    port=port)
                                break
                            except AuthenticationException:
                                if max_tries:
                                    max_tries -= 1
                                    time.sleep(1)
                                else:
                                    raise
                    else:
                        remote_client.connect('ws+ssh://{0}@{1}'.format(
                            username, wrap_address(remote)),
                                              port=port,
                                              password=password)

                    remote_client.login_service('replicator')
                except (AuthenticationException, OSError,
                        ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

                # Exchange SSH identities between both sides.
                local_host_key, local_pub_key = self.dispatcher.call_sync(
                    'peer.freenas.get_ssh_keys')
                remote_host_key, remote_pub_key = remote_client.call_sync(
                    'peer.freenas.get_ssh_keys')
                ip_at_remote_side = remote_client.local_address[0]

                remote_hostname = remote_client.call_sync(
                    'system.general.get_config')['hostname']

                # Drop the trailing comment field of the host keys.
                remote_host_key = remote_host_key.rsplit(' ', 1)[0]
                local_host_key = local_host_key.rsplit(' ', 1)[0]

                if remote_client.call_sync('peer.query',
                                           [('id', '=', hostid)]):
                    raise TaskException(
                        errno.EEXIST,
                        'Peer entry of {0} already exists at {1}'.format(
                            hostname, remote))

                # Local entry describes the REMOTE host.
                peer['credentials'] = {
                    '%type': 'freenas-credentials',
                    'pubkey': remote_pub_key,
                    'hostkey': remote_host_key,
                    'port': port,
                    'address': remote_hostname
                }

                local_id = remote_client.call_sync('system.info.host_uuid')
                peer['id'] = local_id
                peer['name'] = remote_hostname
                ip = socket.gethostbyname(remote)

                created_ids = self.join_subtasks(
                    self.run_subtask('peer.freenas.create_local', peer, ip,
                                     True))

                # Reuse the same dict, rewritten to describe THIS host, for
                # the mirror entry created on the remote side.
                peer['id'] = hostid
                peer['name'] = remote_peer_name
                peer['credentials'] = {
                    '%type': 'freenas-credentials',
                    'pubkey': local_pub_key,
                    'hostkey': local_host_key,
                    'port': local_ssh_config['port'],
                    'address': hostname
                }

                try:
                    call_task_and_check_state(remote_client,
                                              'peer.freenas.create_local',
                                              peer, ip_at_remote_side)
                except TaskException:
                    # Remote create failed: roll back the local entry so the
                    # two sides stay consistent.
                    self.datastore.delete('peers', local_id)
                    self.dispatcher.dispatch_event('peer.changed', {
                        'operation': 'delete',
                        'ids': [local_id]
                    })
                    raise

                return created_ids[0]
        finally:
            remote_client.disconnect()
Beispiel #10
0
class Context(object):
    """debugd daemon context: maintains the msock tunnel to the support
    proxy, a local control socket server, and a dispatcher connection.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1                 # msock channel used for RPC
        self.connection_id = None        # persistent id across reconnects
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None            # background reconnect thread
        self.connected_at = None
        self.cv = Condition()            # guards self.state transitions
        self.rpc = RpcContext()
        self.client = Client()           # RPC client over the msock channel
        self.server = Server()           # local control-socket server
        self.middleware_endpoint = None  # dispatcher connection

    def start(self, configpath, sockpath):
        """Read config, start the local control server, and arm SIGUSR2
        to (re)initiate the support connection."""
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control',
                                                  ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever,
                         name='server thread',
                         daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client, reconnecting whenever the
        connection drops or is logged out."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect and register with the dispatcher; retry every second."""
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service(
                    'debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file; exit the process on any failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        """Start the support-connection keepalive thread.

        With discard=True a fresh connection id is generated instead of
        reusing the existing session.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive,
                                          daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: connect to the support proxy, log in, then block
        until the connection is LOST (retry) or OFFLINE (stop)."""
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info(
                    'Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                # Fresh Client per attempt; the old one dies with the channel.
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(
                    self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id),
                                      socket.gethostname(), get_version(),
                                      'none')
                self.set_state(ConnectionState.CONNECTED)
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit in this daemon thread — presumably intentional so
            # the keepalive never dies; confirm.
            except BaseException as err:
                self.logger.warning(
                    'Failed to initiate support connection: {0}'.format(err),
                    exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block here while connected; on_msock_close() or
                    # disconnect() flips the state and wakes us.
                    self.cv.wait_for(lambda: self.state in (
                        ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning(
                'Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        """Tear down the support connection and mark the context OFFLINE."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        # Transport died underneath us: flag LOST so keepalive reconnects.
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Track and start a debug job bound to this context."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically update connection state and wake state waiters."""
        with self.cv:
            self.state = state
            self.cv.notify_all()
Beispiel #11
0
    def run(self, peer, initial_credentials):
        """Pair this host with a remote FreeNAS peer.

        Two authentication paths:
          * auth_code: connect over a plain websocket, exchange keys via the
            one-time code, then wait for the remote side to connect back and
            create our local peer entry.
          * username/password or key_auth: connect over ws+ssh, then create
            matching peer entries on both sides, rolling the local entry back
            if the remote-side creation fails.

        Raises TaskException on duplicate peers, connection/authentication
        failures, or remote RPC errors.
        """
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
        # our entry on the remote side will carry this host's name
        remote_peer_name = hostname
        credentials = peer['credentials']
        remote = credentials.get('address')
        port = credentials.get('port', 22)
        username = initial_credentials.get('username')
        password = initial_credentials.get('password')
        auth_code = initial_credentials.get('auth_code')
        key_auth = initial_credentials.get('key_auth')

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        # refuse to create a duplicate entry for the same remote address
        if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
            raise TaskException(
                errno.EEXIST,
                'FreeNAS peer entry for {0} already exists'.format(remote)
            )

        remote_client = Client()

        try:
            if auth_code:
                # --- one-time auth-code path (plain websocket) ---
                try:
                    remote_client.connect('ws://{0}'.format(wrap_address(remote)))
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

                try:
                    remote_host_uuid, pubkey = remote_client.call_sync(
                        'peer.freenas.auth_with_code',
                        auth_code,
                        hostname,
                        local_ssh_config['port']
                    )
                except RpcException as err:
                    raise TaskException(err.code, err.message)

                try:
                    # temporarily trust the remote's pubkey so it can connect
                    # back and create our local peer entry
                    self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                    # wait up to 30s for the remote side to create the entry
                    if not self.dispatcher.test_or_wait_for_event(
                        'peer.changed',
                        lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                        lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
                        timeout=30
                    ):
                        raise TaskException(
                            errno.EAUTH,
                            'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
                        )
                finally:
                    # always revoke the temporary trust
                    self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)

            else:
                # --- ssh path: key-based or username/password ---
                try:
                    if key_auth:
                        with io.StringIO() as f:
                            f.write(self.configstore.get('peer.freenas.key.private'))
                            f.seek(0)
                            pkey = RSAKey.from_private_key(f)

                        # the remote side may still be provisioning our key;
                        # retry authentication roughly once a second, ~50 times
                        max_tries = 50
                        while True:
                            try:
                                remote_client.connect('ws+ssh://freenas@{0}'.format(
                                    wrap_address(remote)), pkey=pkey, port=port
                                )
                                break
                            except AuthenticationException:
                                if max_tries:
                                    max_tries -= 1
                                    time.sleep(1)
                                else:
                                    raise
                    else:
                        remote_client.connect(
                            'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                            port=port,
                            password=password
                        )

                    remote_client.login_service('replicator')
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

                local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
                remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
                # address of this host as seen from the remote end
                ip_at_remote_side = remote_client.local_address[0]

                remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']

                # drop the last space-separated field of each host key
                # (presumably the key comment -- TODO confirm key format)
                remote_host_key = remote_host_key.rsplit(' ', 1)[0]
                local_host_key = local_host_key.rsplit(' ', 1)[0]

                # make sure the remote does not already know this host
                if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
                    raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))

                # local entry describing the REMOTE host
                peer['credentials'] = {
                    '%type': 'freenas-credentials',
                    'pubkey': remote_pub_key,
                    'hostkey': remote_host_key,
                    'port': port,
                    'address': remote_hostname
                }

                local_id = remote_client.call_sync('system.info.host_uuid')
                peer['id'] = local_id
                peer['name'] = remote_hostname
                ip = socket.gethostbyname(remote)

                created_id = self.run_subtask_sync(
                    'peer.freenas.create_local',
                    peer,
                    ip,
                    True
                )

                # remote entry describing THIS host (peer dict is reused)
                peer['id'] = hostid
                peer['name'] = remote_peer_name
                peer['credentials'] = {
                    '%type': 'freenas-credentials',
                    'pubkey': local_pub_key,
                    'hostkey': local_host_key,
                    'port': local_ssh_config['port'],
                    'address': hostname
                }

                try:
                    call_task_and_check_state(
                        remote_client,
                        'peer.freenas.create_local',
                        peer,
                        ip_at_remote_side
                    )
                except TaskException:
                    # roll back the local entry if the remote-side creation failed
                    self.datastore.delete('peers', local_id)
                    self.dispatcher.dispatch_event('peer.changed', {
                        'operation': 'delete',
                        'ids': [local_id]
                    })
                    raise

                return created_id
        finally:
            remote_client.disconnect()
Beispiel #12
0
    def run(self, peer):
        """Pair this host with a remote replication peer over SSH.

        Connects to the remote dispatcher using username/password, exchanges
        SSH host/public keys, then creates matching 'replication' peer
        entries locally and on the remote side, rolling back the local entry
        if the remote-side creation fails.

        Raises TaskException on duplicate peers, missing credentials, or
        connection/authentication failures.
        """
        if self.datastore.exists('peers', ('address', '=', peer['address']), ('type', '=', 'replication')):
            raise TaskException(errno.EEXIST, 'Replication peer entry for {0} already exists'.format(peer['address']))

        if peer['credentials']['type'] != 'ssh':
            raise TaskException(errno.EINVAL, 'SSH credentials type is needed to perform replication peer pairing')

        remote = peer.get('address')
        credentials = peer['credentials']
        username = credentials.get('username')
        port = credentials.get('port', 22)
        password = credentials.get('password')

        # username, address and password are all mandatory for pairing
        if not username:
            raise TaskException(errno.EINVAL, 'Username has to be specified')

        if not remote:
            raise TaskException(errno.EINVAL, 'Address of remote host has to be specified')

        if not password:
            raise TaskException(errno.EINVAL, 'Password has to be specified')

        remote_client = Client()
        try:
            try:
                remote_client.connect('ws+ssh://{0}@{1}'.format(username, remote), port=port, password=password)
                remote_client.login_service('replicator')
            except (AuthenticationException, OSError, ConnectionRefusedError):
                raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

            # exchange key material; keys[0] is the host key, keys[1] the pubkey
            local_keys = self.dispatcher.call_sync('peer.get_ssh_keys')
            remote_keys = remote_client.call_sync('peer.get_ssh_keys')
            # address of this host as seen from the remote end
            ip_at_remote_side = remote_client.call_sync('management.get_sender_address').split(',', 1)[0]

            # known_hosts-style lines: '<address> <key>'; the last
            # space-separated field of the raw key (presumably the key
            # comment -- TODO confirm) is dropped
            remote_host_key = remote + ' ' + remote_keys[0].rsplit(' ', 1)[0]
            local_host_key = ip_at_remote_side + ' ' + local_keys[0].rsplit(' ', 1)[0]

            local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

            # make sure the remote does not already have a peer of this name
            if remote_client.call_sync('peer.query', [('name', '=', peer['name'])]):
                raise TaskException(errno.EEXIST, 'Peer entry {0} already exists at {1}'.format(peer['name'], remote))

            # local entry describing the REMOTE host
            peer['credentials'] = {
                'pubkey': remote_keys[1],
                'hostkey': remote_host_key,
                'port': port,
                'type': 'replication'
            }

            self.join_subtasks(self.run_subtask(
                'peer.replication.create_local',
                peer
            ))

            # remote entry describing THIS host (peer dict is reused)
            peer['address'] = ip_at_remote_side
            peer['credentials'] = {
                'pubkey': local_keys[1],
                'hostkey': local_host_key,
                'port': local_ssh_config['port'],
                'type': 'replication'
            }

            # NOTE(review): with select='id' this presumably returns a list,
            # which is then passed as-is to delete() and the event payload --
            # verify whether a scalar id is expected downstream.
            id = self.datastore.query('peers', ('name', '=', peer['name']), select='id')
            try:
                call_task_and_check_state(
                    remote_client,
                    'peer.replication.create_local',
                    peer
                )
            except TaskException:
                # roll back the local entry if the remote-side creation failed
                self.datastore.delete('peers', id)
                self.dispatcher.dispatch_event('peer.changed', {
                    'operation': 'delete',
                    'ids': [id]
                })
                raise
        finally:
            remote_client.disconnect()
Beispiel #13
0
class RESTApi(object):
    """REST gateway exposing dispatcher RPCs and tasks as a falcon WSGI API.

    Discovers services/tasks/schemas over the dispatcher connection, loads
    per-resource plugins from the sibling 'plugins' directory and serves the
    API over a gevent WSGI server on port 8889.
    """

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []           # registered CRUD resource instances
        self._threads = []         # gevent greenlets (the WSGI server)
        self._rpcs = {}            # 'service.method' -> method metadata
        self._schemas = {}         # schema metadata from discovery
        self._used_schemas = set()
        self._services = {}        # service name -> list of method metadata
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Cache task, schema and per-service RPC method metadata via discovery."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync(
                'discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Import every .py file from the plugins directory and run its _init hook.

        A plugin that fails to import is logged and the error re-raised.
        """
        pluginsdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(
                    i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except Exception:
                # was a bare 'except:'; narrowed so SystemExit and
                # KeyboardInterrupt are not intercepted (the error is
                # re-raised either way, only the logging changes)
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise
            mod._init(self)

    def connect(self):
        """Connect and log in to the dispatcher, retrying every second until it works."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        """WSGI entry point; strips the '/api/v2.0' prefix for proxied requests."""
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO',
                                               '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate and keep a CRUD resource class."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        """Instantiate a single-item resource class (not retained)."""
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        """Instantiate a plain resource class (not retained)."""
        klass(self)

    def run(self):
        """Bring up dispatcher connection, metadata, plugins and serve forever."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()

        server4 = WSGIServer(('0.0.0.0', 8889),
                             self,
                             handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """SIGINT handler: kill the server greenlets and exit."""
        gevent.killall(self._threads)
        sys.exit(0)
class Main(object):
    """dscached daemon entry point: loads config, connects to the datastore
    and dispatcher, loads directory-service plugins and serves the dscached
    RPC services."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None        # parsed JSON config (dict) after parse_config()
        self.datastore = None
        self.configstore = None
        self.client = None        # dispatcher client, set in init_dispatcher()
        self.plugin_dirs = []
        self.plugins = {}         # plugin name -> plugin instance

    def init_datastore(self):
        """Connect to the datastore; exit(1) on failure (daemon cannot run without it)."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def parse_config(self, filename):
        """Load the JSON config file; exit(1) if unreadable or not valid JSON."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no '.message' attribute (that was a
            # Py2-ism and raised AttributeError here); log the error itself.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect, log in, and register/resume dscached services; retry every second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server()
                self.client.register_service('dscached.account', AccountService(self))
                self.client.register_service('dscached.group', GroupService(self))
                self.client.register_service('dscached.debug', DebugService())
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in *dir* and run its _init hook; failures are logged."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # deliberately broad: one bad plugin must not kill the daemon
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Called by plugins' _init hooks to register themselves (instantiated)."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def main(self):
        """Parse arguments, initialize all subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.client.wait_forever()
Beispiel #15
0
class Main(object):
    """neighbord daemon entry point: advertises this host's services through
    discovery plugins and serves the neighbord RPC services over the
    dispatcher connection."""

    def __init__(self):
        # Keep the named logger: the original immediately re-assigned the
        # root logger (logging.getLogger()) which defeated per-daemon log
        # naming; it also assigned self.config = None twice.
        self.logger = logging.getLogger('neighbord')
        self.config = None        # parsed JSON config (dict) after parse_config()
        self.datastore = None
        self.configstore = None
        self.client = None        # dispatcher client, set in init_dispatcher()
        self.plugin_dirs = []
        self.plugins = {}         # plugin name -> plugin instance

    def parse_config(self, filename):
        """Load the JSON config file; exit(1) if unreadable or not valid JSON."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no '.message' attribute (Py2-ism that
            # raised AttributeError here); log the error itself.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Connect to the datastore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in *dir* and run its _init hook; failures are logged."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # deliberately broad: one bad plugin must not kill the daemon
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Called by plugins' _init hooks to register themselves (instantiated)."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service through every registered discovery plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Best-effort announcement of this host's well-known services."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            # best-effort: log and carry on, the daemon is still useful
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect, log in, and register/resume neighbord services; retry every second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def checkin(self):
        """Signal service readiness (delegates to the module-level checkin())."""
        checkin()

    def main(self):
        """Parse arguments, initialize all subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')

        setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.checkin()
        self.client.wait_forever()
Beispiel #16
0
class Main(object):
    """clid daemon entry point: hosts a CLI evaluation context and serves the
    clid RPC services over the dispatcher connection."""

    def __init__(self):
        # Keep the named logger: the original immediately re-assigned the
        # root logger (logging.getLogger()) which defeated per-daemon log
        # naming; it also assigned self.config = None twice.
        self.logger = logging.getLogger('clid')
        self.config = None       # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None       # dispatcher client, set in init_dispatcher()
        self.plugin_dirs = []
        self.ml = None           # CLI MainLoop, set in init_cli()
        self.context = None      # CLI Context, set in init_cli()

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI Context on top of the dispatcher connection."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect, log in, and register/resume clid services; retry every second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Parse arguments, initialize the dispatcher and CLI, and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('clid', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
Beispiel #17
0
class Main(object):
    """alertd daemon entry point: matches alerts against user-defined filters
    and dispatches them through registered emitter plugins, with periodic
    reminders for still-active alerts."""

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None        # parsed JSON config (dict) after parse_config()
        self.datastore = None
        self.configstore = None
        self.client = None        # dispatcher client, set in init_dispatcher()
        self.plugin_dirs = []
        self.emitters = {}        # emitter name -> emitter instance

    def init_datastore(self):
        """Connect to the datastore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the daemon thread that periodically re-emits active alerts."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load the JSON config file; exit(1) if unreadable or not valid JSON."""
        try:
            # 'with' guarantees the handle is closed even when json.load()
            # raises ValueError (the original open()/close() leaked it then)
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no '.message' attribute (Py2-ism that
            # raised AttributeError here); log the error itself.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect, log in, and register/resume alertd services; retry every second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management',
                                             ManagementService(self))
                self.client.register_service('alertd.alert',
                                             AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in *dir* and run its _init hook; failures are logged."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # deliberately broad: one bad plugin must not kill the daemon
                self.logger.error('Cannot initialize plugin {0}'.format(f),
                                  exc_info=True)

    def emit_alert(self, alert):
        """Send *alert* through every matching filter's emitter and bump its counters.

        A filter matches when its class is None or equals the alert's class
        and all of its predicates hold; a predicate that raises is skipped.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(
            alert['id'], alert['clazz']))
        for i in self.datastore.query(
                'alert.filters', ('or', [('clazz', '=', None),
                                         ('clazz', '=', alert['clazz'])])):
            for pr in i.get('predicates', []):
                if pr['operator'] not in operators_table:
                    continue

                try:
                    if not operators_table[pr['operator']](
                            alert.properties.get(pr['property']), pr['value']):
                        break
                except:
                    # a broken predicate must not block the filter loop
                    continue
            else:
                # every predicate passed (for/else): emit through the filter
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning(
                            'Invalid emitter {0} for alert filter {1}'.format(
                                i['emitter'], i['id']))
                        continue

                    self.logger.debug(
                        'Alert <id:{0}> matched filter {1}'.format(
                            alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        # repeat notification, unless the alert is one-shot
                        if not alert['one_shot']:
                            emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error(
                        'Cannot emit alert <id:{0}> using {1}: {2}'.format(
                            alert['id'], i['emitter'], str(err)))

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()
        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Mark *alert* inactive and record the cancellation time."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(
            alert['id'], alert['clazz']))

        alert.update({'active': False, 'cancelled': datetime.utcnow()})

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Called by emitter plugins to register themselves (instantiated)."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(
            name, cls))

    def reminder_thread(self):
        """Periodically re-emit active, undismissed alerts per severity schedule."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                if not interval:
                    # severity with no reminder interval configured
                    continue

                if last_emission + timedelta(
                        seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        """Signal service readiness (delegates to the module-level checkin())."""
        checkin()

    def main(self):
        """Parse arguments, initialize all subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        configure_logging('alertd', 'DEBUG')

        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
Beispiel #18
0
class Main(object):
    """dscached daemon (RPC-server variant): caches directory-service users,
    groups and hosts, manages configured directories and serves the dscached
    RPC services both over the dispatcher and its own socket server."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None               # dispatcher client
        self.server = None               # standalone RPC socket server
        self.plugin_dirs = []
        self.plugins = {}                # plugin name -> plugin class
        self.directories = []            # configured Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        """Return configured Directory objects in search order, skipping unknown names."""
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        """Full search order: 'local' and 'system' always come first."""
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory with the given domain name, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory with the given name, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose configured uid/gid range covers the id.

        uid/gid 0 always maps to the 'local' plugin directory. Returns None
        when no directory's range matches (or neither id was given).
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        """Connect to the datastore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on link loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the standalone RPC server on *address* in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON config file; exit(1) if unreadable or not valid JSON."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no '.message' attribute (Py2-ism that
            # raised AttributeError here); log the error itself.
            self.logger.error('Cannot read config file: %s', err)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect, log in, and resume dscached services; retry every second."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in *dir* and run its _init hook; failures are logged."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # deliberately broad: one bad plugin must not kill the daemon
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Called by plugins' _init hooks to register themselves (class, not instance)."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Expose a plugin-provided schema through the dispatcher client."""
        self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure every directory from the datastore.

        A directory that fails to configure is silently skipped (the unused
        'as err' binding was removed; behavior is unchanged).
        """
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException:
                continue

    def load_config(self):
        """Pull cache/search settings from the configstore."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        """Parse arguments, initialize all subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')

        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
Beispiel #19
0
class Context(object):
    """Per-process context for the dispatcher task executor.

    Receives task descriptors from the dispatcher over a unix-socket RPC
    connection, runs them one at a time (loading the task class from its
    plugin file), and reports ROLLBACK/FAILED/FINISHED status back.
    """
    def __init__(self):
        self.service = TaskProxyService(self)
        # One-slot hand-off queue: at most one pending task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        # RPC client connection back to the dispatcher.
        self.conn = None
        # Task instance currently being executed, if any.
        self.instance = None
        # Set while a task is running; cleared in the finally block of main().
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Send a 'task.put_status' update to the dispatcher.

        *result* and *exception* are optional; an exception is serialized
        into the 'error' key.
        """
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        # Forward 'task.progress' events to the running task instance, if any.
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Recursively yield every FileDescriptor found in a dict/list/tuple tree."""
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        """Best-effort close of the given FileDescriptor objects; EBADF etc. ignored."""
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def main(self):
        """Executor entry point: check in with the dispatcher, then loop running tasks.

        argv[1] is the check-in key handed to this process by the dispatcher.
        """
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress', self.task_progress_handler)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                # Blocks until the dispatcher hands over the next task.
                task = self.task.get()
                # Mirror the dispatcher's current logging level.
                logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    sys.path.append('/usr/local/lib/dispatcher/pydev')

                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                # Collect any FileDescriptor objects passed in the task args so
                # they can be closed after the task finishes, pass or fail.
                fds = list(self.collect_fds(task['args']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Attempt rollback when the task class provides one;
                    # rollback failures are reported but do not mask FAILED.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.close_fds(fds)
                    self.running.clear()

            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                # NOTE(review): unlike the sibling prints this traceback goes to
                # stdout — confirm whether stderr was intended.
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
class Context(object):
    """Per-process context for the dispatcher task executor (reduced variant).

    Same structure as the fuller executor Context elsewhere in this file but
    without file-descriptor collection, progress-event forwarding, the
    'user' attribute, or logging-level synchronization.
    """
    def __init__(self):
        self.service = TaskProxyService(self)
        # One-slot hand-off queue: at most one pending task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        # RPC client connection back to the dispatcher.
        self.conn = None
        # Task instance currently being executed, if any.
        self.instance = None
        # Set while a task is running; cleared in the finally block of main().
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Send a 'task.put_status' update to the dispatcher."""
        obj = {
            'status': state,
            'result': None
        }

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def main(self):
        """Executor entry point: check in with the dispatcher, then loop running tasks."""
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        logging.basicConfig(level=logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                # Blocks until the dispatcher hands over the next task.
                task = self.task.get()
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                if task['debugger']:
                    sys.path.append('/usr/local/lib/dispatcher/pydev')

                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = load_module_from_file(name, task['filename'])
                setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

                try:
                    self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.environment = task['environment']
                    self.running.set()
                    result = self.instance.run(*task['args'])
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)), file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Attempt rollback when the task class provides one.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    self.running.clear()

            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
Beispiel #21
0
class Context(object):
    """serviced daemon context.

    Supervises jobs, tracking their processes through kqueue
    EVFILT_PROC events, and exposes management/job RPC services both over
    its own server socket and (when available) the dispatcher connection.
    """
    def __init__(self):
        self.server = None
        self.client = None
        # Job registry keyed by job id.
        self.jobs = {}
        # Set of dependency targets currently provided; gates job starts.
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management',
                                           ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Connect to the dispatcher, reconnecting automatically on loss."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the RPC server on *address* in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Add dependency *targets* and start stopped jobs whose requirements are met.

        The update is deferred by 2 seconds via a Timer.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(
                ', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the job owning *pid*, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop dispatching EVFILT_PROC events to jobs.

        Child processes reported via KQ_NOTE_CHILD that belong to a tracked
        parent job (same process group) are adopted as anonymous jobs;
        others stop being tracked.
        """
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(
                                    job.label))

    def track_pid(self, pid):
        """Register *pid* with kqueue for exit/exec/fork tracking (follows children)."""
        ev = select.kevent(
            pid, select.KQ_FILTER_PROC, select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK
            | select.KQ_NOTE_TRACK, 0, 0)

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove *pid* from kqueue tracking; ignore if already gone."""
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0,
                           0, 0)

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and, when connected, to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Retry connecting/logging in to the dispatcher once a second until it succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Launch the one-shot bootstrap job in a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect, stop the server and exit the process."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """serviced entry point: start the server, bootstrap, run the event loop."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s',
                            metavar='SOCKET',
                            default=DEFAULT_SOCKET_ADDRESS,
                            help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Beispiel #22
0
class Context(object):
    """schedulerd daemon context.

    Runs calendar tasks out of an APScheduler BackgroundScheduler backed by
    the 'calendar_tasks' MongoDB collection, submitting each run as a
    dispatcher task and recording outcomes in 'schedulerd.runs'.
    """
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar-task id -> dispatcher task id while a run is active.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore (exit(1) on failure) and wrap it in a ConfigStore."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Connect to the dispatcher, reconnecting automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start the background scheduler with a MongoDB-backed job store (UTC)."""
        store = MongoDBJobStore(database='freenas',
                                collection='calendar_tasks',
                                client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store},
                                             timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Retry connecting/logging in to the dispatcher once a second until it succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management',
                                             ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit a dispatcher task, wait for it and record the run.

        args[0] is the task name, the rest are its arguments; kwargs carries
        the calendar task's 'id' and 'name'. Emits a CRITICAL alert when the
        task does not finish successfully.
        """
        # NOTE(review): '******' looks like a redacted value (pastebin
        # censoring?) — confirm the intended RUN_AS_USER before relying on it.
        tid = self.client.call_sync(
            'task.submit_with_env', args[0], args[1:], {
                'RUN_AS_USER': '******',
                'CALENDAR_TASK_NAME': kwargs.get('name')
            })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync(
                    'alert.emit', {
                        'name':
                        'scheduler.task.failed',
                        'severity':
                        'CRITICAL',
                        'description':
                        'Task {0} has failed: {1}'.format(
                            kwargs.get('name', tid),
                            result['error']['message']),
                    })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """schedulerd entry point: parse args, initialize subsystems and block."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        parser.add_argument('-f',
                            action='store_true',
                            default=False,
                            help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
Beispiel #23
0
class Main(object):
    """neighbord daemon: registers this host's services with discovery plugins.

    Loads announcement plugins from configured directories and registers
    well-known services (freenas/http/ssh/sftp-ssh) with each of them.
    """
    def __init__(self):
        # NOTE: the original code assigned logging.getLogger('neighbord') and
        # then immediately overwrote it with the root logger; the duplicate
        # (and the duplicate 'self.config = None') is dropped while keeping
        # the final, effective values so behavior is unchanged.
        self.logger = logging.getLogger()
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        """Load the JSON config file into self.config; exit(1) on any error.

        Also caches the plugin directory list from the 'neighbord' section.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Bug fix: on Python 3, IOError/OSError has no '.message'
            # attribute, so the original 'err.message' raised AttributeError
            # instead of logging the real problem.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore (exit(1) on failure) and wrap it in a ConfigStore."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Connect to the dispatcher, reconnecting automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in *dir* as a plugin; failures are logged and skipped."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Best-effort: a broken plugin must not stop the scan.
                self.logger.error('Cannot initialize plugin {0}'.format(f),
                                  exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate plugin class *cls* with this context and register it."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce a service on every loaded plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce this host's standard services; log (don't raise) on failure."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(
                str(err)))

    def connect(self):
        """Retry connecting/logging in to the dispatcher once a second until it succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management',
                                             ManagementService(self))
                self.client.register_service('neighbord.discovery',
                                             DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.
                    format(str(err)))
                time.sleep(1)

    def main(self):
        """neighbord entry point: parse args, wire everything up and block."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            metavar='CONFIG',
                            default=DEFAULT_CONFIGFILE,
                            help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')

        setproctitle.setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
Beispiel #24
0
class Context(object):
    """serviced daemon context (duplicate copy, single-line formatting).

    Supervises jobs, tracking their processes through kqueue EVFILT_PROC
    events, and exposes management/job RPC services both over its own
    server socket and (when available) the dispatcher connection.
    """
    def __init__(self):
        self.server = None
        self.client = None
        # Job registry keyed by job id.
        self.jobs = {}
        # Set of dependency targets currently provided; gates job starts.
        self.provides = set()
        self.lock = RLock()
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Connect to the dispatcher, reconnecting automatically on loss."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the RPC server on *address* in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Add dependency *targets* and start stopped jobs whose requirements are met.

        The update is deferred by 2 seconds via a Timer.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the job owning *pid*, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop dispatching EVFILT_PROC events to jobs.

        Child processes reported via KQ_NOTE_CHILD that belong to a tracked
        parent job (same process group) are adopted as anonymous jobs;
        others stop being tracked.
        """
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        """Register *pid* with kqueue for exit/exec/fork tracking (follows children)."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove *pid* from kqueue tracking; ignore if already gone."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and, when connected, to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Retry connecting/logging in to the dispatcher once a second until it succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Launch the one-shot bootstrap job in a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })

                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect, stop the server and exit the process."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """serviced entry point: start the server, bootstrap, run the event loop."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
Beispiel #25
0
class Context(object):
    """schedulerd daemon context (variant that also registers JSON schemas).

    Runs calendar tasks out of an APScheduler BackgroundScheduler backed by
    the 'calendar_tasks' MongoDB collection, submitting each run as a
    dispatcher task and recording outcomes in 'schedulerd.runs'.
    """
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps calendar-task id -> dispatcher task id while a run is active.
        self.active_tasks = {}

    def init_datastore(self):
        """Open the datastore (exit(1) on failure) and wrap it in a ConfigStore."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Connect to the dispatcher, reconnecting automatically on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start the background scheduler with a MongoDB-backed job store (UTC)."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        """Register the 'calendar-task' and 'calendar-task-status' JSON schemas."""
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        """Retry connecting/logging in to the dispatcher once a second until it succeeds."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit a dispatcher task, wait for it and record the run.

        Emits a CRITICAL alert ('alerts.emit' in this variant) when the task
        does not finish successfully.
        """
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """schedulerd entry point: parse args, initialize subsystems and block."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
Beispiel #26
0
class Main(object):
    """dscached daemon: directory-service cache serving account, group, host
    and idmap lookups over a local RPC socket."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        # Separate TTL caches for each lookup namespace.
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account', self.account_service)
        self.rpc.register_service_instance('dscached.group', self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap', IdmapService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        """Return every configured directory currently in the BOUND state."""
        return list(filter(lambda d: d and d.state == DirectoryState.BOUND, self.directories))

    def get_searched_directories(self):
        """Return BOUND directories, ordered according to the search order."""
        return list(filter(
            lambda d: d and d.state == DirectoryState.BOUND,
            (self.get_directory_by_name(n) for n in self.get_search_order())
        ))

    def get_search_order(self):
        """Return the configured directory search order (list of names)."""
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory serving *domain_name*, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory named *name*, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory whose id range covers *uid* or *gid*, or None.

        uid 0 and gid 0 always map to the 'local' directory.
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def get_home_directory(self, directory, username):
        """Return the home directory path for *username* in *directory*."""
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root, f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        """Block until the etcd.generation service is available on the dispatcher."""
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync('discovery.get_services')
        )

    def init_datastore(self):
        """Connect to the datastore or exit the process if that fails."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on *address* in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON config file; exit the process on I/O or parse errors."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute; str(err) avoids
            # an AttributeError masking the real I/O problem.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect and resume dscached services, retrying every second on failure."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load and initialize every .py plugin found in *dir*."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except Exception:
                # A bare 'except:' would also swallow SystemExit and
                # KeyboardInterrupt; one broken plugin must not kill startup.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Register a plugin class under *name* (called from plugin _init hooks)."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Register a single JSON schema with the dispatcher."""
        self.client.register_schema(name, schema)

    def register_schemas(self):
        """Register every locally declared JSON schema object with the dispatcher."""
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__() for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure every directory defined in the datastore."""
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except Exception:
                # Best effort: log the failure instead of silently dropping
                # the directory, then keep loading the remaining ones.
                self.logger.error('Cannot initialize directory', exc_info=True)
                continue

    def load_config(self):
        """Pull cache and search-order settings from the configstore."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get('system.home_directory_root')

    def checkin(self):
        """Delegate to the module-level checkin() helper."""
        checkin()

    def main(self):
        """Entry point: parse arguments, bring up all subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('dscached', 'DEBUG')

        setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
Beispiel #27
0
class RESTApi(object):
    """Falcon-based REST gateway exposing dispatcher RPCs and tasks over HTTP."""

    def __init__(self):
        self.logger = logging.getLogger('restd')
        self._cruds = []
        self._threads = []
        self._rpcs = {}
        self._schemas = {}
        self._used_schemas = set()
        self._services = {}
        self._tasks = {}
        self.api = falcon.API(middleware=[
            AuthMiddleware(),
            JSONTranslator(),
        ])
        self.api.add_route('/', SwaggerResource(self))

        gevent.signal(signal.SIGINT, self.die)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.dispatcher = Client()
        self.dispatcher.on_error(on_error)
        self.connect()

    def init_metadata(self):
        """Cache task, schema and RPC-method metadata from the dispatcher."""
        self._tasks = self.dispatcher.call_sync('discovery.get_tasks')
        self._schemas = self.dispatcher.call_sync('discovery.get_schema')
        for service in self.dispatcher.call_sync('discovery.get_services'):
            self._services[service] = self.dispatcher.call_sync('discovery.get_methods', service)
            for method in self._services[service]:
                self._rpcs['{0}.{1}'.format(service, method['name'])] = method

    def load_plugins(self):
        """Import every .py file from the plugins directory and run its _init hook."""
        pluginsdir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'plugins'))
        for i in glob.glob1(pluginsdir, "*.py"):
            try:
                loader = importlib.machinery.SourceFileLoader(i.split('.')[0], os.path.join(pluginsdir, i))
                mod = loader.load_module()
            except:
                # Plugin load failures are fatal: log and re-raise.
                self.logger.error('Failed to load plugin %s', i, exc_info=True)
                raise
            mod._init(self)

    def connect(self):
        """Connect to the dispatcher, retrying every second until it succeeds."""
        while True:
            try:
                self.dispatcher.connect('unix:')
                self.dispatcher.login_service('restd')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def __call__(self, environ, start_response):
        """WSGI entry point: strips the /api/v2.0 prefix when proxied (X-Real-IP set)."""
        if 'HTTP_X_REAL_IP' in environ:
            environ['PATH_INFO'] = environ.get('PATH_INFO', '').replace('/api/v2.0', '', 1)
        return self.api.__call__(environ, start_response)

    def register_crud(self, klass):
        """Instantiate a CRUD resource class and keep a reference to it."""
        ins = klass(self, self.dispatcher)
        self._cruds.append(ins)

    def register_singleitem(self, klass):
        """Instantiate a single-item resource class (self-registering)."""
        klass(self, self.dispatcher)

    def register_resource(self, klass):
        """Instantiate a plain resource class (self-registering)."""
        klass(self)

    def run(self):
        """Connect, load plugins and serve HTTP on port 8889 until stopped."""
        self.init_dispatcher()
        self.init_metadata()
        self.load_plugins()

        server4 = WSGIServer(('', 8889), self, handler_class=RESTWSGIHandler)
        self._threads = [gevent.spawn(server4.serve_forever)]
        checkin()
        gevent.joinall(self._threads)

    def die(self, *args):
        """SIGINT handler: kill worker greenlets and exit the process."""
        gevent.killall(self._threads)
        sys.exit(0)
Beispiel #28
0
class Context(object):
    """schedulerd daemon context: runs calendar tasks via a background scheduler."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # Maps job id -> dispatcher task id for jobs currently running.
        self.active_tasks = {}

    def init_datastore(self):
        """Connect to the datastore or exit the process if that fails."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start the scheduler with persistent ('default') and in-memory ('temp') job stores."""
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect and register scheduler services, retrying every second on failure."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Run a calendar task, wait for it, alert on failure and record the run."""
        # NOTE(review): the RUN_AS_USER value appears redacted in this copy of
        # the source ('******') -- confirm against the original file.
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                # Alerting is best effort; still record the run below.
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event with the given name and payload to the dispatcher."""
        self.client.emit_event(name, params)

    def checkin(self):
        """Delegate to the module-level checkin() helper."""
        checkin()

    def main(self):
        """Entry point: parse arguments, bring up subsystems and block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
Beispiel #29
0
class Main(object):
    """clid daemon: hosts a server-side CLI instance reachable over the dispatcher."""

    def __init__(self):
        # The original __init__ assigned self.config and self.logger twice;
        # the duplicates were copy-paste leftovers. The named 'clid' logger is
        # kept, consistent with the other daemons (the second assignment used
        # to silently replace it with the unnamed root logger).
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI context, load its plugins and prepare the main loop."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect and register clid services, retrying every second on failure."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Entry point: parse arguments, connect and serve until killed."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/clid.log', 'DEBUG')

        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
Beispiel #30
0
class Main(object):
    """etcd daemon: renders template-driven configuration files under the
    configured mount point (normally /etc)."""

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        # Maps a managed file's relative name (without extension) to the
        # absolute path of its template.
        self.managed_files = {}

    def init_datastore(self):
        """Connect to the datastore or exit the process if that fails."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect and register etcd services, retrying every second on failure."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template type."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON config file; exit the process on I/O or parse errors."""
        try:
            # Context manager closes the handle even when json.load() raises;
            # the original open()/close() pair leaked it on parse errors.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Python 3 exceptions have no .message attribute; str(err) avoids
            # an AttributeError masking the real I/O problem.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured plugin directory for managed file templates."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Walk *dir* and register every template with a known renderer extension."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                # First template found for a given target file wins.
                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render and return the contents of managed file *file_path*.

        Raises RpcException(ENOENT) for unknown files; rendering errors are
        returned as a commented-out failure marker instead of raising.
        """
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warn('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event with the given name and payload to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Entry point: parse arguments, load plugins and serve until killed."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')

        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
Beispiel #31
0
class Context(object):
    """Task executor process context.

    Runs in a process spawned by the dispatcher: pulls one task at a time off
    an internal queue, loads the task class from its source file, executes it
    and reports status (including rollback on failure) back over the
    dispatcher connection.
    """

    def __init__(self):
        self.service = TaskProxyService(self)
        # Queue of size 1: this process executes a single task at a time.
        self.task = queue.Queue(1)
        self.datastore = None
        self.configstore = None
        self.conn = None
        # Currently executing task instance (None while idle).
        self.instance = None
        # Cache of loaded task modules keyed by filename, so repeated tasks
        # from the same file are not re-imported.
        self.module_cache = {}
        self.running = Event()

    def put_status(self, state, result=None, exception=None):
        """Report task state (with optional result or serialized error) to the dispatcher."""
        obj = {'status': state, 'result': None}

        if result is not None:
            obj['result'] = result

        if exception is not None:
            obj['error'] = serialize_error(exception)

        self.conn.call_sync('task.put_status', obj)

    def task_progress_handler(self, args):
        """Forward 'task.progress' events to the currently running task, if any."""
        if self.instance:
            self.instance.task_progress_handler(args)

    def collect_fds(self, obj):
        """Recursively yield every FileDescriptor found in nested dicts/lists/tuples."""
        if isinstance(obj, dict):
            for v in obj.values():
                if isinstance(v, FileDescriptor):
                    yield v
                else:
                    yield from self.collect_fds(v)

        if isinstance(obj, (list, tuple)):
            for o in obj:
                if isinstance(o, FileDescriptor):
                    yield o
                else:
                    yield from self.collect_fds(o)

    def close_fds(self, fds):
        """Best-effort close of the given FileDescriptor objects."""
        for i in fds:
            try:
                os.close(i.fd)
            except OSError:
                pass

    def run_task_hooks(self, instance, task, type, **extra_env):
        """Run all hooks of the given *type* registered on *task* as subtasks.

        A hook's optional condition callable is evaluated against the task
        arguments; hooks whose condition is false (or raises) are skipped.
        """
        for hook, props in task['hooks'].get(type, {}).items():
            try:
                if props['condition'] and not props['condition'](
                        *task['args']):
                    continue
            except BaseException as err:
                print(err)
                continue

            instance.join_subtasks(
                instance.run_subtask(hook, *task['args'], **extra_env))

    def main(self):
        """Process entry point: connect to the dispatcher and execute tasks forever.

        Expects a single command-line argument: the check-in key handed over
        by the dispatcher when it spawned this process.
        """
        if len(sys.argv) != 2:
            print("Invalid number of arguments", file=sys.stderr)
            sys.exit(errno.EINVAL)

        key = sys.argv[1]
        configure_logging(None, logging.DEBUG)

        self.datastore = get_datastore()
        self.configstore = ConfigStore(self.datastore)
        self.conn = Client()
        self.conn.connect('unix:')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.call_sync('management.enable_features',
                            ['streaming_responses'])
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        self.conn.register_event_handler('task.progress',
                                         self.task_progress_handler)
        # Tell the dispatcher we are alive and ready to receive a task.
        self.conn.call_sync('task.checkin', key)
        setproctitle.setproctitle('task executor (idle)')

        while True:
            try:
                # Blocks until the dispatcher pushes a task via TaskProxyService.
                task = self.task.get()
                logging.root.setLevel(
                    self.conn.call_sync('management.get_logging_level'))
                setproctitle.setproctitle('task executor (tid {0})'.format(
                    task['id']))

                if task['debugger']:
                    # Attach to a remote pydev debugger before running the task.
                    sys.path.append('/usr/local/lib/dispatcher/pydev')

                    import pydevd
                    host, port = task['debugger']
                    pydevd.settrace(host,
                                    port=port,
                                    stdoutToServer=True,
                                    stderrToServer=True)

                name, _ = os.path.splitext(os.path.basename(task['filename']))
                module = self.module_cache.get(task['filename'])
                if not module:
                    module = load_module_from_file(name, task['filename'])
                    self.module_cache[task['filename']] = module

                setproctitle.setproctitle('task executor (tid {0})'.format(
                    task['id']))
                fds = list(self.collect_fds(task['args']))

                try:
                    dispatcher = DispatcherWrapper(self.conn)
                    self.instance = getattr(module,
                                            task['class'])(dispatcher,
                                                           self.datastore)
                    self.instance.configstore = self.configstore
                    self.instance.user = task['user']
                    self.instance.environment = task['environment']
                    self.running.set()
                    self.run_task_hooks(self.instance, task, 'before')
                    result = self.instance.run(*task['args'])
                    self.run_task_hooks(self.instance,
                                        task,
                                        'after',
                                        result=result)
                except BaseException as err:
                    print("Task exception: {0}".format(str(err)),
                          file=sys.stderr)
                    traceback.print_exc(file=sys.stderr)

                    # Attempt rollback if the task class supports it.
                    if hasattr(self.instance, 'rollback'):
                        self.put_status('ROLLBACK')
                        try:
                            self.instance.rollback(*task['args'])
                        except BaseException as rerr:
                            print("Task exception during rollback: {0}".format(
                                str(rerr)),
                                  file=sys.stderr)
                            traceback.print_exc(file=sys.stderr)

                    # Main task is already failed at this point, so ignore hook errors
                    with contextlib.suppress(RpcException):
                        self.run_task_hooks(self.instance,
                                            task,
                                            'error',
                                            error=serialize_error(err))

                    self.put_status('FAILED', exception=err)
                else:
                    self.put_status('FINISHED', result=result)
                finally:
                    # Always release file descriptors and the running flag,
                    # regardless of how the task ended.
                    self.close_fds(fds)
                    self.running.clear()

            except RpcException as err:
                print("RPC failed: {0}".format(str(err)), file=sys.stderr)
                print(traceback.format_exc(), flush=True)
                sys.exit(errno.EBADMSG)
            except socket.error as err:
                print("Cannot connect to dispatcher: {0}".format(str(err)),
                      file=sys.stderr)
                sys.exit(errno.ETIMEDOUT)

            if task['debugger']:
                import pydevd
                pydevd.stoptrace()

            setproctitle.setproctitle('task executor (idle)')
Beispiel #32
0
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.emitters = {}

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit the process on failure."""
        try:
            store = datastore.get_datastore()
        except datastore.DatastoreException as exc:
            self.logger.error('Cannot initialize datastore: %s', str(exc))
            sys.exit(1)
        else:
            self.datastore = store
            self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client, install a reconnect-on-drop handler, and connect."""
        def handle_error(reason, **kwargs):
            # Transparently reconnect whenever the dispatcher drops or logs us out.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(handle_error)
        self.connect()

    def init_reminder(self):
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        try:
            f = open(filename, 'r')
            self.config = json.load(f)
            f.close()
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and bring up the alertd RPC services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                names = ('alertd.management', 'alertd.alert', 'alertd.debug')
                instances = (ManagementService(self), AlertService(self), DebugService())
                # Register everything first, then resume, preserving the
                # original bring-up ordering.
                for name, svc in zip(names, instances):
                    self.client.register_service(name, svc)
                for name in names:
                    self.client.resume_service(name)
                return
            except (OSError, RpcException) as exc:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(exc)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        """Send *alert* through every matching alert filter's emitter, then persist it.

        A filter matches when ALL of its predicates hold: the inner for/else
        fires the emitter only if no predicate broke out of the loop.
        Predicates with an unknown operator are skipped, not treated as
        failures. Updates send_count/last_emitted_at and deactivates one-shot
        alerts before writing the alert back to the datastore.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                # Unknown operator: ignore this predicate entirely.
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                # No predicate failed -> the filter matched this alert.
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    # Re-sends go through emit_again so emitters can distinguish
                    # reminders from the first notification.
                    if alert['send_count'] > 0:
                        emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'],
                        i['emitter'],
                        str(err))
                    )

        alert['send_count'] += 1
        # NOTE(review): naive UTC timestamp; reminder_thread compares it
        # against datetime.utcnow(), so both sides stay naive-UTC.
        alert['last_emitted_at'] = datetime.utcnow()

        if alert['one_shot']:
            alert['active'] = False

        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))

        alert.update({
            'active': False,
            'cancelled': datetime.utcnow()
        })

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        """Background loop: periodically re-emit active, undismissed alerts.

        Runs forever in a daemon thread (started by init_reminder). Every
        REMINDER_SECONDS it re-emits any alert whose severity's reminder
        interval has elapsed since its last emission.
        """
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                # Alerts never emitted yet fall back to their creation time.
                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                # A falsy interval means "no reminders for this severity".
                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        # Delegate to the module-level checkin() helper after startup completes
        # (presumably a service-manager/watchdog readiness signal — confirm at
        # the import site).
        checkin()

    def main(self):
        """Daemon entry point: parse arguments, wire up subsystems, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/alertd.log', 'DEBUG')

        setproctitle('alertd')
        # self.config briefly holds the config file *path*; parse_config
        # replaces it with the parsed dict.
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        # Blocks for the daemon's lifetime, serving RPC requests.
        self.client.wait_forever()