Example 1
 def setUp(self):
     super(_TestPathDispatcherBase, self).setUp()
     self.dispatcher = PathDispatcher()
     self.handlers = {
         "delete": {},
         "set": {},
     }
     self.register("/")
     self.register("/a")
     self.register("/a/<b>")
     self.register("/a/<b>/c")
     self.register("/a/<b>/d")
     self.register("/a/<b>/d/<e>")
Example 2
 def setUp(self):
     super(_TestPathDispatcherBase, self).setUp()
     self.dispatcher = PathDispatcher()
     self.handlers = {"delete": {}, "set": {}}
     self.register("/")
     self.register("/a")
     self.register("/a/<b>")
     self.register("/a/<b>/c")
     self.register("/a/<b>/d")
     self.register("/a/<b>/d/<e>")
Example 3
 def __init__(self, config, etcd_api, status_reporter, hosts_ipset):
     super(_FelixEtcdWatcher, self).__init__()
     self._config = config
     self._etcd_api = etcd_api
     self._status_reporter = status_reporter
     self.hosts_ipset = hosts_ipset
     # Whether we've been in sync with etcd at some point.
     self._been_in_sync = False
     # Keep track of the config loaded from etcd so we can spot if it
     # changes.
     self.last_global_config = None
     self.last_host_config = None
     self.my_config_dir = dir_for_per_host_config(self._config.HOSTNAME)
     # Events triggered by the EtcdAPI Actor to tell us to load the config
     # and start polling.  These are one-way flags.
     self.load_config = Event()
     self.begin_polling = Event()
     # Event that we trigger once the config is loaded.
     self.configured = Event()
     # Polling state initialized at poll start time.
     self.splitter = None
     # Next-hop IP addresses of our hosts, if populated in etcd.
     self.ipv4_by_hostname = {}
     # Forces a resync after the current poll if set.  Safe to set from
     # another thread.  Automatically reset to False after the resync is
     # triggered.
     self.resync_requested = False
     self.dispatcher = PathDispatcher()
     # The Popen object for the driver.
     self._driver_process = None
     # Stats.
     self.read_count = 0
     self.msgs_processed = 0
     self.last_rate_log_time = monotonic_time()
     # Register for events when values change.
     self._register_paths()
     self._usage_report_greenlet = gevent.Greenlet(
         self._periodically_usage_report)
Example 4
    def __init__(self, config):
        super(EtcdWatcher, self).__init__()
        self.config = config
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        reg(VERSION_DIR, on_del=self._resync)
        reg(POLICY_DIR, on_del=self._resync)
        reg(PROFILE_DIR, on_del=self._resync)
        reg(CONFIG_DIR, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(HOST_DIR, on_del=self._resync)
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)
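The comment in this example about recursive directory deletions is why so many directory-level on_del handlers are registered: etcd reports a recursive delete as a single event on the directory key, so only a handler registered for that directory (or one of its parents) ever sees it. The fragment below is a minimal sketch of that behaviour; the key layout is illustrative and the PathDispatcher import path is an assumption, not taken from this example.

# Minimal sketch, not Calico's real key layout: a recursive delete of a host
# directory arrives as ONE event on the directory key, so only the
# directory-level handler fires; per-endpoint handlers never see it.
from mock import Mock
import etcd

from calico.felix.fetcd import PathDispatcher  # assumed import path

dispatcher = PathDispatcher()
on_host_delete = Mock()
on_endpoint_delete = Mock()
dispatcher.register("/example/host/<hostname>", on_del=on_host_delete)
dispatcher.register("/example/host/<hostname>/endpoint/<endpoint_id>",
                    on_del=on_endpoint_delete)

# Simulate etcd deleting a whole host directory recursively.
event = Mock(spec=etcd.EtcdResult)
event.key = "/example/host/hostA"
event.action = "delete"
dispatcher.handle_event(event)

on_host_delete.assert_called_once_with(event, hostname="hostA")
assert not on_endpoint_delete.called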
Example 5
 def __init__(self, config, etcd_api, status_reporter, hosts_ipset):
     super(_FelixEtcdWatcher, self).__init__()
     self._config = config
     self._etcd_api = etcd_api
     self._status_reporter = status_reporter
     self.hosts_ipset = hosts_ipset
     # Whether we've been in sync with etcd at some point.
     self._been_in_sync = False
     # Keep track of the config loaded from etcd so we can spot if it
     # changes.
     self.last_global_config = None
     self.last_host_config = None
     self.my_config_dir = dir_for_per_host_config(self._config.HOSTNAME)
     # Events triggered by the EtcdAPI Actor to tell us to load the config
     # and start polling.  These are one-way flags.
     self.load_config = Event()
     self.begin_polling = Event()
     # Event that we trigger once the config is loaded.
     self.configured = Event()
     # Polling state initialized at poll start time.
     self.splitter = None
     # Next-hop IP addresses of our hosts, if populated in etcd.
     self.ipv4_by_hostname = {}
     # Forces a resync after the current poll if set.  Safe to set from
     # another thread.  Automatically reset to False after the resync is
     # triggered.
     self.resync_requested = False
     self.dispatcher = PathDispatcher()
     # The Popen object for the driver.
     self._driver_process = None
     # Stats.
     self.read_count = 0
     self.msgs_processed = 0
     self.last_rate_log_time = monotonic_time()
     # Register for events when values change.
     self._register_paths()
Example 6
class _TestPathDispatcherBase(BaseTestCase):
    """
    Abstract base class for Dispatcher tests.
    """
    # Etcd action that this class tests.
    action = None
    # Expected handler type, "set" or "delete".
    expected_handlers = None

    def setUp(self):
        super(_TestPathDispatcherBase, self).setUp()
        self.dispatcher = PathDispatcher()
        self.handlers = {
            "delete": {},
            "set": {},
        }
        self.register("/")
        self.register("/a")
        self.register("/a/<b>")
        self.register("/a/<b>/c")
        self.register("/a/<b>/d")
        self.register("/a/<b>/d/<e>")

    def register(self, key):
        m_on_set = Mock()
        m_on_del = Mock()
        self.dispatcher.register(key, on_set=m_on_set, on_del=m_on_del)
        self.handlers["set"][key.strip("/")] = m_on_set
        self.handlers["delete"][key.strip("/")] = m_on_del

    def assert_handled(self, key, exp_handler=SAME_AS_KEY, **exp_captures):
        if exp_handler is SAME_AS_KEY:
            exp_handler = key
        if isinstance(exp_handler, types.StringTypes):
            exp_handler = exp_handler.strip("/")
        m_response = Mock(spec=etcd.EtcdResult)
        m_response.key = key
        m_response.action = self.action
        self.dispatcher.handle_event(m_response)
        exp_handlers = self.handlers[self.expected_handlers]
        for handler_key, handler in exp_handlers.iteritems():
            assert isinstance(handler, Mock)
            if handler_key == exp_handler:
                continue
            self.assertFalse(handler.called,
                             "Unexpected set handler %s was called for "
                             "key %s" % (handler_key, key))
        unexp_handlers = self.handlers[self.unexpected_handlers]
        for handler_key, handler in unexp_handlers.iteritems():
            assert isinstance(handler, Mock)
            self.assertFalse(handler.called,
                             "Unexpected del handler %s was called for "
                             "key %s" % (handler_key, key))
        if exp_handler is not None:
            exp_handlers[exp_handler].assert_called_once_with(
                m_response, **exp_captures)

    @property
    def unexpected_handlers(self):
        if self.expected_handlers == "set":
            return "delete"
        else:
            return "set"

    def test_dispatch_root(self):
        self.assert_handled("/")

    def test_dispatch_no_captures(self):
        self.assert_handled("/a")

    def test_dispatch_capture(self):
        self.assert_handled("/a/bval", exp_handler="/a/<b>", b="bval")

    def test_dispatch_after_capture(self):
        self.assert_handled("/a/bval/c", exp_handler="/a/<b>/c", b="bval")

    def test_dispatch_after_capture_2(self):
        self.assert_handled("/a/bval/d", exp_handler="/a/<b>/d", b="bval")

    def test_multi_capture(self):
        self.assert_handled("/a/bval/d/eval",
                            exp_handler="/a/<b>/d/<e>",
                            b="bval", e="eval")

    def test_non_match(self):
        self.assert_handled("/a/bval/c/eval", exp_handler=None)
        self.assert_handled("/foo", exp_handler=None)

    def test_cover_no_match(self):
        m_result = Mock(spec=etcd.EtcdResult)
        m_result.key = "/a"
        m_result.action = "unknown"
        self.dispatcher.handle_event(m_result)
        for handlers in self.handlers.itervalues():
            for key, handler in handlers.iteritems():
                self.assertFalse(handler.called,
                                 msg="Unexpected handler called: %s" % key)
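The class above is abstract: it never sets action or expected_handlers itself. Below is a minimal sketch of how concrete subclasses would fill those in; the class names and action strings are illustrative rather than copied from the original test suite.

class TestDispatcherSet(_TestPathDispatcherBase):
    # Feed "set" events into the dispatcher and expect the on_set handlers.
    action = "set"
    expected_handlers = "set"


class TestDispatcherDelete(_TestPathDispatcherBase):
    # Feed "delete" events into the dispatcher and expect the on_del handlers.
    action = "delete"
    expected_handlers = "delete"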
Example 7
class _TestPathDispatcherBase(BaseTestCase):
    """
    Abstract base class for Dispatcher tests.
    """
    # Etcd action that this class tests.
    action = None
    # Expected handler type, "set" or "delete".
    expected_handlers = None

    def setUp(self):
        super(_TestPathDispatcherBase, self).setUp()
        self.dispatcher = PathDispatcher()
        self.handlers = {
            "delete": {},
            "set": {},
        }
        self.register("/")
        self.register("/a")
        self.register("/a/<b>")
        self.register("/a/<b>/c")
        self.register("/a/<b>/d")
        self.register("/a/<b>/d/<e>")

    def register(self, key):
        m_on_set = Mock()
        m_on_del = Mock()
        self.dispatcher.register(key, on_set=m_on_set, on_del=m_on_del)
        self.handlers["set"][key.strip("/")] = m_on_set
        self.handlers["delete"][key.strip("/")] = m_on_del

    def assert_handled(self, key, exp_handler=SAME_AS_KEY, **exp_captures):
        if exp_handler is SAME_AS_KEY:
            exp_handler = key
        if isinstance(exp_handler, types.StringTypes):
            exp_handler = exp_handler.strip("/")
        m_response = Mock(spec=etcd.EtcdResult)
        m_response.key = key
        m_response.action = self.action
        self.dispatcher.handle_event(m_response)
        exp_handlers = self.handlers[self.expected_handlers]
        for handler_key, handler in exp_handlers.iteritems():
            assert isinstance(handler, Mock)
            if handler_key == exp_handler:
                continue
            self.assertFalse(
                handler.called, "Unexpected set handler %s was called for "
                "key %s" % (handler_key, key))
        unexp_handlers = self.handlers[self.unexpected_handlers]
        for handler_key, handler in unexp_handlers.iteritems():
            assert isinstance(handler, Mock)
            self.assertFalse(
                handler.called, "Unexpected del handler %s was called for "
                "key %s" % (handler_key, key))
        if exp_handler is not None:
            exp_handlers[exp_handler].assert_called_once_with(
                m_response, **exp_captures)

    @property
    def unexpected_handlers(self):
        if self.expected_handlers == "set":
            return "delete"
        else:
            return "set"

    def test_dispatch_root(self):
        self.assert_handled("/")

    def test_dispatch_no_captures(self):
        self.assert_handled("/a")

    def test_dispatch_capture(self):
        self.assert_handled("/a/bval", exp_handler="/a/<b>", b="bval")

    def test_dispatch_after_capture(self):
        self.assert_handled("/a/bval/c", exp_handler="/a/<b>/c", b="bval")

    def test_dispatch_after_capture_2(self):
        self.assert_handled("/a/bval/d", exp_handler="/a/<b>/d", b="bval")

    def test_multi_capture(self):
        self.assert_handled("/a/bval/d/eval",
                            exp_handler="/a/<b>/d/<e>",
                            b="bval",
                            e="eval")

    def test_non_match(self):
        self.assert_handled("/a/bval/c/eval", exp_handler=None)
        self.assert_handled("/foo", exp_handler=None)

    def test_cover_no_match(self):
        m_result = Mock(spec=etcd.EtcdResult)
        m_result.key = "/a"
        m_result.action = "unknown"
        self.dispatcher.handle_event(m_result)
        for handlers in self.handlers.itervalues():
            for key, handler in handlers.iteritems():
                self.assertFalse(handler.called,
                                 msg="Unexpected handler called: %s" % key)
Example 8
    def __init__(self, config, hosts_ipset):
        super(_EtcdWatcher, self).__init__()
        self.config = config
        self.hosts_ipset = hosts_ipset

        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()

        # Flag used to trigger a resync.  This is modified from other
        # greenlets, which is safe in Python.
        self.resync_after_current_poll = False

        # Event that we trigger once the config is loaded.
        self.configured = Event()

        # Etcd client, initialised lazily.
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Polling state initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        for key in RESYNC_KEYS:
            reg(key, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set,
            on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)
Example 9
class _EtcdWatcher(gevent.Greenlet):
    """
    Greenlet that watches the etcd data model for changes.

    (1) Waits for the load_config event to be triggered.
    (2) Connects to etcd and waits for the Ready flag to be set,
        indicating the data model is consistent.
    (3) Loads the config from etcd and passes it to the config object.
    (4) Waits for the begin_polling Event to be triggered.
    (5) Loads a complete snapshot from etcd and passes it to the
        UpdateSplitter.
    (6) Watches etcd for changes, sending them incrementally to the
        UpdateSplitter.
    (On etcd error) starts again from step (5)

    This greenlet is expected to be managed by the EtcdAPI Actor.
    """
    def __init__(self, config, hosts_ipset):
        super(_EtcdWatcher, self).__init__()
        self.config = config
        self.hosts_ipset = hosts_ipset

        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()

        # Flag used to trigger a resync.  This is modified from other
        # greenlets, which is safe in Python.
        self.resync_after_current_poll = False

        # Event that we trigger once the config is loaded.
        self.configured = Event()

        # Etcd client, initialised lazily.
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Polling state initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        for key in RESYNC_KEYS:
            reg(key, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set,
            on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)

    @logging_exceptions
    def _run(self):
        """
        Greenlet main loop: loads the initial dump from etcd and then
        monitors for changes and feeds them to the splitter.
        """
        self.load_config.wait()
        while True:
            _log.info("Reconnecting and loading snapshot from etcd...")
            self._reconnect(copy_cluster_id=False)
            self._wait_for_ready()

            while not self.configured.is_set():
                self._load_config()
                # Unblock anyone who's waiting on the config.
                self.configured.set()

            if not self.begin_polling.is_set():
                _log.info("etcd worker about to wait for begin_polling event")
            self.begin_polling.wait()

            try:
                # Load initial dump from etcd.  First just get all the
                # endpoints and profiles by id.  The response contains a
                # generation ID allowing us to then start polling for updates
                # without missing any.
                self._load_initial_dump()
                while True:
                    # Wait for something to change.
                    response = self._wait_for_etcd_event()
                    self.dispatcher.handle_event(response)
            except ResyncRequired:
                _log.info("Polling aborted, doing resync.")

    def _reconnect(self, copy_cluster_id=True):
        etcd_addr = self.config.ETCD_ADDR
        if ":" in etcd_addr:
            host, port = etcd_addr.split(":")
            port = int(port)
        else:
            host = etcd_addr
            port = 4001
        if self.client and copy_cluster_id:
            old_cluster_id = self.client.expected_cluster_id
            _log.info("(Re)connecting to etcd. Old etcd cluster ID was %s.",
                      old_cluster_id)
        else:
            _log.info("(Re)connecting to etcd. No previous cluster ID.")
            old_cluster_id = None
        self.client = etcd.Client(host=host,
                                  port=port,
                                  expected_cluster_id=old_cluster_id)

    def _wait_for_ready(self):
        _log.info("Waiting for etcd to be ready...")
        ready = False
        while not ready:
            try:
                db_ready = self.client.read(READY_KEY, timeout=10).value
            except EtcdKeyNotFound:
                _log.warn("Ready flag not present in etcd; felix will pause "
                          "updates until the orchestrator sets the flag.")
                db_ready = "false"
            except EtcdException as e:
                # Note: we don't log the stack trace because it's too
                # spammy and adds little.
                _log.error(
                    "Failed to retrieve ready flag from etcd (%r). "
                    "Felix will not receive updates until the "
                    "connection to etcd is restored.", e)
                db_ready = "false"

            if db_ready == "true":
                _log.info("etcd is ready.")
                ready = True
            else:
                _log.info("etcd not ready.  Will retry.")
                gevent.sleep(RETRY_DELAY)
                continue

    def _load_config(self):
        """
        Loads our start-of-day configuration from etcd.  Does not return
        until the config is successfully loaded.
        """
        while True:
            try:
                global_cfg = self.client.read(CONFIG_DIR)
                global_dict = _build_config_dict(global_cfg)

                try:
                    host_cfg = self.client.read(self.my_config_dir)
                    host_dict = _build_config_dict(host_cfg)
                except EtcdKeyNotFound:
                    # It is not an error for there to be no per-host
                    # config; default to empty.
                    _log.info("No configuration overrides for this node")
                    host_dict = {}
            except (EtcdKeyNotFound, EtcdException) as e:
                # Note: we don't log the stack trace because it's too
                # spammy and adds little.
                _log.error(
                    "Failed to read config. etcd may be down or "
                    "the data model may not be ready: %r. Will "
                    "retry.", e)
                gevent.sleep(RETRY_DELAY)
            else:
                self.config.report_etcd_config(host_dict, global_dict)
                return

    def _load_initial_dump(self):
        """
        Loads a snapshot from etcd and passes it to the update splitter.

        :raises ResyncRequired: if the Ready flag is not set in the snapshot.
        """
        initial_dump = self.client.read(VERSION_DIR, recursive=True)
        _log.info("Loaded snapshot from etcd cluster %s, parsing it...",
                  self.client.expected_cluster_id)
        rules_by_id = {}
        tags_by_id = {}
        endpoints_by_id = {}
        ipv4_pools_by_id = {}
        self.endpoint_ids_per_host.clear()
        self.ipv4_by_hostname.clear()
        still_ready = False
        for child in initial_dump.children:
            profile_id, rules = parse_if_rules(child)
            if profile_id:
                rules_by_id[profile_id] = rules
                continue
            profile_id, tags = parse_if_tags(child)
            if profile_id:
                tags_by_id[profile_id] = tags
                continue
            endpoint_id, endpoint = parse_if_endpoint(self.config, child)
            if endpoint_id and endpoint:
                endpoints_by_id[endpoint_id] = endpoint
                self.endpoint_ids_per_host[endpoint_id.host].add(endpoint_id)
                continue
            pool_id, pool = parse_if_ipam_v4_pool(child)
            if pool_id and pool:
                ipv4_pools_by_id[pool_id] = pool
                continue
            if self.config.IP_IN_IP_ENABLED:
                hostname, ip = parse_if_host_ip(child)
                if hostname and ip:
                    self.ipv4_by_hostname[hostname] = ip
                    continue

            # Double-check the flag hasn't changed since we read it before.
            if child.key == READY_KEY:
                if child.value == "true":
                    still_ready = True
                else:
                    _log.warning("Aborting resync because ready flag was "
                                 "unset since we read it.")
                    raise ResyncRequired()

        if not still_ready:
            _log.warn("Aborting resync; ready flag no longer present.")
            raise ResyncRequired()

        # Actually apply the snapshot. This does not return anything, but
        # just sends the relevant messages to the relevant threads to make
        # all the processing occur.
        _log.info("Snapshot parsed, passing to update splitter")
        self.splitter.apply_snapshot(rules_by_id,
                                     tags_by_id,
                                     endpoints_by_id,
                                     ipv4_pools_by_id,
                                     async=True)
        if self.config.IP_IN_IP_ENABLED:
            # We only support IPv4 for host tracking right now so there's not
            # much point in going via the splitter.
            # FIXME Support IP-in-IP for IPv6.
            _log.info("Sending (%d) host IPs to ipset.",
                      len(self.ipv4_by_hostname))
            self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                             async=True)

        # The etcd_index is the high-water-mark for the snapshot, record that
        # we want to poll starting at the next index.
        self.next_etcd_index = initial_dump.etcd_index + 1

    def _wait_for_etcd_event(self):
        """
        Polls etcd until something changes.

        Retries on read timeouts and other non-fatal errors.

        :returns: The etcd response object for the change.
        :raises ResyncRequired: If we get out of sync with etcd or hit
            a fatal error.
        """
        response = None
        while not response:
            if self.resync_after_current_poll:
                _log.debug("Told to resync, aborting poll.")
                self.resync_after_current_poll = False
                raise ResyncRequired()

            try:
                _log.debug("About to wait for etcd update %s",
                           self.next_etcd_index)
                response = self.client.read(VERSION_DIR,
                                            wait=True,
                                            waitIndex=self.next_etcd_index,
                                            recursive=True,
                                            timeout=Timeout(connect=10,
                                                            read=90),
                                            check_cluster_uuid=True)
                _log.debug("etcd response: %r", response)
            except (ReadTimeoutError, SocketTimeout) as e:
                # This is expected when we're doing a poll and nothing
                # happened. socket timeout doesn't seem to be caught by
                # urllib3 1.7.1.  Simply reconnect.
                _log.debug("Read from etcd timed out (%r), retrying.", e)
                # Force a reconnect to ensure urllib3 doesn't recycle the
                # connection.  (We were seeing this with urllib3 1.7.1.)
                self._reconnect()
            except (ConnectTimeoutError, urllib3.exceptions.HTTPError,
                    httplib.HTTPException) as e:
                # We don't log the stack trace here because it can spam the
                # logs heavily if the requests keep failing.  The errors are
                # very descriptive anyway.
                _log.warning(
                    "Low-level HTTP error, reconnecting to "
                    "etcd: %r.", e)
                self._reconnect()
            except (EtcdClusterIdChanged, EtcdEventIndexCleared) as e:
                _log.warning(
                    "Out of sync with etcd (%r).  Reconnecting "
                    "for full sync.", e)
                raise ResyncRequired()
            except EtcdException as e:
                # Sadly, python-etcd doesn't have a dedicated exception
                # for the "no more machines in cluster" error. Parse the
                # message:
                msg = (e.message or "unknown").lower()
                # Limit our retry rate in case etcd is down.
                gevent.sleep(1)
                if "no more machines" in msg:
                    # This error comes from python-etcd when it can't
                    # connect to any servers.  When we retry, it should
                    # reconnect.
                    # TODO: We should probably limit retries here and die
                    # That'd recover from errors caused by resource
                    # exhaustion/leaks.
                    _log.error("Connection to etcd failed, will retry.")
                else:
                    # Assume any other errors are fatal to our poll and
                    # do a full resync.
                    _log.exception("Unknown etcd error %r; doing resync.",
                                   e.message)
                    self._reconnect()
                    raise ResyncRequired()
            except:
                _log.exception("Unexpected exception during etcd poll")
                raise

        # Since we're polling on a subtree, we can't just increment
        # the index, we have to look at the modifiedIndex to spot
        # if we've skipped a lot of updates.
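        # Worked example with hypothetical indices: if next_etcd_index is 100
        # but the event just returned has modifiedIndex 150, waiting again
        # from 101 would only re-deliver that same event; waiting from 151
        # moves past it.  The max() is defensive, in case the response's
        # modifiedIndex is somehow behind our current high-water mark.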
        self.next_etcd_index = max(self.next_etcd_index,
                                   response.modifiedIndex) + 1
        return response

    def _resync(self, response, **kwargs):
        """
        Force a resync.
        :raises ResyncRequired: always.
        """
        raise ResyncRequired()

    def on_ready_flag_set(self, response):
        if response.value != "true":
            raise ResyncRequired()

    def on_endpoint_set(self, response, hostname, orchestrator, workload_id,
                        endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        self.endpoint_ids_per_host[combined_id.host].add(combined_id)
        endpoint = parse_endpoint(self.config, combined_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint, async=True)

    def on_endpoint_delete(self, response, hostname, orchestrator, workload_id,
                           endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        self.endpoint_ids_per_host[combined_id.host].discard(combined_id)
        if not self.endpoint_ids_per_host[combined_id.host]:
            del self.endpoint_ids_per_host[combined_id.host]
        self.splitter.on_endpoint_update(combined_id, None, async=True)

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        rules = parse_rules(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_rules_update(profile_id, rules, async=True)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        rules = parse_tags(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_tags_update(profile_id, rules, async=True)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        self.splitter.on_tags_update(profile_id, None, async=True)

    def on_profile_delete(self, response, profile_id):
        """
        Handler for a whole profile deletion

        Fakes a tag and rules delete.
        """
        # Fake deletes for the rules and tags.
        _log.debug("Whole profile %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)
        self.splitter.on_tags_update(profile_id, None, async=True)

    def on_host_delete(self, response, hostname):
        """
        Handler for deletion of a whole host directory.

        Deletes all the contained endpoints.
        """
        ids_on_that_host = self.endpoint_ids_per_host.pop(hostname, set())
        _log.info("Host %s deleted, removing %d endpoints", hostname,
                  len(ids_on_that_host))
        for endpoint_id in ids_on_that_host:
            self.splitter.on_endpoint_update(endpoint_id, None, async=True)
        self.on_host_ip_delete(response, hostname)

    def on_host_ip_set(self, response, hostname):
        if not self.config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        ip = parse_host_ip(hostname, response.value)
        if ip:
            self.ipv4_by_hostname[hostname] = ip
        else:
            _log.warning(
                "Invalid IP for hostname %s: %s, treating as "
                "deletion", hostname, response.value)
            self.ipv4_by_hostname.pop(hostname, None)
        self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                         async=True)

    def on_host_ip_delete(self, response, hostname):
        if not self.config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        if self.ipv4_by_hostname.pop(hostname, None):
            self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                             async=True)

    def on_ipam_v4_pool_set(self, response, pool_id):
        pool = parse_ipam_pool(pool_id, response.value)
        self.splitter.on_ipam_pool_update(pool_id, pool, async=True)

    def on_ipam_v4_pool_delete(self, response, pool_id):
        self.splitter.on_ipam_pool_update(pool_id, None, async=True)

    def on_orch_delete(self, response, hostname, orchestrator):
        """
        Handler for deletion of a whole host orchestrator directory.

        Deletes all the contained endpoints.
        """
        _log.info("Orchestrator dir %s/%s deleted, removing contained hosts",
                  hostname, orchestrator)
        orchestrator = intern(orchestrator.encode("utf8"))
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if endpoint_id.orchestrator == orchestrator:
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]

    def on_workload_delete(self, response, hostname, orchestrator,
                           workload_id):
        """
        Handler for deletion of a whole workload directory.

        Deletes all the contained endpoints.
        """
        _log.debug("Workload dir %s/%s/%s deleted, removing endpoints",
                   hostname, orchestrator, workload_id)
        orchestrator = intern(orchestrator.encode("utf8"))
        workload_id = intern(workload_id.encode("utf8"))
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if (endpoint_id.orchestrator == orchestrator
                    and endpoint_id.workload == workload_id):
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]
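The docstring at the top of this example says the greenlet is managed by the EtcdAPI Actor via the load_config and begin_polling one-way flags. The fragment below is a rough sketch of that management sequence, not code from the original EtcdAPI Actor; config, hosts_ipset and update_splitter are assumed to be constructed elsewhere.

# Rough sketch of how the EtcdAPI Actor might drive the watcher; construction
# of config, hosts_ipset and update_splitter is assumed to happen elsewhere.
watcher = _EtcdWatcher(config, hosts_ipset)
watcher.start()

# Step (1) of the docstring: allow the greenlet to connect and load config.
watcher.load_config.set()
watcher.configured.wait()    # returns once _load_config() has reported config

# Once the rest of Felix can accept updates, hand over the splitter and start
# the snapshot/poll loop (steps (4)-(6) of the docstring).
watcher.splitter = update_splitter
watcher.begin_polling.set()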
Example 10
class _FelixEtcdWatcher(gevent.Greenlet):
    """
    Greenlet that communicates with the etcd driver over a socket.

    * Does the initial handshake with the driver, sending it the init
      message.
    * Receives the pre-loaded config from the driver and uses that
      to do Felix's one-off configuration.
    * Sends the relevant config back to the driver.
    * Processes the event stream from the driver, sending it on to
      the splitter.

    This class is similar to the EtcdWatcher class in that it uses
    a PathDispatcher to fan out updates but it doesn't own an etcd
    connection of its own.
    """
    def __init__(self, config, etcd_api, status_reporter, hosts_ipset):
        super(_FelixEtcdWatcher, self).__init__()
        self._config = config
        self._etcd_api = etcd_api
        self._status_reporter = status_reporter
        self.hosts_ipset = hosts_ipset
        # Whether we've been in sync with etcd at some point.
        self._been_in_sync = False
        # Keep track of the config loaded from etcd so we can spot if it
        # changes.
        self.last_global_config = None
        self.last_host_config = None
        self.my_config_dir = dir_for_per_host_config(self._config.HOSTNAME)
        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()
        # Event that we trigger once the config is loaded.
        self.configured = Event()
        # Polling state initialized at poll start time.
        self.splitter = None
        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}
        # Forces a resync after the current poll if set.  Safe to set from
        # another thread.  Automatically reset to False after the resync is
        # triggered.
        self.resync_requested = False
        self.dispatcher = PathDispatcher()
        # The Popen object for the driver.
        self._driver_process = None
        # Stats.
        self.read_count = 0
        self.msgs_processed = 0
        self.last_rate_log_time = monotonic_time()
        # Register for events when values change.
        self._register_paths()

    def _register_paths(self):
        """
        Program the dispatcher with the paths we care about.
        """
        reg = self.dispatcher.register
        # Profiles and their contents.
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        reg(PROFILE_LABELS_KEY,
            on_set=self.on_prof_labels_set,
            on_del=self.on_prof_labels_delete)
        # Tiered policy
        reg(TIER_DATA,
            on_set=self.on_tier_data_set,
            on_del=self.on_tier_data_delete)
        reg(TIERED_PROFILE,
            on_set=self.on_tiered_policy_set,
            on_del=self.on_tiered_policy_delete)
        # Hosts and endpoints.
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set,
            on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)
        # Configuration keys.  If any of these is changed or created, we'll
        # restart to pick up the change.
        reg(CONFIG_PARAM_KEY,
            on_set=self._on_config_updated,
            on_del=self._on_config_updated)
        reg(PER_HOST_CONFIG_PARAM_KEY,
            on_set=self._on_host_config_updated,
            on_del=self._on_host_config_updated)

    @logging_exceptions
    def _run(self):
        # Don't do anything until we're told to load the config.
        _log.info("Waiting for load_config event...")
        self.load_config.wait()
        _log.info("...load_config set.  Starting driver read %s loop", self)
        # Start the driver process and wait for it to connect back to our
        # socket.
        self._msg_reader, self._msg_writer = self._start_driver()
        # Loop reading from the socket and processing messages.
        self._loop_reading_from_driver()

    def _loop_reading_from_driver(self):
        while True:
            try:
                # Note: self._msg_reader.new_messages() returns iterator so
                # whole for loop must be inside the try.
                for msg_type, msg in self._msg_reader.new_messages(timeout=1):
                    self._dispatch_msg_from_driver(msg_type, msg)
            except SocketClosed:
                _log.critical("The driver process closed its socket, Felix "
                              "must exit.")
                die_and_restart()
            if self.resync_requested:
                _log.info("Resync requested, sending resync request to driver")
                self.resync_requested = False
                self._msg_writer.send_message(MSG_TYPE_RESYNC)
            # Check that the driver hasn't died.  The recv() call should
            # raise an exception when the buffer runs dry but this usually
            # gets hit first.
            driver_rc = self._driver_process.poll()
            if driver_rc is not None:
                _log.critical(
                    "Driver process died with RC = %s.  Felix must "
                    "exit.", driver_rc)
                die_and_restart()

    def _dispatch_msg_from_driver(self, msg_type, msg):
        # Optimization: put update first in the "switch" block because
        # it's on the critical path.
        if msg_type == MSG_TYPE_UPDATE:
            _stats.increment("Update messages from driver")
            self._on_update_from_driver(msg)
        elif msg_type == MSG_TYPE_CONFIG_LOADED:
            _stats.increment("Config loaded messages from driver")
            self._on_config_loaded_from_driver(msg)
        elif msg_type == MSG_TYPE_STATUS:
            _stats.increment("Status messages from driver")
            self._on_status_from_driver(msg)
        else:
            raise RuntimeError("Unexpected message %s" % msg)
        self.msgs_processed += 1
        if self.msgs_processed % MAX_EVENTS_BEFORE_YIELD == 0:
            # Yield to ensure that other actors make progress.  (gevent only
            # yields for us if the socket would block.)  The sleep must be
            # non-zero to work around gevent issue where we could be
            # immediately rescheduled.
            gevent.sleep(0.000001)

    def _on_update_from_driver(self, msg):
        """
        Called when the driver sends us a key/value pair update.

        After the initial handshake, the stream of events consists
        entirely of updates unless something happens to change the
        state of the driver.

        :param dict msg: The message received from the driver.
        """
        assert self.configured.is_set(), "Received update before config"
        # The driver starts polling immediately, make sure we block until
        # everyone else is ready to receive updates.
        self.begin_polling.wait()
        # Unpack the message.
        key = msg[MSG_KEY_KEY]
        value = msg[MSG_KEY_VALUE]
        _log.debug("Update from driver: %s -> %s", key, value)
        # Output some very coarse stats.
        self.read_count += 1
        if self.read_count % 1000 == 0:
            now = monotonic_time()
            delta = now - self.last_rate_log_time
            _log.info("Processed %s updates from driver "
                      "%.1f/s", self.read_count, 1000.0 / delta)
            self.last_rate_log_time = now
        # Wrap the update in an EtcdEvent object so we can dispatch it via the
        # PathDispatcher.
        n = EtcdEvent("set" if value is not None else "delete", key, value)
        self.dispatcher.handle_event(n)

    def _on_config_loaded_from_driver(self, msg):
        """
        Called when we receive a config loaded message from the driver.

        This message is expected once per resync, when the config is
        pre-loaded by the driver.

        On the first call, responds to the driver synchronously with a
        config response.

        If the config has changed since a previous call, triggers Felix
        to die.
        """
        global_config = msg[MSG_KEY_GLOBAL_CONFIG]
        host_config = msg[MSG_KEY_HOST_CONFIG]
        _log.info("Config loaded by driver:\n"
                  "Global: %s\nPer-host: %s", global_config, host_config)
        if self.configured.is_set():
            # We've already been configured.  We don't yet support
            # dynamic config update so instead we check if the config
            # has changed and die if it has.
            _log.info("Checking configuration for changes...")
            if (host_config != self.last_host_config
                    or global_config != self.last_global_config):
                _log.warning("Felix configuration has changed, "
                             "felix must restart.")
                _log.info("Old host config: %s", self.last_host_config)
                _log.info("New host config: %s", host_config)
                _log.info("Old global config: %s", self.last_global_config)
                _log.info("New global config: %s", global_config)
                die_and_restart()
        else:
            # First time loading the config.  Report it to the config
            # object.  Take copies because report_etcd_config is
            # destructive.
            self.last_host_config = host_config.copy()
            self.last_global_config = global_config.copy()
            self._config.report_etcd_config(host_config, global_config)
            # Config now fully resolved, inform the driver.
            driver_log_file = self._config.DRIVERLOGFILE
            self._msg_writer.send_message(
                MSG_TYPE_CONFIG, {
                    MSG_KEY_LOG_FILE: driver_log_file,
                    MSG_KEY_SEV_FILE: self._config.LOGLEVFILE,
                    MSG_KEY_SEV_SCREEN: self._config.LOGLEVSCR,
                    MSG_KEY_SEV_SYSLOG: self._config.LOGLEVSYS,
                })
            self.configured.set()

    def _on_status_from_driver(self, msg):
        """
        Called when we receive a status update from the driver.

        The driver sends us status messages whenever its status changes.
        It moves through these states:

        (1) wait-for-ready (waiting for the global ready flag to become set)
        (2) resync (resyncing with etcd, processing a snapshot and any
            concurrent events)
        (3) in-sync (snapshot processing complete, now processing only events
            from etcd)

        If the driver falls out of sync with etcd then it will start again
        from (1).

        If the status is in-sync, triggers the relevant processing.
        """
        status = msg[MSG_KEY_STATUS]
        _log.info("etcd driver status changed to %s", status)
        if status == STATUS_IN_SYNC and not self._been_in_sync:
            # We're now in sync, tell the Actors that need to do start-of-day
            # cleanup.
            self.begin_polling.wait()  # Make sure splitter is set.
            self._been_in_sync = True
            self.splitter.on_datamodel_in_sync()
            if self._config.REPORT_ENDPOINT_STATUS:
                self._status_reporter.clean_up_endpoint_statuses(async=True)
            self._update_hosts_ipset()

    def _start_driver(self):
        """
        Starts the driver subprocess, connects to it over the socket
        and sends it the init message.

        Stores the Popen object in self._driver_process for future
        access.

        :return: a (MessageReader, MessageWriter) pair for the driver socket.
        """
        _log.info("Creating server socket.")
        try:
            os.unlink("/run/felix-driver.sck")
        except OSError:
            _log.debug("Failed to delete driver socket, assuming it "
                       "didn't exist.")
        update_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        update_socket.bind("/run/felix-driver.sck")
        update_socket.listen(1)
        self._driver_process = subprocess.Popen([
            sys.executable, "-m", "calico.etcddriver", "/run/felix-driver.sck"
        ])
        _log.info("Started etcd driver with PID %s", self._driver_process.pid)
        update_conn, _ = update_socket.accept()
        _log.info("Accepted connection on socket")
        # No longer need the server socket, remove it.
        try:
            os.unlink("/run/felix-driver.sck")
        except OSError:
            # Unexpected but carry on...
            _log.exception("Failed to unlink socket")
        else:
            _log.info("Unlinked server socket")

        # Wrap the socket in reader/writer objects that simplify using the
        # protocol.
        reader = MessageReader(update_conn)
        writer = MessageWriter(update_conn)
        # Give the driver its config.
        writer.send_message(
            MSG_TYPE_INIT, {
                MSG_KEY_ETCD_URLS: [
                    self._config.ETCD_SCHEME + "://" + addr
                    for addr in self._config.ETCD_ADDRS
                ],
                MSG_KEY_HOSTNAME:
                self._config.HOSTNAME,
                MSG_KEY_KEY_FILE:
                self._config.ETCD_KEY_FILE,
                MSG_KEY_CERT_FILE:
                self._config.ETCD_CERT_FILE,
                MSG_KEY_CA_FILE:
                self._config.ETCD_CA_FILE
            })
        return reader, writer

    def on_endpoint_set(self, response, hostname, orchestrator, workload_id,
                        endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        _stats.increment("Endpoint created/updated")
        endpoint = parse_endpoint(self._config, combined_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint)

    def on_endpoint_delete(self, response, hostname, orchestrator, workload_id,
                           endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        _stats.increment("Endpoint deleted")
        self.splitter.on_endpoint_update(combined_id, None)

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        _stats.increment("Rules created/updated")
        rules = parse_profile(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_rules_update(profile_id, rules)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        _stats.increment("Rules deleted")
        self.splitter.on_rules_update(profile_id, None)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        _stats.increment("Tags created/updated")
        rules = parse_tags(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_tags_update(profile_id, rules)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        _stats.increment("Tags deleted")
        self.splitter.on_tags_update(profile_id, None)

    def on_prof_labels_set(self, response, profile_id):
        """Handler for profile labels, passes update to the splitter."""
        _log.debug("Labels for profile %s created/updated", profile_id)
        labels = parse_labels(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_prof_labels_set(profile_id, labels)

    def on_prof_labels_delete(self, response, profile_id):
        """Handler for profile label deletion

        Passes the update to the splitter."""
        _log.debug("Labels for profile %s deleted", profile_id)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_prof_labels_set(profile_id, None)

    def on_tier_data_set(self, response, tier):
        _log.debug("Tier data set for tier %s", tier)
        _stats.increment("Tier data created/updated")
        data = parse_tier_data(tier, response.value)
        self.splitter.on_tier_data_update(tier, data)

    def on_tier_data_delete(self, response, tier):
        _log.debug("Tier data deleted for tier %s", tier)
        _stats.increment("Tier data deleted")
        self.splitter.on_tier_data_update(tier, None)

    def on_tiered_policy_set(self, response, tier, policy_id):
        _log.debug("Rules for %s/%s set", tier, policy_id)
        _stats.increment("Tiered rules created/updated")
        policy_id = TieredPolicyId(tier, policy_id)
        rules = parse_policy(policy_id, response.value)
        if rules is not None:
            selector = rules.pop("selector")
            order = rules.pop("order")
            self.splitter.on_rules_update(policy_id, rules)
            self.splitter.on_policy_selector_update(policy_id, selector, order)
        else:
            self.splitter.on_rules_update(policy_id, None)
            self.splitter.on_policy_selector_update(policy_id, None, None)

    def on_tiered_policy_delete(self, response, tier, policy_id):
        """Handler for tiered rules deletes, passes update to the splitter."""
        _log.debug("Rules for %s/%s deleted", tier, policy_id)
        _stats.increment("tiered rules deleted")
        policy_id = TieredPolicyId(tier, policy_id)
        self.splitter.on_rules_update(policy_id, None)
        self.splitter.on_policy_selector_update(policy_id, None, None)

    def on_host_ip_set(self, response, hostname):
        if not self._config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        _stats.increment("Host IP created/updated")
        ip = parse_host_ip(hostname, response.value)
        if ip:
            self.ipv4_by_hostname[hostname] = ip
        else:
            _log.warning(
                "Invalid IP for hostname %s: %s, treating as "
                "deletion", hostname, response.value)
            self.ipv4_by_hostname.pop(hostname, None)
        self._update_hosts_ipset()

    def on_host_ip_delete(self, response, hostname):
        if not self._config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        _stats.increment("Host IP deleted")
        if self.ipv4_by_hostname.pop(hostname, None):
            self._update_hosts_ipset()

    def _update_hosts_ipset(self):
        if not self._been_in_sync:
            _log.debug("Deferring update to hosts ipset until we're in-sync")
            return
        self.hosts_ipset.replace_members(
            frozenset(self.ipv4_by_hostname.values()),
            async=True
        )

    def _on_config_updated(self, response, config_param):
        new_value = response.value
        if self.last_global_config.get(config_param) != new_value:
            _log.critical(
                "Global config value %s updated.  Felix must be "
                "restarted.", config_param)
            die_and_restart()
        _stats.increment("Global config (non) updates")

    def _on_host_config_updated(self, response, hostname, config_param):
        if hostname != self._config.HOSTNAME:
            _log.debug("Ignoring config update for host %s", hostname)
            return
        _stats.increment("Per-host config created/updated")
        new_value = response.value
        if self.last_host_config.get(config_param) != new_value:
            _log.critical(
                "Per-host config value %s updated.  Felix must be "
                "restarted.", config_param)
            die_and_restart()

    def on_ipam_v4_pool_set(self, response, pool_id):
        _stats.increment("IPAM pool created/updated")
        pool = parse_ipam_pool(pool_id, response.value)
        self.splitter.on_ipam_pool_updated(pool_id, pool)

    def on_ipam_v4_pool_delete(self, response, pool_id):
        _stats.increment("IPAM pool deleted")
        self.splitter.on_ipam_pool_updated(pool_id, None)
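
Every handler in these examples is invoked through a PathDispatcher: an etcd key is matched against the registered path patterns and any captured segments (profile_id, hostname, pool_id and so on) are passed to the handler as keyword arguments. The following is a minimal, self-contained sketch of that dispatch pattern, not the Calico implementation; the class and key names below are made up for illustration:

# Minimal sketch of path-pattern dispatch (illustrative only, not the real
# PathDispatcher).  Segments written as "<name>" capture the corresponding
# part of the key and are passed to the handler as keyword arguments.
class MiniPathDispatcher(object):
    def __init__(self):
        self.routes = []  # (pattern segments, action, handler) tuples

    def register(self, path, on_set=None, on_del=None):
        segments = path.strip("/").split("/")
        if on_set is not None:
            self.routes.append((segments, "set", on_set))
        if on_del is not None:
            self.routes.append((segments, "delete", on_del))

    def handle_event(self, action, key, value):
        key_segments = key.strip("/").split("/")
        for pattern, route_action, handler in self.routes:
            if route_action != action or len(pattern) != len(key_segments):
                continue
            captures = {}
            for pat_seg, key_seg in zip(pattern, key_segments):
                if pat_seg.startswith("<") and pat_seg.endswith(">"):
                    captures[pat_seg[1:-1]] = key_seg
                elif pat_seg != key_seg:
                    break  # Literal segment mismatch; try the next route.
            else:
                handler(value, **captures)
                return


def on_host_ip(value, hostname):
    print("Host %s now has IP %s" % (hostname, value))


dispatcher = MiniPathDispatcher()
dispatcher.register("/hosts/<hostname>/ip_addr", on_set=on_host_ip)
dispatcher.handle_event("set", "/hosts/myhost/ip_addr", "10.0.0.1")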
Example n. 11
0
class _FelixEtcdWatcher(gevent.Greenlet):
    """
    Greenlet that communicates with the etcd driver over a socket.

    * Does the initial handshake with the driver, sending it the init
      message.
    * Receives the pre-loaded config from the driver and uses that
      to do Felix's one-off configuration.
    * Sends the relevant config back to the driver.
    * Processes the event stream from the driver, sending it on to
      the splitter.

    This class is similar to the EtcdWatcher class in that it uses
    a PathDispatcher to fan out updates but it doesn't own an etcd
    connection of its own.
    """

    def __init__(self, config, etcd_api, status_reporter, hosts_ipset):
        super(_FelixEtcdWatcher, self).__init__()
        self._config = config
        self._etcd_api = etcd_api
        self._status_reporter = status_reporter
        self.hosts_ipset = hosts_ipset
        # Whether we've been in sync with etcd at some point.
        self._been_in_sync = False
        # Keep track of the config loaded from etcd so we can spot if it
        # changes.
        self.last_global_config = None
        self.last_host_config = None
        self.my_config_dir = dir_for_per_host_config(self._config.HOSTNAME)
        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()
        # Event that we trigger once the config is loaded.
        self.configured = Event()
        # Polling state initialized at poll start time.
        self.splitter = None
        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}
        # Forces a resync after the current poll if set.  Safe to set from
        # another thread.  Automatically reset to False after the resync is
        # triggered.
        self.resync_requested = False
        self.dispatcher = PathDispatcher()
        # The Popen object for the driver.
        self._driver_process = None
        # Stats.
        self.read_count = 0
        self.msgs_processed = 0
        self.last_rate_log_time = monotonic_time()
        # Register for events when values change.
        self._register_paths()

    def _register_paths(self):
        """
        Program the dispatcher with the paths we care about.
        """
        reg = self.dispatcher.register
        # Profiles and their contents.
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts and endpoints.
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)
        # Configuration keys.  If any of these is changed or created, we'll
        # restart to pick up the change.
        reg(CONFIG_PARAM_KEY,
            on_set=self._on_config_updated,
            on_del=self._on_config_updated)
        reg(PER_HOST_CONFIG_PARAM_KEY,
            on_set=self._on_host_config_updated,
            on_del=self._on_host_config_updated)

    @logging_exceptions
    def _run(self):
        # Don't do anything until we're told to load the config.
        _log.info("Waiting for load_config event...")
        self.load_config.wait()
        _log.info("...load_config set.  Starting driver read %s loop", self)
        # Start the driver process and wait for it to connect back to our
        # socket.
        self._msg_reader, self._msg_writer = self._start_driver()
        # Loop reading from the socket and processing messages.
        self._loop_reading_from_driver()

    def _loop_reading_from_driver(self):
        while True:
            try:
                # Note: self._msg_reader.new_messages() returns iterator so
                # whole for loop must be inside the try.
                for msg_type, msg in self._msg_reader.new_messages(timeout=1):
                    self._dispatch_msg_from_driver(msg_type, msg)
            except SocketClosed:
                _log.critical("The driver process closed its socket, Felix "
                              "must exit.")
                die_and_restart()
            if self.resync_requested:
                _log.info("Resync requested, sending resync request to driver")
                self.resync_requested = False
                self._msg_writer.send_message(MSG_TYPE_RESYNC)
            # Check that the driver hasn't died.  The recv() call should
            # raise an exception when the buffer runs dry but this usually
            # gets hit first.
            driver_rc = self._driver_process.poll()
            if driver_rc is not None:
                _log.critical("Driver process died with RC = %s.  Felix must "
                              "exit.", driver_rc)
                die_and_restart()

    def _dispatch_msg_from_driver(self, msg_type, msg):
        # Optimization: put update first in the "switch" block because
        # it's on the critical path.
        if msg_type == MSG_TYPE_UPDATE:
            _stats.increment("Update messages from driver")
            self._on_update_from_driver(msg)
        elif msg_type == MSG_TYPE_CONFIG_LOADED:
            _stats.increment("Config loaded messages from driver")
            self._on_config_loaded_from_driver(msg)
        elif msg_type == MSG_TYPE_STATUS:
            _stats.increment("Status messages from driver")
            self._on_status_from_driver(msg)
        else:
            raise RuntimeError("Unexpected message %s" % msg)
        self.msgs_processed += 1
        if self.msgs_processed % MAX_EVENTS_BEFORE_YIELD == 0:
            # Yield to ensure that other actors make progress.  (gevent only
            # yields for us if the socket would block.)  The sleep must be
            # non-zero to work around gevent issue where we could be
            # immediately rescheduled.
            gevent.sleep(0.000001)

    def _on_update_from_driver(self, msg):
        """
        Called when the driver sends us a key/value pair update.

        After the initial handshake, the stream of events consists
        entirely of updates unless something happens to change the
        state of the driver.

        :param dict msg: The message received from the driver.
        """
        assert self.configured.is_set(), "Received update before config"
        # The driver starts polling immediately, make sure we block until
        # everyone else is ready to receive updates.
        self.begin_polling.wait()
        # Unpack the message.
        key = msg[MSG_KEY_KEY]
        value = msg[MSG_KEY_VALUE]
        _log.debug("Update from driver: %s -> %s", key, value)
        # Output some very coarse stats.
        self.read_count += 1
        if self.read_count % 1000 == 0:
            now = monotonic_time()
            delta = now - self.last_rate_log_time
            _log.info("Processed %s updates from driver "
                      "%.1f/s", self.read_count, 1000.0 / delta)
            self.last_rate_log_time = now
        # Wrap the update in an EtcdEvent object so we can dispatch it via the
        # PathDispatcher.
        n = EtcdEvent("set" if value is not None else "delete", key, value)
        self.dispatcher.handle_event(n)

    def _on_config_loaded_from_driver(self, msg):
        """
        Called when we receive a config loaded message from the driver.

        This message is expected once per resync, when the config is
        pre-loaded by the driver.

        On the first call, responds to the driver synchronously with a
        config response.

        If the config has changed since a previous call, triggers Felix
        to die.
        """
        global_config = msg[MSG_KEY_GLOBAL_CONFIG]
        host_config = msg[MSG_KEY_HOST_CONFIG]
        _log.info("Config loaded by driver:\n"
                  "Global: %s\nPer-host: %s",
                  global_config,
                  host_config)
        if self.configured.is_set():
            # We've already been configured.  We don't yet support
            # dynamic config update so instead we check if the config
            # has changed and die if it has.
            _log.info("Checking configuration for changes...")
            if (host_config != self.last_host_config or
                    global_config != self.last_global_config):
                _log.warning("Felix configuration has changed, "
                             "felix must restart.")
                _log.info("Old host config: %s", self.last_host_config)
                _log.info("New host config: %s", host_config)
                _log.info("Old global config: %s",
                          self.last_global_config)
                _log.info("New global config: %s", global_config)
                die_and_restart()
        else:
            # First time loading the config.  Report it to the config
            # object.  Take copies because report_etcd_config is
            # destructive.
            self.last_host_config = host_config.copy()
            self.last_global_config = global_config.copy()
            self._config.report_etcd_config(host_config,
                                            global_config)
            # Config now fully resolved, inform the driver.
            driver_log_file = self._config.DRIVERLOGFILE
            self._msg_writer.send_message(
                MSG_TYPE_CONFIG,
                {
                    MSG_KEY_LOG_FILE: driver_log_file,
                    MSG_KEY_SEV_FILE: self._config.LOGLEVFILE,
                    MSG_KEY_SEV_SCREEN: self._config.LOGLEVSCR,
                    MSG_KEY_SEV_SYSLOG: self._config.LOGLEVSYS,
                }
            )
            self.configured.set()

    def _on_status_from_driver(self, msg):
        """
        Called when we receive a status update from the driver.

        The driver sends us status messages whenever its status changes.
        It moves through these states:

        (1) wait-for-ready (waiting for the global ready flag to become set)
        (2) resync (resyncing with etcd, processing a snapshot and any
            concurrent events)
        (3) in-sync (snapshot processing complete, now processing only events
            from etcd)

        If the driver falls out of sync with etcd then it will start again
        from (1).

        If the status is in-sync, triggers the relevant processing.
        """
        status = msg[MSG_KEY_STATUS]
        _log.info("etcd driver status changed to %s", status)
        if status == STATUS_IN_SYNC and not self._been_in_sync:
            # We're now in sync, tell the Actors that need to do start-of-day
            # cleanup.
            self.begin_polling.wait()  # Make sure splitter is set.
            self._been_in_sync = True
            self.splitter.on_datamodel_in_sync()
            if self._config.REPORT_ENDPOINT_STATUS:
                self._status_reporter.clean_up_endpoint_statuses(async=True)
            self._update_hosts_ipset()

    def _start_driver(self):
        """
        Starts the driver subprocess, connects to it over the socket
        and sends it the init message.

        Stores the Popen object in self._driver_process for future
        access.

        :return: the connected socket to the driver.
        """
        _log.info("Creating server socket.")
        try:
            os.unlink("/run/felix-driver.sck")
        except OSError:
            _log.debug("Failed to delete driver socket, assuming it "
                       "didn't exist.")
        update_socket = socket.socket(socket.AF_UNIX,
                                      socket.SOCK_STREAM)
        update_socket.bind("/run/felix-driver.sck")
        update_socket.listen(1)
        self._driver_process = subprocess.Popen([sys.executable,
                                                 "-m",
                                                 "calico.etcddriver",
                                                 "/run/felix-driver.sck"])
        _log.info("Started etcd driver with PID %s", self._driver_process.pid)
        update_conn, _ = update_socket.accept()
        _log.info("Accepted connection on socket")
        # No longer need the server socket, remove it.
        try:
            os.unlink("/run/felix-driver.sck")
        except OSError:
            # Unexpected but carry on...
            _log.exception("Failed to unlink socket")
        else:
            _log.info("Unlinked server socket")

        # Wrap the socket in reader/writer objects that simplify using the
        # protocol.
        reader = MessageReader(update_conn)
        writer = MessageWriter(update_conn)
        # Give the driver its config.
        writer.send_message(
            MSG_TYPE_INIT,
            {
                MSG_KEY_ETCD_URL: self._config.ETCD_SCHEME + "://" +
                                  self._config.ETCD_ADDR,
                MSG_KEY_HOSTNAME: self._config.HOSTNAME,
                MSG_KEY_KEY_FILE: self._config.ETCD_KEY_FILE,
                MSG_KEY_CERT_FILE: self._config.ETCD_CERT_FILE,
                MSG_KEY_CA_FILE: self._config.ETCD_CA_FILE
            }
        )
        return reader, writer

    def on_endpoint_set(self, response, hostname, orchestrator,
                        workload_id, endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        _stats.increment("Endpoint created/updated")
        endpoint = parse_endpoint(self._config, combined_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint)

    def on_endpoint_delete(self, response, hostname, orchestrator,
                           workload_id, endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        _stats.increment("Endpoint deleted")
        self.splitter.on_endpoint_update(combined_id, None)

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        _stats.increment("Rules created/updated")
        rules = parse_rules(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_rules_update(profile_id, rules)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        _stats.increment("Rules deleted")
        self.splitter.on_rules_update(profile_id, None)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        _stats.increment("Tags created/updated")
        rules = parse_tags(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_tags_update(profile_id, rules)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        _stats.increment("Tags deleted")
        self.splitter.on_tags_update(profile_id, None)

    def on_host_ip_set(self, response, hostname):
        if not self._config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        _stats.increment("Host IP created/updated")
        ip = parse_host_ip(hostname, response.value)
        if ip:
            self.ipv4_by_hostname[hostname] = ip
        else:
            _log.warning("Invalid IP for hostname %s: %s, treating as "
                         "deletion", hostname, response.value)
            self.ipv4_by_hostname.pop(hostname, None)
        self._update_hosts_ipset()

    def on_host_ip_delete(self, response, hostname):
        if not self._config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        _stats.increment("Host IP deleted")
        if self.ipv4_by_hostname.pop(hostname, None):
            self._update_hosts_ipset()

    def _update_hosts_ipset(self):
        if not self._been_in_sync:
            _log.debug("Deferring update to hosts ipset until we're in-sync")
            return
        self.hosts_ipset.replace_members(
            frozenset(self.ipv4_by_hostname.values()),
            async=True
        )

    def _on_config_updated(self, response, config_param):
        new_value = response.value
        if self.last_global_config.get(config_param) != new_value:
            _log.critical("Global config value %s updated.  Felix must be "
                          "restarted.", config_param)
            die_and_restart()
        _stats.increment("Global config (non) updates")

    def _on_host_config_updated(self, response, hostname, config_param):
        if hostname != self._config.HOSTNAME:
            _log.debug("Ignoring config update for host %s", hostname)
            return
        _stats.increment("Per-host config created/updated")
        new_value = response.value
        if self.last_host_config.get(config_param) != new_value:
            _log.critical("Global config value %s updated.  Felix must be "
                          "restarted.", config_param)
            die_and_restart()

    def on_ipam_v4_pool_set(self, response, pool_id):
        _stats.increment("IPAM pool created/updated")
        pool = parse_ipam_pool(pool_id, response.value)
        self.splitter.on_ipam_pool_update(pool_id, pool)

    def on_ipam_v4_pool_delete(self, response, pool_id):
        _stats.increment("IPAM pool deleted")
        self.splitter.on_ipam_pool_update(pool_id, None)
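
The greenlet in this example is driven entirely through the Events created in its __init__. As a rough sketch of the startup sequence implied by those comments (an inference about how the owning EtcdAPI actor might use it, not code taken from that actor), the caller could do something like:

# Hedged sketch of how an owner might drive _FelixEtcdWatcher; the
# constructor arguments and update_splitter are placeholders supplied by
# the real caller.
def start_felix_etcd_watcher(config, etcd_api, status_reporter,
                             hosts_ipset, update_splitter):
    watcher = _FelixEtcdWatcher(config, etcd_api, status_reporter,
                                hosts_ipset)
    watcher.start()            # Spawn the greenlet; _run() blocks on
                               # load_config.
    watcher.load_config.set()  # Start the driver and load the config.
    watcher.configured.wait()  # Wait until the config has been resolved
                               # and sent back to the driver.
    watcher.splitter = update_splitter  # Must be in place before updates
                                        # flow...
    watcher.begin_polling.set()         # ...which this flag allows.
    return watcher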
Example n. 12
0
    def __init__(self, config, hosts_ipset):
        super(_EtcdWatcher, self).__init__()
        self.config = config
        self.hosts_ipset = hosts_ipset

        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()

        # Flag used to trigger a resync.  this is modified from other
        # greenlets, which is safe in Python.
        self.resync_after_current_poll = False

        # Event that we trigger once the config is loaded.
        self.configured = Event()

        # Etcd client, initialised lazily.
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Polling state initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        for key in RESYNC_KEYS:
            reg(key, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)
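
The directory-level on_del registrations above (PER_HOST_DIR, WORKLOAD_DIR and friends) exist because etcd reports a recursive directory deletion as a single event; the endpoint_ids_per_host cache is what lets that one event be fanned out into per-endpoint deletions, as on_host_delete and on_workload_delete do below. A simplified, standalone sketch of the same idea (helper names made up for illustration):

from collections import defaultdict

# Standalone sketch: expand one "host directory deleted" event into
# per-endpoint deletions using a cached index of endpoints per host.
endpoint_ids_per_host = defaultdict(set)

def remember_endpoint(hostname, endpoint_id):
    endpoint_ids_per_host[hostname].add(endpoint_id)

def handle_host_dir_delete(hostname, send_endpoint_update):
    # etcd gives a single event for the whole subtree, so fake an
    # individual delete for each endpoint we knew lived under it.
    for endpoint_id in endpoint_ids_per_host.pop(hostname, set()):
        send_endpoint_update(endpoint_id, None)

def log_endpoint_update(endpoint_id, value):
    print("endpoint %s -> %s" % (endpoint_id, value))

remember_endpoint("hostA", "endpoint-1")
remember_endpoint("hostA", "endpoint-2")
handle_host_dir_delete("hostA", log_endpoint_update)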
Example n. 13
0
class _EtcdWatcher(gevent.Greenlet):
    """
    Greenlet that watches the etcd data model for changes.

    (1) Waits for the load_config event to be triggered.
    (2) Connects to etcd and waits for the Ready flag to be set,
        indicating the data model is consistent.
    (3) Loads the config from etcd and passes it to the config object.
    (4) Waits for the begin_polling Event to be triggered.
    (5) Loads a complete snapshot from etcd and passes it to the
        UpdateSplitter.
    (6) Watches etcd for changes, sending them incrementally to the
        UpdateSplitter.
    (On etcd error) starts again from step (5)

    This greenlet is expected to be managed by the EtcdAPI Actor.
    """

    def __init__(self, config, hosts_ipset):
        super(_EtcdWatcher, self).__init__()
        self.config = config
        self.hosts_ipset = hosts_ipset

        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()

        # Flag used to trigger a resync.  this is modified from other
        # greenlets, which is safe in Python.
        self.resync_after_current_poll = False

        # Event that we trigger once the config is loaded.
        self.configured = Event()

        # Etcd client, initialised lazily.
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Polling state initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        for key in RESYNC_KEYS:
            reg(key, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)

    @logging_exceptions
    def _run(self):
        """
        Greenlet main loop: loads the initial dump from etcd and then
        monitors for changes and feeds them to the splitter.
        """
        self.load_config.wait()
        while True:
            _log.info("Reconnecting and loading snapshot from etcd...")
            self._reconnect(copy_cluster_id=False)
            self._wait_for_ready()

            while not self.configured.is_set():
                self._load_config()
                # Unblock anyone who's waiting on the config.
                self.configured.set()

            if not self.begin_polling.is_set():
                _log.info("etcd worker about to wait for begin_polling event")
            self.begin_polling.wait()

            try:
                # Load initial dump from etcd.  First just get all the
                # endpoints and profiles by id.  The response contains a
                # generation ID allowing us to then start polling for updates
                # without missing any.
                self._load_initial_dump()
                while True:
                    # Wait for something to change.
                    response = self._wait_for_etcd_event()
                    self.dispatcher.handle_event(response)
            except ResyncRequired:
                _log.info("Polling aborted, doing resync.")

    def _reconnect(self, copy_cluster_id=True):
        etcd_addr = self.config.ETCD_ADDR
        if ":" in etcd_addr:
            host, port = etcd_addr.split(":")
            port = int(port)
        else:
            host = etcd_addr
            port = 4001
        if self.client and copy_cluster_id:
            old_cluster_id = self.client.expected_cluster_id
            _log.info("(Re)connecting to etcd. Old etcd cluster ID was %s.",
                      old_cluster_id)
        else:
            _log.info("(Re)connecting to etcd. No previous cluster ID.")
            old_cluster_id = None
        self.client = etcd.Client(host=host, port=port,
                                  expected_cluster_id=old_cluster_id)

    def _wait_for_ready(self):
        _log.info("Waiting for etcd to be ready...")
        ready = False
        while not ready:
            try:
                db_ready = self.client.read(READY_KEY,
                                            timeout=10).value
            except EtcdKeyNotFound:
                _log.warn("Ready flag not present in etcd; felix will pause "
                          "updates until the orchestrator sets the flag.")
                db_ready = "false"
            except EtcdException as e:
                # Note: we don't log the stack trace because it's too
                # spammy and adds little.
                _log.error("Failed to retrieve ready flag from etcd (%r). "
                           "Felix will not receive updates until the "
                           "connection to etcd is restored.", e)
                db_ready = "false"

            if db_ready == "true":
                _log.info("etcd is ready.")
                ready = True
            else:
                _log.info("etcd not ready.  Will retry.")
                gevent.sleep(RETRY_DELAY)
                continue

    def _load_config(self):
        """
        Loads our start-of-day configuration from etcd.  Does not return
        until the config is successfully loaded.
        """
        while True:
            try:
                global_cfg = self.client.read(CONFIG_DIR)
                global_dict = _build_config_dict(global_cfg)

                try:
                    host_cfg = self.client.read(self.my_config_dir)
                    host_dict = _build_config_dict(host_cfg)
                except EtcdKeyNotFound:
                    # It is not an error for there to be no per-host
                    # config; default to empty.
                    _log.info("No configuration overrides for this node")
                    host_dict = {}
            except (EtcdKeyNotFound, EtcdException) as e:
                # Note: we don't log the stack trace because it's too
                # spammy and adds little.
                _log.error("Failed to read config. etcd may be down or "
                           "the data model may not be ready: %r. Will "
                           "retry.", e)
                gevent.sleep(RETRY_DELAY)
            else:
                self.config.report_etcd_config(host_dict, global_dict)
                return

    def _load_initial_dump(self):
        """
        Loads a snapshot from etcd and passes it to the update splitter.

        :raises ResyncRequired: if the Ready flag is not set in the snapshot.
        """
        initial_dump = self.client.read(VERSION_DIR, recursive=True)
        _log.info("Loaded snapshot from etcd cluster %s, parsing it...",
                  self.client.expected_cluster_id)
        rules_by_id = {}
        tags_by_id = {}
        endpoints_by_id = {}
        ipv4_pools_by_id = {}
        self.endpoint_ids_per_host.clear()
        self.ipv4_by_hostname.clear()
        still_ready = False
        for child in initial_dump.children:
            profile_id, rules = parse_if_rules(child)
            if profile_id:
                rules_by_id[profile_id] = rules
                continue
            profile_id, tags = parse_if_tags(child)
            if profile_id:
                tags_by_id[profile_id] = tags
                continue
            endpoint_id, endpoint = parse_if_endpoint(self.config, child)
            if endpoint_id and endpoint:
                endpoints_by_id[endpoint_id] = endpoint
                self.endpoint_ids_per_host[endpoint_id.host].add(endpoint_id)
                continue
            pool_id, pool = parse_if_ipam_v4_pool(child)
            if pool_id and pool:
                ipv4_pools_by_id[pool_id] = pool
                continue
            if self.config.IP_IN_IP_ENABLED:
                hostname, ip = parse_if_host_ip(child)
                if hostname and ip:
                    self.ipv4_by_hostname[hostname] = ip
                    continue

            # Double-check the flag hasn't changed since we read it before.
            if child.key == READY_KEY:
                if child.value == "true":
                    still_ready = True
                else:
                    _log.warning("Aborting resync because ready flag was"
                                 "unset since we read it.")
                    raise ResyncRequired()

        if not still_ready:
            _log.warn("Aborting resync; ready flag no longer present.")
            raise ResyncRequired()

        # Actually apply the snapshot. This does not return anything, but
        # just sends the relevant messages to the relevant threads to make
        # all the processing occur.
        _log.info("Snapshot parsed, passing to update splitter")
        self.splitter.apply_snapshot(rules_by_id,
                                     tags_by_id,
                                     endpoints_by_id,
                                     ipv4_pools_by_id,
                                     async=True)
        if self.config.IP_IN_IP_ENABLED:
            # We only support IPv4 for host tracking right now so there's not
            # much point in going via the splitter.
            # FIXME Support IP-in-IP for IPv6.
            _log.info("Sending (%d) host IPs to ipset.",
                      len(self.ipv4_by_hostname))
            self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                             async=True)

        # The etcd_index is the high-water-mark for the snapshot, record that
        # we want to poll starting at the next index.
        self.next_etcd_index = initial_dump.etcd_index + 1

    def _wait_for_etcd_event(self):
        """
        Polls etcd until something changes.

        Retries on read timeouts and other non-fatal errors.

        :returns: The etcd response object for the change.
        :raises ResyncRequired: If we get out of sync with etcd or hit
            a fatal error.
        """
        response = None
        while not response:
            if self.resync_after_current_poll:
                _log.debug("Told to resync, aborting poll.")
                self.resync_after_current_poll = False
                raise ResyncRequired()

            try:
                _log.debug("About to wait for etcd update %s",
                           self.next_etcd_index)
                response = self.client.read(VERSION_DIR,
                                            wait=True,
                                            waitIndex=self.next_etcd_index,
                                            recursive=True,
                                            timeout=Timeout(connect=10,
                                                            read=90),
                                            check_cluster_uuid=True)
                _log.debug("etcd response: %r", response)
            except (ReadTimeoutError, SocketTimeout) as e:
                # This is expected when we're doing a poll and nothing
                # happened. socket timeout doesn't seem to be caught by
                # urllib3 1.7.1.  Simply reconnect.
                _log.debug("Read from etcd timed out (%r), retrying.", e)
                # Force a reconnect to ensure urllib3 doesn't recycle the
                # connection.  (We were seeing this with urllib3 1.7.1.)
                self._reconnect()
            except (ConnectTimeoutError,
                    urllib3.exceptions.HTTPError,
                    httplib.HTTPException) as e:
                # We don't log the stack trace here because it can spam the
                # logs heavily if the requests keep failing.  The errors are
                # very descriptive anyway.
                _log.warning("Low-level HTTP error, reconnecting to "
                             "etcd: %r.", e)
                self._reconnect()
            except (EtcdClusterIdChanged, EtcdEventIndexCleared) as e:
                _log.warning("Out of sync with etcd (%r).  Reconnecting "
                             "for full sync.", e)
                raise ResyncRequired()
            except EtcdException as e:
                # Sadly, python-etcd doesn't have a dedicated exception
                # for the "no more machines in cluster" error. Parse the
                # message:
                msg = (e.message or "unknown").lower()
                # Limit our retry rate in case etcd is down.
                gevent.sleep(1)
                if "no more machines" in msg:
                    # This error comes from python-etcd when it can't
                    # connect to any servers.  When we retry, it should
                    # reconnect.
                    # TODO: We should probably limit retries here and die;
                    # that'd recover from errors caused by resource
                    # exhaustion/leaks.
                    _log.error("Connection to etcd failed, will retry.")
                else:
                    # Assume any other errors are fatal to our poll and
                    # do a full resync.
                    _log.exception("Unknown etcd error %r; doing resync.",
                                   e.message)
                    self._reconnect()
                    raise ResyncRequired()
            except:
                _log.exception("Unexpected exception during etcd poll")
                raise

        # Since we're polling on a subtree, we can't just increment
        # the index, we have to look at the modifiedIndex to spot
        # if we've skipped a lot of updates.
        self.next_etcd_index = max(self.next_etcd_index,
                                   response.modifiedIndex) + 1
        return response

    def _resync(self, response, **kwargs):
        """
        Force a resync.
        :raises ResyncRequired: always.
        """
        raise ResyncRequired()

    def on_ready_flag_set(self, response):
        if response.value != "true":
            raise ResyncRequired()

    def on_endpoint_set(self, response, hostname, orchestrator,
                        workload_id, endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        self.endpoint_ids_per_host[combined_id.host].add(combined_id)
        endpoint = parse_endpoint(self.config, combined_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint, async=True)

    def on_endpoint_delete(self, response, hostname, orchestrator,
                           workload_id, endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        self.endpoint_ids_per_host[combined_id.host].discard(combined_id)
        if not self.endpoint_ids_per_host[combined_id.host]:
            del self.endpoint_ids_per_host[combined_id.host]
        self.splitter.on_endpoint_update(combined_id, None, async=True)

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        rules = parse_rules(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_rules_update(profile_id, rules, async=True)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        rules = parse_tags(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_tags_update(profile_id, rules, async=True)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        self.splitter.on_tags_update(profile_id, None, async=True)

    def on_profile_delete(self, response, profile_id):
        """
        Handler for a whole profile deletion

        Fakes a tag and rules delete.
        """
        # Fake deletes for the rules and tags.
        _log.debug("Whole profile %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)
        self.splitter.on_tags_update(profile_id, None, async=True)

    def on_host_delete(self, response, hostname):
        """
        Handler for deletion of a whole host directory.

        Deletes all the contained endpoints.
        """
        ids_on_that_host = self.endpoint_ids_per_host.pop(hostname, set())
        _log.info("Host %s deleted, removing %d endpoints",
                  hostname, len(ids_on_that_host))
        for endpoint_id in ids_on_that_host:
            self.splitter.on_endpoint_update(endpoint_id, None, async=True)
        self.on_host_ip_delete(response, hostname)

    def on_host_ip_set(self, response, hostname):
        if not self.config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        ip = parse_host_ip(hostname, response.value)
        if ip:
            self.ipv4_by_hostname[hostname] = ip
        else:
            _log.warning("Invalid IP for hostname %s: %s, treating as "
                         "deletion", hostname, response.value)
            self.ipv4_by_hostname.pop(hostname, None)
        self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                         async=True)

    def on_host_ip_delete(self, response, hostname):
        if not self.config.IP_IN_IP_ENABLED:
            _log.debug("Ignoring update to %s because IP-in-IP is disabled",
                       response.key)
            return
        if self.ipv4_by_hostname.pop(hostname, None):
            self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                             async=True)

    def on_ipam_v4_pool_set(self, response, pool_id):
        pool = parse_ipam_pool(pool_id, response.value)
        self.splitter.on_ipam_pool_update(pool_id, pool, async=True)

    def on_ipam_v4_pool_delete(self, response, pool_id):
        self.splitter.on_ipam_pool_update(pool_id, None, async=True)

    def on_orch_delete(self, response, hostname, orchestrator):
        """
        Handler for deletion of a whole host orchestrator directory.

        Deletes all the contained endpoints.
        """
        _log.info("Orchestrator dir %s/%s deleted, removing contained hosts",
                  hostname, orchestrator)
        orchestrator = intern(orchestrator.encode("utf8"))
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if endpoint_id.orchestrator == orchestrator:
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]

    def on_workload_delete(self, response, hostname, orchestrator,
                           workload_id):
        """
        Handler for deletion of a whole workload directory.

        Deletes all the contained endpoints.
        """
        _log.debug("Workload dir %s/%s/%s deleted, removing endpoints",
                   hostname, orchestrator, workload_id)
        orchestrator = intern(orchestrator.encode("utf8"))
        workload_id = intern(workload_id.encode("utf8"))
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if (endpoint_id.orchestrator == orchestrator and
                    endpoint_id.workload == workload_id):
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]
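
One subtlety in _wait_for_etcd_event above: because the watch covers a whole subtree, the next waitIndex is derived from the modifiedIndex of the event just received rather than incremented blindly, since the subtree's index may jump well past a simple increment. A stripped-down sketch of just that bookkeeping, with read_snapshot and watch_once standing in for the recursive etcd reads in the original code:

# Stripped-down sketch of the waitIndex bookkeeping for a subtree watch.
# read_snapshot() and watch_once(wait_index=...) are placeholders; the
# handle_event callable stands in for the PathDispatcher.
def poll_subtree(read_snapshot, watch_once, handle_event):
    # The snapshot's etcd_index is its high-water mark; start watching at
    # the next index so no concurrent change is missed.
    next_index = read_snapshot().etcd_index + 1
    while True:
        response = watch_once(wait_index=next_index)
        handle_event(response)
        # On a subtree watch we can't simply increment the index; use the
        # event's modifiedIndex to spot that the index has jumped ahead.
        next_index = max(next_index, response.modifiedIndex) + 1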
Example n. 14
0
class EtcdWatcher(Actor):
    def __init__(self, config):
        super(EtcdWatcher, self).__init__()
        self.config = config
        self.client = None
        self.my_config_dir = dir_for_per_host_config(self.config.HOSTNAME)

        # Initialized at poll start time.
        self.splitter = None
        self.next_etcd_index = None

        # Cache of known endpoints, used to resolve deletions of whole
        # directory trees.
        self.endpoint_ids_per_host = defaultdict(set)

        # Program the dispatcher with the paths we care about.  Since etcd
        # gives us a single event for a recursive directory deletion, we have
        # to handle deletes for lots of directories that we otherwise wouldn't
        # care about.
        self.dispatcher = PathDispatcher()
        reg = self.dispatcher.register
        # Top-level directories etc.  If these go away, stop polling and
        # resync.
        reg(VERSION_DIR, on_del=self._resync)
        reg(POLICY_DIR, on_del=self._resync)
        reg(PROFILE_DIR, on_del=self._resync)
        reg(CONFIG_DIR, on_del=self._resync)
        reg(READY_KEY, on_set=self.on_ready_flag_set, on_del=self._resync)
        # Profiles and their contents.
        reg(PER_PROFILE_DIR, on_del=self.on_profile_delete)
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        # Hosts, workloads and endpoints.
        reg(HOST_DIR, on_del=self._resync)
        reg(PER_HOST_DIR, on_del=self.on_host_delete)
        reg(WORKLOAD_DIR, on_del=self.on_host_delete)
        reg(PER_ORCH_DIR, on_del=self.on_orch_delete)
        reg(PER_WORKLOAD_DIR, on_del=self.on_workload_delete)
        reg(ENDPOINT_DIR, on_del=self.on_workload_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set, on_del=self.on_endpoint_delete)

    @actor_message()
    def load_config(self):
        _log.info("Waiting for etcd to be ready and for config to be present.")
        configured = False
        while not configured:
            self._reconnect()
            self.wait_for_ready()
            try:
                global_cfg = self.client.read(CONFIG_DIR)
                global_dict = _build_config_dict(global_cfg)

                try:
                    host_cfg = self.client.read(self.my_config_dir)
                    host_dict = _build_config_dict(host_cfg)
                except EtcdKeyNotFound:
                    # It is not an error for there to be no per-host config;
                    # default to empty.
                    _log.info("No configuration overrides for this node")
                    host_dict = {}
            except (EtcdKeyNotFound, EtcdException) as e:
                # Note: we don't log the stack trace because it's too spammy
                # and adds little.
                _log.error("Failed to read config. etcd may be down or the"
                           "data model may not be ready: %r. Will retry.", e)
                gevent.sleep(RETRY_DELAY)
                continue

            self.config.report_etcd_config(host_dict, global_dict)
            configured = True

    @actor_message()
    def wait_for_ready(self):
        _log.info("Waiting for etcd to be ready...")
        ready = False
        while not ready:
            try:
                db_ready = self.client.read(READY_KEY,
                                            timeout=10).value
            except EtcdKeyNotFound:
                _log.warn("Ready flag not present in etcd; felix will pause "
                          "updates until the orchestrator sets the flag.")
                db_ready = "false"
            except EtcdException as e:
                # Note: we don't log the stack trace because it's too
                # spammy and adds little.
                _log.error("Failed to retrieve ready flag from etcd (%r). "
                           "Felix will not receive updates until the "
                           "connection to etcd is restored.", e)
                db_ready = "false"

            if db_ready == "true":
                _log.info("etcd is ready.")
                ready = True
            else:
                _log.info("etcd not ready.  Will retry.")
                gevent.sleep(RETRY_DELAY)
                continue

    def _reconnect(self, copy_cluster_id=True):
        etcd_addr = self.config.ETCD_ADDR
        if ":" in etcd_addr:
            host, port = etcd_addr.split(":")
            port = int(port)
        else:
            host = etcd_addr
            port = 4001
        if self.client and copy_cluster_id:
            old_cluster_id = self.client.expected_cluster_id
            _log.info("(Re)connecting to etcd. Old etcd cluster ID was %s.",
                      old_cluster_id)
        else:
            _log.info("(Re)connecting to etcd. No previous cluster ID.")
            old_cluster_id = None
        self.client = etcd.Client(host=host, port=port,
                                  expected_cluster_id=old_cluster_id)

    @actor_message()
    def watch_etcd(self, update_splitter):
        """
        Loads the snapshot from etcd and then monitors etcd for changes.
        Posts events to the UpdateSplitter.

        :returns: Does not return.
        """
        self.splitter = update_splitter
        while True:
            _log.info("Reconnecting and loading snapshot from etcd...")
            self._reconnect(copy_cluster_id=False)
            self.wait_for_ready()

            try:
                # Load initial dump from etcd.  First just get all the
                # endpoints and profiles by id.  The response contains a
                # generation ID allowing us to then start polling for updates
                # without missing any.
                self.load_initial_dump()
                while True:
                    # Wait for something to change.
                    response = self._wait_for_etcd_event()
                    self.dispatcher.handle_event(response)
            except ResyncRequired:
                _log.info("Polling aborted, doing resync.")

    def load_initial_dump(self):
        """
        Loads a snapshot from etcd and passes it to the update splitter.

        :raises ResyncRequired: if the Ready flag is not set in the snapshot.
        """
        initial_dump = self.client.read(VERSION_DIR, recursive=True)
        _log.info("Loaded snapshot from etcd cluster %s, parsing it...",
                  self.client.expected_cluster_id)
        rules_by_id = {}
        tags_by_id = {}
        endpoints_by_id = {}
        self.endpoint_ids_per_host.clear()
        still_ready = False
        for child in initial_dump.children:
            profile_id, rules = parse_if_rules(child)
            if profile_id:
                rules_by_id[profile_id] = rules
                continue
            profile_id, tags = parse_if_tags(child)
            if profile_id:
                tags_by_id[profile_id] = tags
                continue
            endpoint_id, endpoint = parse_if_endpoint(self.config, child)
            if endpoint_id and endpoint:
                endpoints_by_id[endpoint_id] = endpoint
                self.endpoint_ids_per_host[endpoint_id.host].add(endpoint_id)
                continue

            # Double-check the flag hasn't changed since we read it before.
            if child.key == READY_KEY:
                if child.value == "true":
                    still_ready = True
                else:
                    _log.warning("Aborting resync because ready flag was"
                                 "unset since we read it.")
                    raise ResyncRequired()

        if not still_ready:
            _log.warn("Aborting resync; ready flag no longer present.")
            raise ResyncRequired()

        # Actually apply the snapshot. This does not return anything, but
        # just sends the relevant messages to the relevant threads to make
        # all the processing occur.
        _log.info("Snapshot parsed, passing to update splitter")
        self.splitter.apply_snapshot(rules_by_id,
                                     tags_by_id,
                                     endpoints_by_id,
                                     async=True)
        # The etcd_index is the high-water-mark for the snapshot, record that
        # we want to poll starting at the next index.
        self.next_etcd_index = initial_dump.etcd_index + 1

    def _wait_for_etcd_event(self):
        """
        Polls etcd until something changes.

        Retries on read timeouts and other non-fatal errors.

        :returns: The etcd response object for the change.
        :raises ResyncRequired: If we get out of sync with etcd or hit
            a fatal error.
        """
        response = None
        while not response:
            try:
                _log.debug("About to wait for etcd update %s",
                           self.next_etcd_index)
                response = self.client.read(VERSION_DIR,
                                            wait=True,
                                            waitIndex=self.next_etcd_index,
                                            recursive=True,
                                            timeout=Timeout(connect=10,
                                                            read=90),
                                            check_cluster_uuid=True)
                _log.debug("etcd response: %r", response)
            except (ReadTimeoutError, SocketTimeout) as e:
                # This is expected when we're doing a poll and nothing
                # happened. socket timeout doesn't seem to be caught by
                # urllib3 1.7.1.  Simply reconnect.
                _log.debug("Read from etcd timed out (%r), retrying.", e)
                # Force a reconnect to ensure urllib3 doesn't recycle the
                # connection.  (We were seeing this with urllib3 1.7.1.)
                self._reconnect()
            except (ConnectTimeoutError,
                    urllib3.exceptions.HTTPError,
                    httplib.HTTPException):
                _log.warning("Low-level HTTP error, reconnecting to "
                             "etcd.", exc_info=True)
                self._reconnect()
            except (EtcdClusterIdChanged, EtcdEventIndexCleared) as e:
                _log.warning("Out of sync with etcd (%r).  Reconnecting "
                             "for full sync.", e)
                raise ResyncRequired()
            except EtcdException as e:
                # Sadly, python-etcd doesn't have a dedicated exception
                # for the "no more machines in cluster" error. Parse the
                # message:
                msg = (e.message or "unknown").lower()
                # Limit our retry rate in case etcd is down.
                gevent.sleep(1)
                if "no more machines" in msg:
                    # This error comes from python-etcd when it can't
                    # connect to any servers.  When we retry, it should
                    # reconnect.
                    # TODO: We should probably limit retries here and die;
                    # that'd recover from errors caused by resource
                    # exhaustion/leaks.
                    _log.error("Connection to etcd failed, will retry.")
                else:
                    # Assume any other errors are fatal to our poll and
                    # do a full resync.
                    _log.exception("Unknown etcd error %r; doing resync.",
                                   e.message)
                    self._reconnect()
                    raise ResyncRequired()
            except:
                _log.exception("Unexpected exception during etcd poll")
                raise

        # Since we're polling on a subtree, we can't just increment
        # the index, we have to look at the modifiedIndex to spot
        # if we've skipped a lot of updates.
        self.next_etcd_index = max(self.next_etcd_index,
                                   response.modifiedIndex) + 1
        return response
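
    # A worked illustration of the index bookkeeping above (a sketch,
    # assuming etcd v2 watch semantics; the numbers are made up): if we were
    # waiting at waitIndex=100 and the change we receive has
    # modifiedIndex=250, the next wait should start at
    # max(100, 250) + 1 == 251.  Re-using 101 would hand us the same event
    # again, and lagging too far behind the cluster's event history risks
    # EtcdEventIndexCleared, which forces a full resync.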

    def _resync(self, response, **kwargs):
        """
        Force a resync.
        :raises ResyncRequired: always.
        """
        raise ResyncRequired()

    def on_ready_flag_set(self, response):
        if response.value != "true":
            raise ResyncRequired()
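
    # The handler above treats any value of the Ready flag other than the
    # string "true" as meaning the data model is not (or is no longer) fully
    # populated, so the watcher forces a full resync rather than processing
    # events against an incomplete tree.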

    def on_endpoint_set(self, response, hostname, orchestrator,
                        workload_id, endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        self.endpoint_ids_per_host[combined_id.host].add(combined_id)
        endpoint = parse_endpoint(self.config, endpoint_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint, async=True)

    def on_endpoint_delete(self, response, hostname, orchestrator,
                           workload_id, endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = EndpointId(hostname, orchestrator, workload_id,
                                 endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        self.endpoint_ids_per_host[combined_id.host].discard(combined_id)
        if not self.endpoint_ids_per_host[combined_id.host]:
            del self.endpoint_ids_per_host[combined_id.host]
        self.splitter.on_endpoint_update(combined_id, None, async=True)
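
    # The endpoint handlers above are assumed to be registered with the
    # PathDispatcher against a templated key, so the captured path segments
    # arrive as keyword arguments.  A minimal sketch (the exact key layout
    # is an assumption):
    #
    #     self.dispatcher.register(
    #         "/calico/v1/host/<hostname>/workload/<orchestrator>/"
    #         "<workload_id>/endpoint/<endpoint_id>",
    #         on_set=self.on_endpoint_set,
    #         on_del=self.on_endpoint_delete)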

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        rules = parse_rules(profile_id, response.value)
        self.splitter.on_rules_update(profile_id, rules, async=True)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        tags = parse_tags(profile_id, response.value)
        self.splitter.on_tags_update(profile_id, tags, async=True)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        self.splitter.on_tags_update(profile_id, None, async=True)

    def on_profile_delete(self, response, profile_id):
        """
        Handler for a whole profile deletion

        Fakes a tag and rules delete.
        """
        # Fake deletes for the rules and tags.
        _log.debug("Whole profile %s deleted", profile_id)
        self.splitter.on_rules_update(profile_id, None, async=True)
        self.splitter.on_tags_update(profile_id, None, async=True)
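
    # Why the fake deletes: etcd reports a recursive directory delete as a
    # single event for the parent, so the per-profile rules and tags keys
    # (assumed here to live as children under the profile's directory) never
    # generate delete events of their own.  Synthesising both updates keeps
    # the splitter's view consistent.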

    def on_host_delete(self, response, hostname):
        """
        Handler for deletion of a whole host directory.

        Deletes all the contained endpoints.
        """
        ids_on_that_host = self.endpoint_ids_per_host.pop(hostname, set())
        _log.info("Host %s deleted, removing %d endpoints",
                  hostname, len(ids_on_that_host))
        for endpoint_id in ids_on_that_host:
            self.splitter.on_endpoint_update(endpoint_id, None, async=True)
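
    # Worked example for the handler above (a sketch; the IDs are made up):
    # a recursive delete of the directory for host "host1" arrives as a
    # single etcd event, so the cache is used to expand it, e.g.
    #
    #     self.endpoint_ids_per_host.pop("host1", set())
    #     # -> {EndpointId("host1", "docker", "wl1", "ep1"),
    #     #     EndpointId("host1", "docker", "wl2", "ep2")}
    #
    # and each of those endpoints is then reported to the splitter as
    # deleted (value None).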

    def on_orch_delete(self, response, hostname, orchestrator):
        """
        Handler for deletion of a whole host orchestrator directory.

        Deletes all the contained endpoints.
        """
        _log.info("Orchestrator dir %s/%s deleted, removing contained hosts",
                  hostname, orchestrator)
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if endpoint_id.orchestrator == orchestrator:
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]

    def on_workload_delete(self, response, hostname, orchestrator,
                           workload_id):
        """
        Handler for deletion of a whole workload directory.

        Deletes all the contained endpoints.
        """
        _log.debug("Workload dir %s/%s/%s deleted, removing endpoints",
                   hostname, orchestrator, workload_id)
        for endpoint_id in list(self.endpoint_ids_per_host[hostname]):
            if (endpoint_id.orchestrator == orchestrator and
                    endpoint_id.workload == workload_id):
                self.splitter.on_endpoint_update(endpoint_id, None, async=True)
                self.endpoint_ids_per_host[hostname].discard(endpoint_id)
        if not self.endpoint_ids_per_host[hostname]:
            del self.endpoint_ids_per_host[hostname]