Example 1
class TestRefHelper(TestReferenceManager):
    def setUp(self):
        super(TestRefHelper, self).setUp()
        self._rh = RefHelper(self._rm,
                             self._rm,
                             self._rm.ready_callback)

    def test_no_refs(self):
        # With no references, we're ready but haven't been notified
        self.assertFalse(self._rm._ready_called)
        self.assertTrue(self._rh.ready)

        # Discarding non-existent references is allowed
        self._rh.discard_ref("foo")

    def test_acquire_discard_1(self):
        # Acquire a reference to 'foo' - it won't be ready immediately
        self._rh.acquire_ref("foo")
        self.assertFalse(self._rm._ready_called)
        self.assertFalse(self._rh.ready)

        # Spin the actor framework - we become ready
        _, obj = self.call_via_cb(self._rm.get_and_incref, "bar", async=True)
        self.assertTrue(self._rm._ready_called)
        self.assertTrue(self._rh.ready)
        self.assertEqual(next(self._rh.iteritems())[0], "foo")

        # Acquiring an already-acquired reference is idempotent
        self._rh.acquire_ref("foo")
        self.assertTrue(self._rh.ready)

        # Discard the reference
        self._rh.discard_ref("foo")
        _, obj = self.call_via_cb(self._rm.get_and_incref, "baz", async=True)
        self.assertTrue(self._rh.ready)

    def test_sync_acquire_discard(self):
        # Acquire a reference and discard it before it's become ready
        self._rh.acquire_ref("foo")
        self.assertFalse(self._rh.ready)

        self._rh.discard_ref("foo")
        self.assertTrue(self._rh.ready)

        # Spin the actor framework
        _, obj = self.call_via_cb(self._rm.get_and_incref, "bar", async=True)

    def test_acquire_discard_2(self):
        # Acquire two references
        self._rh.acquire_ref("foo")
        _, obj = self.call_via_cb(self._rm.get_and_incref, "bar", async=True)
        self._rh.acquire_ref("baz")
        self.assertFalse(self._rh.ready)
        _, obj = self.call_via_cb(self._rm.get_and_incref, "bar2", async=True)
        acq_ids = list(key for key, value in self._rh.iteritems())
        self.assertItemsEqual(acq_ids, ["foo", "baz"])
        self.assertTrue(self._rh.ready)

        # Discard them all!
        self._rh.discard_all()
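The tests above drive RefHelper through its acquire/discard lifecycle: ready becomes true once the reference manager has delivered every requested reference, the ready callback fires at that point, re-acquiring a held reference is idempotent, and discarding an unknown reference is a no-op. Below is a minimal, synchronous sketch of that contract; it is an illustration only, not the real RefHelper, which acquires its references asynchronously through the actor framework.

# Illustration only: a synchronous stand-in for the RefHelper contract that
# the tests above rely on.  The real class defers acquisition to a
# ReferenceManager actor, so "ready" only becomes true after the actor
# framework has delivered every requested reference.
class SketchRefHelper(object):
    def __init__(self, manager, ready_callback):
        self._manager = manager              # dict: ref id -> object
        self._ready_callback = ready_callback
        self.required_refs = set()           # ids we currently want
        self._acquired = {}                  # ids we hold -> object

    @property
    def ready(self):
        return self.required_refs == set(self._acquired)

    def acquire_ref(self, obj_id):
        if obj_id in self.required_refs:
            return                           # re-acquire is idempotent
        self.required_refs.add(obj_id)
        self._acquired[obj_id] = self._manager[obj_id]
        if self.ready:
            self._ready_callback()

    def discard_ref(self, obj_id):
        # Discarding a reference we never acquired is allowed.
        self.required_refs.discard(obj_id)
        self._acquired.pop(obj_id, None)

    def replace_all(self, obj_ids):
        for obj_id in set(self.required_refs) - set(obj_ids):
            self.discard_ref(obj_id)
        for obj_id in obj_ids:
            self.acquire_ref(obj_id)

    def discard_all(self):
        self.replace_all(set())

    def iteritems(self):
        return iter(self._acquired.items())

# Usage mirroring test_acquire_discard_1 above:
ready_calls = []
helper = SketchRefHelper({"foo": object()}, lambda: ready_calls.append(True))
helper.acquire_ref("foo")
assert helper.ready and ready_calls and next(helper.iteritems())[0] == "foo"
helper.discard_all()
assert helper.ready and not helper.required_refs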
Example 2
class LocalEndpoint(RefCountedActor):
    def __init__(self, config, combined_id, ip_type, iptables_updater, dispatch_chains, rules_manager, status_reporter):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        :param status_reporter: actor used to report endpoint status changes
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" % (combined_id.endpoint, ip_type))
        assert isinstance(dispatch_chains, DispatchChains)
        assert isinstance(rules_manager, RulesManager)

        self.config = config
        self.iptables_generator = config.plugins["iptables_generator"]

        self.combined_id = combined_id
        self.ip_type = ip_type

        # Other actors we need to talk to.
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.status_reporter = status_reporter

        # Helper for acquiring/releasing profiles.
        self.rules_ref_helper = RefHelper(self, rules_manager, self._on_profiles_ready)

        # Per-batch state.
        self._pending_endpoint = None
        self._endpoint_update_pending = False
        self._mac_changed = False

        # Current endpoint data.
        self.endpoint = None

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track the success/failure of our dataplane programming.
        self._chains_programmed = False
        self._iptables_in_sync = False
        self._device_in_sync = False

        # Oper-state of the Linux interface.
        self._device_is_up = None  # Unknown

        # Our last status report.  Used for de-dupe.
        self._last_status = None

        # One-way flags to indicate that we should clean up/have cleaned up.
        self._unreferenced = False
        self._added_to_dispatch_chains = False
        self._cleaned_up = False

    @property
    def nets_key(self):
        if self.ip_type == IPV4:
            return "ipv4_nets"
        else:
            return "ipv6_nets"

    @property
    def _admin_up(self):
        return not self._unreferenced and self.endpoint and self.endpoint.get("state") == "active"

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        assert not self._unreferenced, "Update after being unreferenced"

        # Store off the update, to be handled in _finish_msg_batch.
        self._pending_endpoint = endpoint
        self._endpoint_update_pending = True
        if force_reprogram:
            self._iptables_in_sync = False
            self._device_in_sync = False

    @actor_message()
    def on_interface_update(self, iface_up):
        """
        Actor event to report that the interface is either up or changed.
        """
        _log.info("Endpoint %s received interface kick: %s", self.combined_id, iface_up)
        assert not self._unreferenced, "Interface kick after unreference"

        # Use a flag so that we coalesce any duplicate updates in
        # _finish_msg_batch.
        self._device_in_sync = False
        self._device_is_up = iface_up

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._unreferenced, "Duplicate on_unreferenced() call"

        # We should be deleted before being unreferenced.
        assert self.endpoint is None or (self._pending_endpoint is None and self._endpoint_update_pending)

        # Defer the processing to _finish_msg_batch.
        self._unreferenced = True

    def _finish_msg_batch(self, batch, results):
        if self._cleaned_up:
            # This can occur if we get a callback from a profile via the
            # RefHelper after we've already been deleted.
            _log.warn("_finish_msg_batch() called after being unreferenced, " "ignoring.  Batch: %s", batch)
            return

        if self._endpoint_update_pending:
            # Copy the pending update into our data structures.  May work out
            # that iptables or the device is now out of sync.
            _log.debug("Endpoint update pending: %s", self._pending_endpoint)
            self._apply_endpoint_update()

        if not self._iptables_in_sync:
            # Try to update iptables, if successful, will set the
            # _iptables_in_sync flag.
            _log.debug("iptables is out-of-sync, trying to update it")
            if self._admin_up:
                _log.info("%s is 'active', (re)programming chains.", self)
                self._update_chains()
            elif self._chains_programmed:
                # No longer active but our chains are still in place.  Remove
                # them.
                _log.info("%s is not 'active', removing chains.", self)
                self._remove_chains()

        if not self._device_in_sync and self._iface_name:
            # Try to update the device configuration.  If successful, will set
            # the _device_in_sync flag.
            if self._admin_up:
                # Endpoint is supposed to be live, try to configure it.
                _log.debug("Device is out-of-sync, trying to configure it")
                self._configure_interface()
            else:
                # We've been deleted, de-configure the interface.
                _log.debug("Device is out-of-sync, trying to de-configure it")
                self._deconfigure_interface()

        if self._unreferenced:
            # Endpoint is being removed, clean up...
            _log.debug("Cleaning up after endpoint unreferenced")
            self.dispatch_chains.on_endpoint_removed(self._iface_name, async=True)
            self.rules_ref_helper.discard_all()
            self._notify_cleanup_complete()
            self._cleaned_up = True
        elif not self._added_to_dispatch_chains and self._iface_name:
            # This must be the first batch, add ourself to the dispatch chains.
            _log.debug("Adding endpoint to dispatch chain")
            self.dispatch_chains.on_endpoint_added(self._iface_name, async=True)
            self._added_to_dispatch_chains = True

        # If changed, report our status back to the datastore.
        self._maybe_update_status()

    def _maybe_update_status(self):
        if not self.config.REPORT_ENDPOINT_STATUS:
            _log.debug("Status reporting disabled. Not reporting status.")
            return

        if not self._device_is_up:
            # Check this first because we won't try to sync the device if it's
            # oper down.
            reason = "Interface is oper-down"
            status = ENDPOINT_STATUS_DOWN
        elif not self.endpoint:
            reason = "No endpoint data"
            status = ENDPOINT_STATUS_DOWN
        elif not self._iptables_in_sync:
            # Definitely an error, the iptables command failed.
            reason = "Failed to update iptables"
            status = ENDPOINT_STATUS_ERROR
        elif not self._device_in_sync:
            reason = "Failed to update device config"
            status = ENDPOINT_STATUS_ERROR
        elif not self._admin_up:
            # After the tests for being in sync because we handle admin down
            # by removing the configuration from the dataplane.
            reason = "Endpoint is admin down"
            status = ENDPOINT_STATUS_DOWN
        else:
            # All checks passed.  We're up!
            reason = "In sync and device is up"
            status = ENDPOINT_STATUS_UP

        if self._unreferenced or status != self._last_status:
            _log.info("%s: updating status to %s", reason, status)
            if self._unreferenced:
                _log.debug("Unreferenced, reporting status = None")
                status_dict = None
            else:
                _log.debug("Endpoint oper state changed to %s", status)
                status_dict = {"status": status}
            self.status_reporter.on_endpoint_status_changed(self.combined_id, self.ip_type, status_dict, async=True)
            self._last_status = status

    def _apply_endpoint_update(self):
        pending_endpoint = self._pending_endpoint
        if pending_endpoint == self.endpoint:
            _log.debug("Endpoint hasn't changed, nothing to do")
            return

        if pending_endpoint:
            # Update/create.
            if pending_endpoint["mac"] != self._mac:
                # Either we have not seen this MAC before, or it has changed.
                _log.debug("Endpoint MAC changed to %s", pending_endpoint["mac"])
                self._mac = pending_endpoint["mac"]
                self._mac_changed = True
                # MAC change requires refresh of iptables rules and ARP table.
                self._iptables_in_sync = False
                self._device_in_sync = False

            if self.endpoint is None:
                # This is the first time we have seen the endpoint, so extract
                # the interface name and endpoint ID.
                self._iface_name = pending_endpoint["name"]
                self._suffix = interface_to_suffix(self.config, self._iface_name)
                _log.debug("Learned interface name/suffix: %s/%s", self._iface_name, self._suffix)
                # First time through, need to program everything.
                self._iptables_in_sync = False
                self._device_in_sync = False
                if self._device_is_up is None:
                    _log.debug("Learned interface name, checking if device " "is up.")
                    self._device_is_up = devices.interface_exists(self._iface_name) and devices.interface_up(
                        self._iface_name
                    )

            # Check if the profile ID or IP addresses have changed, requiring
            # a refresh of the dataplane.
            profile_ids = set(pending_endpoint.get("profile_ids", []))
            if profile_ids != self.rules_ref_helper.required_refs:
                # Profile ID update required iptables update but not device
                # update.
                _log.debug("Profile IDs changed, need to update iptables")
                self._iptables_in_sync = False

            # Check for changes to values that require a device update.
            if self.endpoint:
                if self.endpoint.get("state") != pending_endpoint.get("state"):
                    _log.debug("Desired interface state updated.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                if self.endpoint[self.nets_key] != pending_endpoint[self.nets_key]:
                    # IP addresses have changed, need to update the routing
                    # table.
                    _log.debug("IP addresses changed, need to update routing")
                    self._device_in_sync = False
        else:
            # Delete of the endpoint.  Need to resync everything.
            profile_ids = set()
            self._iptables_in_sync = False
            self._device_in_sync = False

        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self.rules_ref_helper.replace_all(profile_ids)

        self.endpoint = pending_endpoint
        self._endpoint_update_pending = False
        self._pending_endpoint = None

    def _update_chains(self):
        updates, deps = self.iptables_generator.endpoint_updates(
            IP_TYPE_TO_VERSION[self.ip_type],
            self.combined_id.endpoint,
            self._suffix,
            self._mac,
            self.endpoint["profile_ids"],
        )
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            try:
                self.iptables_updater.delete_chains(
                    self.iptables_generator.endpoint_chain_names(self._suffix), async=False
                )
            except FailedSystemCall:
                _log.exception("Failed to remove chains after original " "failure")
        else:
            self._iptables_in_sync = True
            self._chains_programmed = True

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(self.iptables_generator.endpoint_chain_names(self._suffix), async=False)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
        else:
            self._iptables_in_sync = True
            self._chains_programmed = False

    def _configure_interface(self):
        """
        Applies sysctls and routes to the interface.
        """
        if not self._device_is_up:
            _log.debug("Device is known to be down, skipping attempt to " "configure it.")
            return
        try:
            if self.ip_type == IPV4:
                devices.configure_interface_ipv4(self._iface_name)
                reset_arp = self._mac_changed
            else:
                ipv6_gw = self.endpoint.get("ipv6_gateway", None)
                devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
                reset_arp = False

            ips = set()
            for ip in self.endpoint.get(self.nets_key, []):
                ips.add(futils.net_to_ip(ip))
            devices.set_routes(self.ip_type, ips, self._iface_name, self.endpoint["mac"], reset_arp=reset_arp)

        except (IOError, FailedSystemCall) as e:
            if not devices.interface_exists(self._iface_name):
                _log.info("Interface %s for %s does not exist yet", self._iface_name, self.combined_id)
            elif not devices.interface_up(self._iface_name):
                _log.info("Interface %s for %s is not up yet", self._iface_name, self.combined_id)
            else:
                # Either the interface flapped back up after the failure (in
                # which case we'll retry when the event reaches us) or there
                # was a genuine failure due to bad data or some other factor.
                #
                # Since the former is fairly common, we log at warning level
                # rather than error, which avoids false positives.
                _log.warning(
                    "Failed to configure interface %s for %s: %r.  "
                    "Either the interface is flapping or it is "
                    "misconfigured.",
                    self._iface_name,
                    self.combined_id,
                    e,
                )
        else:
            _log.info("Interface %s configured", self._iface_name)
            self._device_in_sync = True

    def _deconfigure_interface(self):
        """
        Removes routes from the interface.
        """
        try:
            devices.set_routes(self.ip_type, set(), self._iface_name, None)
        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                # Deleted under our feet - so the rules are gone.
                _log.info("Interface %s for %s already deleted", self._iface_name, self.combined_id)
            else:
                # An error deleting the routes. Log and continue.
                _log.exception("Cannot delete routes for interface %s for %s", self._iface_name, self.combined_id)
        else:
            _log.info("Interface %s deconfigured", self._iface_name)
            self._device_in_sync = True

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references", self.combined_id)

    def __str__(self):
        return "LocalEndpoint<%s,id=%s,iface=%s>" % (self.ip_type, self.combined_id, self._iface_name or "unknown")
Example 3
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, profile_id, ip_version, iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.id = profile_id
        self.ip_version = ip_version
        self._ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self._ipset_refs = RefHelper(self, ipset_mgr, self._on_ipsets_acquired)

        # Latest profile update - a profile dictionary.
        self._pending_profile = None
        # Currently-programmed profile dictionary.
        self._profile = None

        # State flags.
        self._notified_ready = False
        self._cleaned_up = False
        self._dead = False
        self._dirty = True

        self.chain_names = {
            "inbound": profile_to_chain_name("inbound", profile_id),
            "outbound": profile_to_chain_name("outbound", profile_id),
        }
        _log.info("Profile %s has chain names %s", profile_id,
                  self.chain_names)

    @actor_message()
    def on_profile_update(self, profile, force_reprogram=False):
        """
        Update the programmed iptables configuration with the new
        profile.

        :param dict[str]|NoneType profile: Dictionary of all profile data or
            None if profile is to be deleted.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert not self._dead, "Shouldn't receive updates after we're dead."
        self._pending_profile = profile
        self._dirty |= force_reprogram

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.
        """
        # Flag that we're dead and then let finish_msg_batch() do the cleanup.
        self._dead = True

    def _on_ipsets_acquired(self):
        """
        Callback from the RefHelper once it's acquired all the ipsets we
        need.

        This is called from an actor_message on our greenlet.
        """
        # Nothing to do here, if this is being called then we're already in
        # a message batch so _finish_msg_batch() will get called next.
        _log.info("All required ipsets acquired.")

    def _finish_msg_batch(self, batch, results):
        # Due to dependency management in IptablesUpdater, we don't need to
        # worry about programming the dataplane before notifying so do it on
        # this common code path.
        if not self._notified_ready:
            self._notify_ready()
            self._notified_ready = True

        if self._dead:
            # Only want to clean up once.  Note: we can get here a second time
            # if we had a pending ipset incref in-flight when we were asked
            # to clean up.
            if not self._cleaned_up:
                try:
                    _log.info("%s unreferenced, removing our chains", self)
                    self._delete_chains()
                    self._ipset_refs.discard_all()
                    self._ipset_refs = None  # Break ref cycle.
                    self._profile = None
                    self._pending_profile = None
                finally:
                    self._cleaned_up = True
                    self._notify_cleanup_complete()
        else:
            if self._pending_profile != self._profile:
                _log.debug("Profile data changed, updating ipset references.")
                old_tags = extract_tags_from_profile(self._profile)
                new_tags = extract_tags_from_profile(self._pending_profile)
                removed_tags = old_tags - new_tags
                added_tags = new_tags - old_tags
                for tag in removed_tags:
                    _log.debug("Queueing ipset for tag %s for decref", tag)
                    self._ipset_refs.discard_ref(tag)
                for tag in added_tags:
                    _log.debug("Requesting ipset for tag %s", tag)
                    self._ipset_refs.acquire_ref(tag)
                self._dirty = True
                self._profile = self._pending_profile

            if (self._dirty and self._ipset_refs.ready
                    and self._pending_profile is not None):
                _log.info("Ready to program rules for %s", self.id)
                try:
                    self._update_chains()
                except FailedSystemCall as e:
                    _log.error("Failed to program profile chain %s; error: %r",
                               self, e)
                else:
                    self._dirty = False
            elif not self._dirty:
                _log.debug("No changes to program.")
            elif self._pending_profile is None:
                _log.info("Profile is None, removing our chains")
                try:
                    self._delete_chains()
                except FailedSystemCall:
                    _log.exception("Failed to delete chains for profile %s",
                                   self.id)
                else:
                    self._dirty = False
            elif not self._ipset_refs.ready:
                _log.info("Can't program rules %s yet, waiting on ipsets",
                          self.id)

    def _delete_chains(self):
        """
        Removes our chains from the dataplane, blocks until complete.
        """
        chains = set(self.chain_names.values())
        # Need to block here: have to wait for chains to be deleted
        # before we can decref our ipsets.
        self._iptables_updater.delete_chains(chains, async=False)

    def _update_chains(self):
        """
        Updates the chains in the dataplane.

        Blocks until the update is complete.

        On entry, self._pending_profile must not be None.

        :raises FailedSystemCall: if the update fails.
        """
        _log.info("%s Programming iptables with our chains.", self)
        assert self._pending_profile is not None, \
               "_update_chains called with no _pending_profile"
        updates = {}
        for direction in ("inbound", "outbound"):
            chain_name = self.chain_names[direction]
            _log.info("Updating %s chain %r for profile %s", direction,
                      chain_name, self.id)
            _log.debug("Profile %s: %s", self.id, self._profile)
            rules_key = "%s_rules" % direction
            new_rules = self._pending_profile.get(rules_key, [])
            tag_to_ip_set_name = {}
            for tag, ipset in self._ipset_refs.iteritems():
                tag_to_ip_set_name[tag] = ipset.ipset_name
            updates[chain_name] = rules_to_chain_rewrite_lines(
                chain_name,
                new_rules,
                self.ip_version,
                tag_to_ip_set_name,
                on_allow="RETURN",
                comment_tag=self.id)
        _log.debug("Queueing programming for rules %s: %s", self.id, updates)
        self._iptables_updater.rewrite_chains(updates, {}, async=False)
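The profile update path above adjusts its ipset references by diffing the tags used by the old and pending profiles. A small sketch of that diff step; extract_tags and ref_helper are stand-ins for extract_tags_from_profile() and the RefHelper held in self._ipset_refs, not the real Felix objects.

# Sketch of the tag-diffing step in _finish_msg_batch() above.  The
# arguments are stand-ins: extract_tags plays the role of
# extract_tags_from_profile() and ref_helper the role of self._ipset_refs.
def reconcile_ipset_refs(ref_helper, extract_tags, old_profile, new_profile):
    old_tags = extract_tags(old_profile)
    new_tags = extract_tags(new_profile)
    for tag in old_tags - new_tags:
        ref_helper.discard_ref(tag)      # decref ipsets we no longer need
    for tag in new_tags - old_tags:
        ref_helper.acquire_ref(tag)      # incref newly-required ipsets
    return old_tags != new_tags          # True if any references changed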
Example 4
class LocalEndpoint(RefCountedActor):

    def __init__(self, config, combined_id, ip_type, iptables_updater,
                 dispatch_chains, rules_manager):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
                                            (combined_id.endpoint, ip_type))
        assert isinstance(dispatch_chains, DispatchChains)
        assert isinstance(rules_manager, RulesManager)

        self.config = config

        self.combined_id = combined_id
        self.ip_type = ip_type

        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager

        self.rules_ref_helper = RefHelper(self, rules_manager,
                                          self._on_profiles_ready)

        self._pending_endpoint = None
        self._endpoint_update_pending = False
        self._mac_changed = False

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self.endpoint = None
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Keep track of which dependencies we're missing.
        self._missing_deps = self._calculate_missing_deps()

        # Track the success/failure of our dataplane programming.
        self._iptables_in_sync = False
        self._device_in_sync = False

        # One-way flags to indicate that we should clean up/have cleaned up.
        self._unreferenced = False
        self._added_to_dispatch_chains = False
        self._cleaned_up = False

    @property
    def nets_key(self):
        if self.ip_type == IPV4:
            return "ipv4_nets"
        else:
            return "ipv6_nets"

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        assert not self._unreferenced, "Update after being unreferenced"

        # Store off the update, to be handled in _finish_msg_batch.
        self._pending_endpoint = endpoint
        self._endpoint_update_pending = True
        if force_reprogram:
            self._iptables_in_sync = False
            self._device_in_sync = False

    @actor_message()
    def on_interface_update(self):
        """
        Actor event to report that the interface is either up or changed.
        """
        _log.info("Endpoint %s received interface kick", self.combined_id)

        # Use a flag so that we coalesce any duplicate updates in
        # _finish_msg_batch.
        self._device_in_sync = False

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)

        # We should be deleted before being unreferenced.
        assert self.endpoint is None or (self._pending_endpoint is None and
                                         self._endpoint_update_pending)

        # Defer the processing to _finish_msg_batch.
        self._unreferenced = True

    def _finish_msg_batch(self, batch, results):
        if self._cleaned_up:
            # We could just ignore this but it suggests that the
            # EndpointManager is bugged.
            raise AssertionError(
                "Unexpected update to %s (%s) after being unreferenced" %
                (self, self.__dict__)
            )

        if self._endpoint_update_pending:
            # Copy the pending update into our data structures.  May work out
            # that iptables or the device is now out of sync.
            _log.debug("Endpoint update pending: %s", self._pending_endpoint)
            self._apply_endpoint_update()

        if not self._iptables_in_sync:
            # Try to update iptables, if successful, will set the
            # _iptables_in_sync flag.
            _log.debug("iptables is out-of-sync, trying to update it")
            self._maybe_update_iptables()

        if not self._device_in_sync and self._iface_name:
            # Try to update the device configuration.  If successful, will set
            # the _device_in_sync flag.
            if self.endpoint:
                # Endpoint is supposed to be live, try to configure it.
                _log.debug("Device is out-of-sync, trying to configure it")
                self._configure_interface()
            else:
                # We've been deleted, de-configure the interface.
                _log.debug("Device is out-of-sync, trying to de-configure it")
                self._deconfigure_interface()

        if self._unreferenced:
            # Endpoint is being removed, clean up...
            _log.debug("Cleaning up after endpoint unreferenced")
            self.dispatch_chains.on_endpoint_removed(self._iface_name,
                                                     async=True)
            self.rules_ref_helper.discard_all()
            self._notify_cleanup_complete()
            self._cleaned_up = True
        elif not self._added_to_dispatch_chains:
            # This must be the first batch, add ourself to the dispatch chains.
            _log.debug("Adding endpoint to dispatch chain")
            self.dispatch_chains.on_endpoint_added(self._iface_name,
                                                   async=True)
            self._added_to_dispatch_chains = True

    def _apply_endpoint_update(self):
        pending_endpoint = self._pending_endpoint
        if pending_endpoint == self.endpoint:
            _log.debug("Endpoint hasn't changed, nothing to do")
            return

        if pending_endpoint:
            # Update/create.
            if pending_endpoint['mac'] != self._mac:
                # Either we have not seen this MAC before, or it has changed.
                _log.debug("Endpoint MAC changed to %s",
                           pending_endpoint["mac"])
                self._mac = pending_endpoint['mac']
                self._mac_changed = True
                # MAC change requires refresh of iptables rules and ARP table.
                self._iptables_in_sync = False
                self._device_in_sync = False

            if self.endpoint is None:
                # This is the first time we have seen the endpoint, so extract
                # the interface name and endpoint ID.
                self._iface_name = pending_endpoint["name"]
                self._suffix = interface_to_suffix(self.config,
                                                   self._iface_name)
                _log.debug("Learned interface name/suffix: %s/%s",
                           self._iface_name, self._suffix)
                # First time through, need to program everything.
                self._iptables_in_sync = False
                self._device_in_sync = False

            # Check if the profile ID or IP addresses have changed, requiring
            # a refresh of the dataplane.
            profile_ids = set(pending_endpoint.get("profile_ids", []))
            if profile_ids != self.rules_ref_helper.required_refs:
                # Profile ID update required iptables update but not device
                # update.
                _log.debug("Profile IDs changed, need to update iptables")
                self._iptables_in_sync = False
            if (self.endpoint and
                    (self.endpoint[self.nets_key] !=
                     pending_endpoint[self.nets_key])):
                # IP addresses have changed, need to update the routing table.
                _log.debug("IP addresses changed, need to update routing")
                self._device_in_sync = False
        else:
            # Delete of the endpoint.  Need to resync everything.
            profile_ids = set()
            self._iptables_in_sync = False
            self._device_in_sync = False

        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self.rules_ref_helper.replace_all(profile_ids)

        self.endpoint = pending_endpoint
        self._endpoint_update_pending = False
        self._pending_endpoint = None

    def _calculate_missing_deps(self):
        """
        Returns a list of missing dependencies.
        """
        missing_deps = []
        if not self.endpoint:
            missing_deps.append("endpoint")
        elif self.endpoint.get("state", "active") != "active":
            missing_deps.append("endpoint active")
        elif not self.endpoint.get("profile_ids"):
            missing_deps.append("profile")
        return missing_deps

    def _maybe_update_iptables(self):
        """
        Update the relevant programming for this endpoint.
        """
        old_missing_deps = self._missing_deps
        self._missing_deps = self._calculate_missing_deps()

        if not self._missing_deps:
            # We have all the dependencies we need to do the programming and
            # the caller has already worked out that iptables needs refreshing.
            _log.info("%s became ready to program.", self)
            self._update_chains()
        elif not old_missing_deps and self._missing_deps:
            # We were active but now we're not, withdraw the dispatch rule
            # and our chain.  We must do this to allow iptables to remove
            # the profile chain when we're being deleted.
            _log.debug("%s not ready, waiting on %s", self, self._missing_deps)
            _log.info("%s not ready.", self)
            self._remove_chains()

    def _update_chains(self):
        updates, deps = _get_endpoint_rules(self.combined_id.endpoint,
                                            self._suffix,
                                            self._mac,
                                            self.endpoint["profile_ids"])
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            try:
                self.iptables_updater.delete_chains(chain_names(self._suffix),
                                                    async=False)
            except FailedSystemCall:
                _log.exception("Failed to remove chains after original "
                               "failure")
        else:
            self._iptables_in_sync = True

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(chain_names(self._suffix),
                                                async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
        else:
            self._iptables_in_sync = True

    def _configure_interface(self):
        """
        Applies sysctls and routes to the interface.

        :param: bool mac_changed: Has the MAC address changed since it was last
                     configured? If so, we reconfigure ARP for the interface in
                     IPv4 (ARP does not exist for IPv6, which uses neighbour
                     solicitation instead).
        """
        try:
            if self.ip_type == IPV4:
                devices.configure_interface_ipv4(self._iface_name)
                reset_arp = self._mac_changed
            else:
                ipv6_gw = self.endpoint.get("ipv6_gateway", None)
                devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
                reset_arp = False

            ips = set()
            for ip in self.endpoint.get(self.nets_key, []):
                ips.add(futils.net_to_ip(ip))
            devices.set_routes(self.ip_type, ips,
                               self._iface_name,
                               self.endpoint["mac"],
                               reset_arp=reset_arp)

        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                _log.info("Interface %s for %s does not exist yet",
                          self._iface_name, self.combined_id)
            elif not devices.interface_up(self._iface_name):
                _log.info("Interface %s for %s is not up yet",
                          self._iface_name, self.combined_id)
            else:
                # Interface flapped back up after we failed?
                _log.warning("Failed to configure interface %s for %s",
                             self._iface_name, self.combined_id)
        else:
            _log.info("Interface %s configured", self._iface_name)
            self._device_in_sync = True

    def _deconfigure_interface(self):
        """
        Removes routes from the interface.
        """
        try:
            devices.set_routes(self.ip_type, set(), self._iface_name, None)
        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                # Deleted under our feet - so the rules are gone.
                _log.debug("Interface %s for %s deleted",
                           self._iface_name, self.combined_id)
            else:
                # An error deleting the routes. Log and continue.
                _log.exception("Cannot delete routes for interface %s for %s",
                               self._iface_name, self.combined_id)
        else:
            _log.info("Interface %s deconfigured", self._iface_name)
            self._device_in_sync = True

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references",
                  self.combined_id)

    def __str__(self):
        return ("LocalEndpoint<%s,id=%s,iface=%s>" %
                (self.ip_type, self.combined_id,
                 self._iface_name or "unknown"))
Example 5
class LocalEndpoint(RefCountedActor):
    def __init__(self, config, combined_id, ip_type, iptables_updater, dispatch_chains, rules_manager):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" % (combined_id.endpoint, ip_type))
        assert isinstance(dispatch_chains, DispatchChains)
        assert isinstance(rules_manager, RulesManager)

        self.combined_id = combined_id

        self.config = config
        self.ip_type = ip_type
        self.ip_version = futils.IP_TYPE_TO_VERSION[ip_type]
        if self.ip_type == IPV4:
            self.nets_key = "ipv4_nets"
        else:
            self.nets_key = "ipv6_nets"
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.rules_ref_helper = RefHelper(self, rules_manager, self._on_profiles_ready)

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self.endpoint = None
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track whether the last attempt to program the dataplane succeeded.
        # We'll force a reprogram next time we get a kick.
        self._failed = False
        # And whether we've received an update since last time we programmed.
        self._dirty = False

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary,
            or None if the endpoint has been deleted.
        """
        _log.info("%s updated: %s", self, endpoint)
        mac_changed = False

        if not endpoint and not self.endpoint:
            # First time we have been called, but it's a delete! Maybe some
            # odd timing window, but we have nothing to tidy up.
            return

        if endpoint and endpoint["mac"] != self._mac:
            # Either we have not seen this MAC before, or it has changed.
            self._mac = endpoint["mac"]
            mac_changed = True

        if endpoint and not self.endpoint:
            # This is the first time we have seen the endpoint, so extract the
            # interface name and endpoint ID.
            self._iface_name = endpoint["name"]
            self._suffix = interface_to_suffix(self.config, self._iface_name)

        was_ready = self._ready

        # Activate the required profile IDs (and deactivate any that we no
        # longer need).
        if endpoint:
            new_profile_ids = set(endpoint["profile_ids"])
        else:
            new_profile_ids = set()
        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self.rules_ref_helper.replace_all(new_profile_ids)

        if endpoint != self.endpoint or force_reprogram:
            self._dirty = True

        # Store off the endpoint we were passed.
        self.endpoint = endpoint

        if endpoint:
            # Configure the network interface; may fail if not there yet (in
            # which case we'll just do it when the interface comes up).
            self._configure_interface(mac_changed)
        else:
            # Remove the network programming.
            self._deconfigure_interface()

        self._maybe_update(was_ready)
        _log.debug("%s finished processing update", self)

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._ready, "Should be deleted before being unreffed."
        # Removing all profile refs should have been done already but be
        # defensive.
        self.rules_ref_helper.discard_all()
        self._notify_cleanup_complete()

    @actor_message()
    def on_interface_update(self):
        """
        Actor event to report that the interface is either up or changed.
        """
        _log.info("Endpoint %s received interface kick", self.combined_id)
        self._configure_interface()

    @property
    def _missing_deps(self):
        """
        Returns a list of missing dependencies.
        """
        missing_deps = []
        if not self.endpoint:
            missing_deps.append("endpoint")
        elif self.endpoint.get("state", "active") != "active":
            missing_deps.append("endpoint active")
        elif not self.endpoint.get("profile_ids"):
            missing_deps.append("profile")
        return missing_deps

    @property
    def _ready(self):
        """
        Returns whether this LocalEndpoint has any dependencies preventing it
        programming its rules.
        """
        return not self._missing_deps

    def _maybe_update(self, was_ready):
        """
        Update the relevant programming for this endpoint.

        :param bool was_ready: Whether this endpoint has already been
                               successfully configured.
        """
        is_ready = self._ready
        if not is_ready:
            _log.debug("%s not ready, waiting on %s", self, self._missing_deps)
        if self._failed or self._dirty or is_ready != was_ready:
            ifce_name = self._iface_name
            if is_ready:
                # We've got all the info and everything is active.
                if self._failed:
                    _log.warn("Retrying programming after a failure")
                self._failed = False  # Ready to try again...
                _log.info("%s became ready to program.", self)
                self._update_chains()
                self.dispatch_chains.on_endpoint_added(self._iface_name, async=True)
            else:
                # We were active but now we're not, withdraw the dispatch rule
                # and our chain.  We must do this to allow iptables to remove
                # the profile chain.
                _log.info("%s became unready.", self)
                self._failed = False  # Don't care any more.

                self.dispatch_chains.on_endpoint_removed(ifce_name, async=True)
                self._remove_chains()
            self._dirty = False

    def _update_chains(self):
        updates, deps = _get_endpoint_rules(
            self.combined_id.endpoint, self._suffix, self.endpoint["mac"], self.endpoint["profile_ids"]
        )
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            self._failed = True
            self._remove_chains()

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(chain_names(self._suffix), async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
            self._failed = True

    def _configure_interface(self, mac_changed=True):
        """
        Applies sysctls and routes to the interface.

        :param bool mac_changed: Has the MAC address changed since it was last
                     configured? If so, we reconfigure ARP for the interface in
                     IPv4 (ARP does not exist for IPv6, which uses neighbour
                     solicitation instead).
        """
        try:
            if self.ip_type == IPV4:
                devices.configure_interface_ipv4(self._iface_name)
                reset_arp = mac_changed
            else:
                ipv6_gw = self.endpoint.get("ipv6_gateway", None)
                devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
                reset_arp = False

            ips = set()
            for ip in self.endpoint.get(self.nets_key, []):
                ips.add(futils.net_to_ip(ip))
            devices.set_routes(self.ip_type, ips, self._iface_name, self.endpoint["mac"], reset_arp=reset_arp)

        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                _log.info("Interface %s for %s does not exist yet", self._iface_name, self.combined_id)
            elif not devices.interface_up(self._iface_name):
                _log.info("Interface %s for %s is not up yet", self._iface_name, self.combined_id)
            else:
                # Interface flapped back up after we failed?
                _log.warning("Failed to configure interface %s for %s", self._iface_name, self.combined_id)

    def _deconfigure_interface(self):
        """
        Removes routes from the interface.
        """
        try:
            devices.set_routes(self.ip_type, set(), self._iface_name, None)
        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                # Deleted under our feet - so the rules are gone.
                _log.debug("Interface %s for %s deleted", self._iface_name, self.combined_id)
            else:
                # An error deleting the routes. Log and continue.
                _log.exception("Cannot delete routes for interface %s for %s", self._iface_name, self.combined_id)

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references", self.combined_id)

    def __str__(self):
        return "Endpoint<%s,id=%s,iface=%s>" % (self.ip_type, self.combined_id, self._iface_name or "unknown")
Example 6
class LocalEndpoint(RefCountedActor):
    def __init__(self, config, combined_id, ip_type, iptables_updater,
                 dispatch_chains, rules_manager, fip_manager, status_reporter):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        :param fip_manager: FloatingIPManager to use
        :param status_reporter: actor used to report endpoint status changes
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
                                            (combined_id.endpoint, ip_type))
        assert isinstance(rules_manager, RulesManager)

        self.config = config
        self.iptables_generator = config.plugins["iptables_generator"]

        self.combined_id = combined_id
        self.ip_type = ip_type

        # Other actors we need to talk to.
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.status_reporter = status_reporter
        self.fip_manager = fip_manager

        # Helper for acquiring/releasing profiles.
        self._rules_ref_helper = RefHelper(self, rules_manager,
                                           self._on_profiles_ready)

        # List of global policies that we care about.
        self._pol_ids_by_tier = OrderedDict()

        # List of explicit profile IDs that we've processed.
        self._explicit_profile_ids = None

        # Per-batch state.
        self._pending_endpoint = None
        self._endpoint_update_pending = False
        self._mac_changed = False
        # IPs that no longer belong to this endpoint and need cleaning up.
        self._removed_ips = set()

        # Current endpoint data.
        self.endpoint = None

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track the success/failure of our dataplane programming.
        self._chains_programmed = False
        self._iptables_in_sync = False
        self._device_in_sync = False
        self._profile_ids_dirty = False

        # Oper-state of the Linux interface.
        self._device_is_up = None  # Unknown

        # Our last status report.  Used for de-dupe.
        self._last_status = None

        # One-way flags to indicate that we should clean up/have cleaned up.
        self._unreferenced = False
        self._added_to_dispatch_chains = False
        self._cleaned_up = False

    @property
    def nets_key(self):
        if self.ip_type == IPV4:
            return "ipv4_nets"
        else:
            return "ipv6_nets"

    @property
    def nat_key(self):
        return nat_key(self.ip_type)

    @property
    def _admin_up(self):
        return (not self._unreferenced and self.endpoint
                and self.endpoint.get("state", "active") == "active")

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        assert not self._unreferenced, "Update after being unreferenced"

        # Store off the update, to be handled in _finish_msg_batch.
        self._pending_endpoint = endpoint
        self._endpoint_update_pending = True
        if force_reprogram:
            self._iptables_in_sync = False
            self._device_in_sync = False

    @actor_message()
    def on_tiered_policy_update(self, pols_by_tier):
        """Called to update the ordered set of tiered policies that apply.

        :param OrderedDict pols_by_tier: Ordered mapping from tier name to
               list of policies to apply in that tier.
        """
        _log.debug("New policy IDs for %s: %s", self.combined_id, pols_by_tier)
        if pols_by_tier != self._pol_ids_by_tier:
            self._pol_ids_by_tier = pols_by_tier
            self._iptables_in_sync = False
            self._profile_ids_dirty = True

    @actor_message()
    def on_interface_update(self, iface_up):
        """
        Actor event to report that the interface is either up or changed.
        """
        _log.info("Endpoint %s received interface kick: %s", self.combined_id,
                  iface_up)
        assert not self._unreferenced, "Interface kick after unreference"

        # Use a flag so that we coalesce any duplicate updates in
        # _finish_msg_batch.
        self._device_in_sync = False
        self._device_is_up = iface_up

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._unreferenced, "Duplicate on_unreferenced() call"

        # We should be deleted before being unreferenced.
        assert self.endpoint is None or (self._pending_endpoint is None
                                         and self._endpoint_update_pending)

        # Defer the processing to _finish_msg_batch.
        self._unreferenced = True

    def _finish_msg_batch(self, batch, results):
        if self._cleaned_up:
            # This can occur if we get a callback from a profile via the
            # RefHelper after we've already been deleted.
            _log.warn(
                "_finish_msg_batch() called after being unreferenced,"
                "ignoring.  Batch: %s", batch)
            return

        if self._endpoint_update_pending:
            # Copy the pending update into our data structures.  May work out
            # that iptables or the device is now out of sync.
            _log.debug("Endpoint update pending: %s", self._pending_endpoint)
            self._apply_endpoint_update()

        if self._profile_ids_dirty:
            _log.debug("Profile references need updating")
            self._update_profile_references()

        if not self._iptables_in_sync:
            # Try to update iptables; if successful, this will set the
            # _iptables_in_sync flag.
            _log.debug("iptables is out-of-sync, trying to update it")
            if self._admin_up:
                _log.info("%s is 'active', (re)programming chains.", self)
                self._update_chains()
            elif self._chains_programmed:
                # No longer active but our chains are still in place.  Remove
                # them.
                _log.info("%s is not 'active', removing chains.", self)
                self._remove_chains()

        if not self._device_in_sync and self._iface_name:
            # Try to update the device configuration.  If successful, will set
            # the _device_in_sync flag.
            if self._admin_up:
                # Endpoint is supposed to be live, try to configure it.
                _log.debug("Device is out-of-sync, trying to configure it")
                self._configure_interface()
            else:
                # We've been deleted, de-configure the interface.
                _log.debug("Device is out-of-sync, trying to de-configure it")
                self._deconfigure_interface()

        if self._removed_ips:
            # Some IPs have been removed, clean up conntrack.
            _log.debug("Some IPs were removed, cleaning up conntrack")
            self._clean_up_conntrack_entries()

        if self._unreferenced:
            # Endpoint is being removed, clean up...
            _log.debug("Cleaning up after endpoint unreferenced")
            self.dispatch_chains.on_endpoint_removed(self._iface_name,
                                                     async=True)
            self._rules_ref_helper.discard_all()
            self._notify_cleanup_complete()
            self._cleaned_up = True
        elif not self._added_to_dispatch_chains and self._iface_name:
            # This must be the first batch; add ourselves to the dispatch
            # chains.
            _log.debug("Adding endpoint to dispatch chain")
            self.dispatch_chains.on_endpoint_added(self._iface_name,
                                                   async=True)
            self._added_to_dispatch_chains = True

        # If changed, report our status back to the datastore.
        self._maybe_update_status()

    def _maybe_update_status(self):
        if not self.config.REPORT_ENDPOINT_STATUS:
            _log.debug("Status reporting disabled. Not reporting status.")
            return

        status, reason = self.oper_status()

        if self._unreferenced or status != self._last_status:
            _log.info("%s: updating status to %s", reason, status)
            if self._unreferenced:
                _log.debug("Unreferenced, reporting status = None")
                status_dict = None
            else:
                _log.debug("Endpoint oper state changed to %s", status)
                status_dict = {"status": status}
            self.status_reporter.on_endpoint_status_changed(
                self.combined_id,
                self.ip_type,
                status_dict,
                async=True,
            )
            self._last_status = status

    def oper_status(self):
        """Calculate the oper status of the endpoint.

        :returns: a tuple containing the status and a human-readable reason."""
        if not self._device_is_up:
            # Check this first because we won't try to sync the device if it's
            # oper down.
            reason = "Interface is oper-down"
            status = ENDPOINT_STATUS_DOWN
        elif not self.endpoint:
            reason = "No endpoint data"
            status = ENDPOINT_STATUS_DOWN
        elif not self._iptables_in_sync:
            # Definitely an error, the iptables command failed.
            reason = "Failed to update iptables"
            status = ENDPOINT_STATUS_ERROR
        elif not self._device_in_sync:
            reason = "Failed to update device config"
            status = ENDPOINT_STATUS_ERROR
        elif not self._admin_up:
            # This check comes after the in-sync tests because we handle
            # admin down by removing the configuration from the dataplane.
            reason = "Endpoint is admin down"
            status = ENDPOINT_STATUS_DOWN
        else:
            # All checks passed.  We're up!
            reason = "In sync and device is up"
            status = ENDPOINT_STATUS_UP
        return status, reason

    def _apply_endpoint_update(self):
        pending_endpoint = self._pending_endpoint
        if pending_endpoint == self.endpoint:
            _log.debug("Endpoint hasn't changed, nothing to do")
            return

        # Calculate the set of IPs that we had before this update.  Needed on
        # the update and delete code paths below.
        if self.endpoint:
            old_ips = set(
                futils.net_to_ip(n)
                for n in self.endpoint.get(self.nets_key, []))
            old_nat_mappings = self.endpoint.get(self.nat_key, [])
        else:
            old_ips = set()
            old_nat_mappings = []
        all_old_ips = old_ips | set([n["ext_ip"] for n in old_nat_mappings])

        if pending_endpoint:
            # Update/create.
            if pending_endpoint.get('mac') != self._mac:
                # Either we have not seen this MAC before, or it has changed.
                _log.debug("Endpoint MAC changed to %s",
                           pending_endpoint.get("mac"))
                self._mac = pending_endpoint.get('mac')
                self._mac_changed = True
                # MAC change requires refresh of iptables rules and ARP table.
                self._iptables_in_sync = False
                self._device_in_sync = False

            new_iface_name = pending_endpoint["name"]
            # Interface renames are handled in the EndpointManager by
            # simulating a delete then an add.  We shouldn't see one here.
            assert (self.endpoint is None
                    or self._iface_name == new_iface_name), (
                        "Unexpected change of interface name.")
            if self.endpoint is None:
                # This is the first time we have seen the endpoint, so extract
                # the interface name and endpoint ID.
                self._iface_name = new_iface_name
                self._suffix = interface_to_chain_suffix(
                    self.config, self._iface_name)
                _log.debug("Learned interface name/suffix: %s/%s",
                           self._iface_name, self._suffix)
                # First time through, need to program everything.
                self._iptables_in_sync = False
                self._device_in_sync = False
                if self._device_is_up is None:
                    _log.debug("Learned interface name, checking if device "
                               "is up.")
                    self._device_is_up = (
                        devices.interface_exists(self._iface_name)
                        and devices.interface_up(self._iface_name))

            # Check if the profile ID or IP addresses have changed, requiring
            # a refresh of the dataplane.
            profile_ids = set(pending_endpoint.get("profile_ids", []))
            if profile_ids != self._explicit_profile_ids:
                # Profile ID update requires iptables update but not device
                # update.
                _log.debug(
                    "Profile IDs changed from %s to %s, need to update "
                    "iptables", self._rules_ref_helper.required_refs,
                    profile_ids)
                self._explicit_profile_ids = profile_ids
                self._iptables_in_sync = False
                self._profile_ids_dirty = True

            # Check for changes to values that require a device update.
            if self.endpoint:
                if self.endpoint.get("state") != pending_endpoint.get("state"):
                    _log.debug("Desired interface state updated.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                new_ips = set(
                    futils.net_to_ip(n)
                    for n in pending_endpoint.get(self.nets_key, []))
                if old_ips != new_ips:
                    # IP addresses have changed, need to update the routing
                    # table.
                    _log.debug("IP addresses changed, need to update routing")
                    self._device_in_sync = False
                new_nat_mappings = pending_endpoint.get(self.nat_key, [])
                if old_nat_mappings != new_nat_mappings:
                    _log.debug("NAT mappings have changed, refreshing.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                all_new_ips = new_ips | set(
                    [n["ext_ip"] for n in new_nat_mappings])
                if all_old_ips != all_new_ips:
                    # Ensure we clean up any conntrack entries for IPs that
                    # have been removed.
                    _log.debug("Set of all IPs changed from %s to %s",
                               all_old_ips, all_new_ips)
                    self._removed_ips |= all_old_ips
                    self._removed_ips -= all_new_ips
        else:
            # Delete of the endpoint.  Need to resync everything.
            self._profile_ids_dirty = True
            self._iptables_in_sync = False
            self._device_in_sync = False
            self._removed_ips |= all_old_ips

        self.endpoint = pending_endpoint
        self._endpoint_update_pending = False
        self._pending_endpoint = None

    def _update_profile_references(self):
        if self.endpoint:
            # Combine the explicit profile IDs with the set of policy IDs
            # for our matching selectors.
            profile_ids = set(self._explicit_profile_ids)
            for pol_ids in self._pol_ids_by_tier.itervalues():
                profile_ids.update(pol_ids)
        else:
            profile_ids = set()
        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self._rules_ref_helper.replace_all(profile_ids)
        self._profile_ids_dirty = False

    def _update_chains(self):
        updates, deps = self._endpoint_updates()
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
            self.fip_manager.update_endpoint(self.combined_id,
                                             self.endpoint.get(
                                                 self.nat_key, None),
                                             async=True)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            try:
                self.iptables_updater.delete_chains(
                    self.iptables_generator.endpoint_chain_names(self._suffix),
                    async=False)
                self.fip_manager.update_endpoint(self.combined_id,
                                                 None,
                                                 async=True)
            except FailedSystemCall:
                _log.exception("Failed to remove chains after original "
                               "failure")
        else:
            self._iptables_in_sync = True
            self._chains_programmed = True

    def _endpoint_updates(self):
        raise NotImplementedError()  # pragma: no cover

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(
                self.iptables_generator.endpoint_chain_names(self._suffix),
                async=False)
            self.fip_manager.update_endpoint(self.combined_id,
                                             None,
                                             async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
        else:
            self._iptables_in_sync = True
            self._chains_programmed = False

    def _configure_interface(self):
        """
        Called to apply IP/sysctl config to the interface.

        This base implementation does nothing apart from setting the
        _device_in_sync flag.
        """
        _log.info("Interface %s configured", self._iface_name)
        self._device_in_sync = True

    def _deconfigure_interface(self):
        """
        Called to remove IP/sysctl config from the interface.

        This base implementation does nothing apart from setting the
        _device_in_sync flag.
        """
        _log.info("Interface %s deconfigured", self._iface_name)
        self._device_in_sync = True

    def _clean_up_conntrack_entries(self):
        """Removes conntrack entries for all the IPs in self._removed_ips."""
        _log.debug("Cleaning up conntrack for old IPs: %s", self._removed_ips)
        devices.remove_conntrack_flows(self._removed_ips,
                                       IP_TYPE_TO_VERSION[self.ip_type])
        # We could use self._removed_ips.clear() but it's hard to UT because
        # the UT sees the update.
        self._removed_ips = set()

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references",
                  self.combined_id)

    def __str__(self):
        return (
            "LocalEndpoint<%s,id=%s,iface=%s>" %
            (self.ip_type, self.combined_id, self._iface_name or "unknown"))
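
The message handlers above never touch the dataplane directly: each one only
records state and clears the relevant *_in_sync flag, and _finish_msg_batch()
applies the coalesced result once per batch.  A minimal sketch of that
flag-and-batch pattern is below; FlagBatcher, queue_update and finish_batch
are invented names used for illustration only and are not part of the actor
framework above.

class FlagBatcher(object):
    """Illustrative stand-in for the dirty-flag bookkeeping above."""

    def __init__(self):
        self._pending = None            # like _pending_endpoint
        self._update_pending = False    # like _endpoint_update_pending
        self._current = None            # like self.endpoint
        self._in_sync = True            # like _iptables_in_sync

    def queue_update(self, data, force=False):
        # Message handlers only record the update; nothing is programmed yet.
        self._pending = data
        self._update_pending = True
        if force:
            self._in_sync = False

    def finish_batch(self):
        # Run once per batch: fold in the pending update, then resync.
        if self._update_pending:
            if self._pending != self._current:
                self._current = self._pending
                self._in_sync = False
            self._pending = None
            self._update_pending = False
        if not self._in_sync:
            self._in_sync = self._program()
        return self._in_sync

    def _program(self):
        # Placeholder for the iptables/device programming step; returns True
        # on success so the flag records the outcome.
        return True


batcher = FlagBatcher()
batcher.queue_update({"state": "active"})
batcher.queue_update({"state": "active"}, force=True)   # coalesced per batch
assert batcher.finish_batch()
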
Exemplo n.º 8
0
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, iptables_generator, profile_id, ip_version,
                 iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.iptables_generator = iptables_generator
        self.id = profile_id
        self.ip_version = ip_version
        self._ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self._ipset_refs = RefHelper(self, ipset_mgr, self._on_ipsets_acquired)

        # Latest profile update - a profile dictionary.
        self._pending_profile = None
        # Currently-programmed profile dictionary.
        self._profile = None
        # The IDs of the tags and selector ipsets it requires.
        self._required_ipsets = set()

        # State flags.
        self._notified_ready = False
        self._cleaned_up = False
        self._dead = False
        self._dirty = True

    @actor_message()
    def on_profile_update(self, profile, force_reprogram=False):
        """
        Update the programmed iptables configuration with the new
        profile.

        :param dict[str]|NoneType profile: Dictionary of all profile data or
            None if profile is to be deleted.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert not self._dead, "Shouldn't receive updates after we're dead."
        self._pending_profile = profile
        self._dirty |= force_reprogram

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.
        """
        # Flag that we're dead and then let finish_msg_batch() do the cleanup.
        self._dead = True

    def _on_ipsets_acquired(self):
        """
        Callback from the RefHelper once it's acquired all the ipsets we
        need.

        This is called from an actor_message on our greenlet.
        """
        # Nothing to do here, if this is being called then we're already in
        # a message batch so _finish_msg_batch() will get called next.
        _log.info("All required ipsets acquired.")

    def _finish_msg_batch(self, batch, results):
        # Due to dependency management in IptablesUpdater, we don't need to
        # worry about programming the dataplane before notifying, so do it
        # on this common code path.
        if not self._notified_ready:
            self._notify_ready()
            self._notified_ready = True

        if self._dead:
            # Only want to clean up once.  Note: we can get here a second time
            # if we had a pending ipset incref in-flight when we were asked
            # to clean up.
            if not self._cleaned_up:
                try:
                    _log.info("%s unreferenced, removing our chains", self)
                    self._delete_chains()
                    self._ipset_refs.discard_all()
                    self._ipset_refs = None  # Break ref cycle.
                    self._profile = None
                    self._pending_profile = None
                finally:
                    self._cleaned_up = True
                    self._notify_cleanup_complete()
        else:
            if self._pending_profile != self._profile:
                _log.debug("Profile data changed, updating ipset references.")
                # Make sure that all the new tags and selectors are active.
                # We can't discard unneeded ones until we've updated iptables.
                new_tags_and_sels = extract_tags_and_selectors_from_profile(
                    self._pending_profile
                )
                for tag_or_sel in new_tags_and_sels:
                    _log.debug("Requesting ipset for tag %s", tag_or_sel)
                    # Note: acquire_ref() is a no-op if already acquired.
                    self._ipset_refs.acquire_ref(tag_or_sel)

                self._dirty = True
                self._profile = self._pending_profile
                self._required_ipsets = new_tags_and_sels

            if (self._dirty and
                    self._ipset_refs.ready and
                    self._pending_profile is not None):
                _log.info("Ready to program rules for %s", self.id)
                try:
                    self._update_chains()
                except FailedSystemCall as e:
                    _log.error("Failed to program profile chain %s; error: %r",
                               self, e)
                else:
                    # Now we've updated iptables, we can tell the RefHelper
                    # to discard the tags we no longer need.
                    self._ipset_refs.replace_all(self._required_ipsets)
                    self._dirty = False
            elif not self._dirty:
                _log.debug("No changes to program.")
            elif self._pending_profile is None:
                _log.info("Profile is None, removing our chains")
                try:
                    self._delete_chains()
                except FailedSystemCall:
                    _log.exception("Failed to delete chains for profile %s",
                                   self.id)
                else:
                    self._ipset_refs.discard_all()
                    self._dirty = False
            else:
                assert not self._ipset_refs.ready
                _log.info("Can't program rules %s yet, waiting on ipsets",
                          self.id)

    def _delete_chains(self):
        """
        Removes our chains from the dataplane, blocks until complete.
        """
        # Need to block here: have to wait for chains to be deleted
        # before we can decref our ipsets.
        self._iptables_updater.delete_chains(
            self.iptables_generator.profile_chain_names(self.id),
            async=False)

    def _update_chains(self):
        """
        Updates the chains in the dataplane.

        Blocks until the update is complete.

        On entry, self._pending_profile must not be None.

        :raises FailedSystemCall: if the update fails.
        """
        _log.info("%s Programming iptables with our chains.", self)
        assert self._pending_profile is not None, \
            "_update_chains called with no _pending_profile"
        tag_to_ip_set_name = {}
        sel_to_ip_set_name = {}
        for tag_or_sel, ipset in self._ipset_refs.iteritems():
            if isinstance(tag_or_sel, SelectorExpression):
                sel_to_ip_set_name[tag_or_sel] = ipset.ipset_name
            else:
                tag_to_ip_set_name[tag_or_sel] = ipset.ipset_name

        _log.info("Updating chains for profile %s", self.id)
        _log.debug("Profile %s: %s", self.id, self._profile)

        updates, deps = self.iptables_generator.profile_updates(
            self.id,
            self._pending_profile,
            self.ip_version,
            tag_to_ipset=tag_to_ip_set_name,
            selector_to_ipset=sel_to_ip_set_name,
            on_allow="RETURN",
            comment_tag=self.id)

        _log.debug("Queueing programming for rules %s: %s", self.id,
                   updates)

        self._iptables_updater.rewrite_chains(updates, deps, async=False)
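
The ordering constraint enforced by _finish_msg_batch() above is that the
chains are only (re)programmed once every required ipset reference has been
acquired, and stale references are only discarded afterwards via
replace_all().  A rough, synchronous sketch of that gate follows; RefTracker,
mark_acquired and maybe_program are invented stand-ins for the asynchronous
RefHelper machinery, not part of the real API.

class RefTracker(object):
    """Toy synchronous stand-in for RefHelper, for illustration only."""

    def __init__(self):
        self._refs = {}                  # ref id -> acquired object or None

    def acquire_ref(self, ref_id):
        # No-op if already requested, mirroring RefHelper.acquire_ref().
        self._refs.setdefault(ref_id, None)

    def mark_acquired(self, ref_id, obj):
        self._refs[ref_id] = obj

    @property
    def ready(self):
        return all(obj is not None for obj in self._refs.values())

    def replace_all(self, wanted_ids):
        for ref_id in list(self._refs):
            if ref_id not in wanted_ids:
                del self._refs[ref_id]
        for ref_id in wanted_ids:
            self.acquire_ref(ref_id)


def maybe_program(tracker, required_ids, dirty, program_chains):
    # Mirror of the "dirty and refs ready" check: program the dataplane only
    # once every dependency is in place, then drop refs we no longer need.
    # Returns the new value of the dirty flag.
    if dirty and tracker.ready:
        program_chains()
        tracker.replace_all(required_ids)
        return False
    return dirty


tracker = RefTracker()
tracker.acquire_ref("tag-a")
assert maybe_program(tracker, {"tag-a"}, True, lambda: None)       # still dirty
tracker.mark_acquired("tag-a", object())
assert not maybe_program(tracker, {"tag-a"}, True, lambda: None)   # programmed
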
Exemplo n.º 9
0
class LocalEndpoint(RefCountedActor):
    def __init__(self, config, combined_id, ip_type, iptables_updater,
                 dispatch_chains, rules_manager):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
                                            (combined_id.endpoint, ip_type))
        assert isinstance(dispatch_chains, DispatchChains)
        assert isinstance(rules_manager, RulesManager)

        self.combined_id = combined_id

        self.config = config
        self.ip_type = ip_type
        self.ip_version = futils.IP_TYPE_TO_VERSION[ip_type]
        if self.ip_type == IPV4:
            self.nets_key = "ipv4_nets"
        else:
            self.nets_key = "ipv6_nets"
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.rules_ref_helper = RefHelper(self, rules_manager,
                                          self._on_profiles_ready)

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self.endpoint = None
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track whether the last attempt to program the dataplane succeeded.
        # We'll force a reprogram next time we get a kick.
        self._failed = False
        # And whether we've received an update since last time we programmed.
        self._dirty = False

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str] endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        mac_changed = False

        if not endpoint and not self.endpoint:
            # First time we have been called, but it's a delete! Maybe some
            # odd timing window, but we have nothing to tidy up.
            return

        if endpoint and endpoint['mac'] != self._mac:
            # Either we have not seen this MAC before, or it has changed.
            self._mac = endpoint['mac']
            mac_changed = True

        if endpoint and not self.endpoint:
            # This is the first time we have seen the endpoint, so extract the
            # interface name and endpoint ID.
            self._iface_name = endpoint["name"]
            self._suffix = interface_to_suffix(self.config, self._iface_name)

        was_ready = self._ready

        # Activate the required profile IDs (and deactivate any that we no
        # longer need).
        if endpoint:
            new_profile_ids = set(endpoint["profile_ids"])
        else:
            new_profile_ids = set()
        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self.rules_ref_helper.replace_all(new_profile_ids)

        if endpoint != self.endpoint or force_reprogram:
            self._dirty = True

        # Store off the endpoint we were passed.
        self.endpoint = endpoint

        if endpoint:
            # Configure the network interface; may fail if not there yet (in
            # which case we'll just do it when the interface comes up).
            self._configure_interface(mac_changed)
        else:
            # Remove the network programming.
            self._deconfigure_interface()

        self._maybe_update(was_ready)
        _log.debug("%s finished processing update", self)

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._ready, "Should be deleted before being unreffed."
        # Removing all profile refs should have been done already but be
        # defensive.
        self.rules_ref_helper.discard_all()
        self._notify_cleanup_complete()

    @actor_message()
    def on_interface_update(self):
        """
        Actor event to report that the interface has come up or changed.
        """
        _log.info("Endpoint %s received interface kick", self.combined_id)
        self._configure_interface()

    @property
    def _missing_deps(self):
        """
        Returns a list of missing dependencies.
        """
        missing_deps = []
        if not self.endpoint:
            missing_deps.append("endpoint")
        elif self.endpoint.get("state", "active") != "active":
            missing_deps.append("endpoint active")
        elif not self.endpoint.get("profile_ids"):
            missing_deps.append("profile")
        return missing_deps

    @property
    def _ready(self):
        """
        Returns whether this LocalEndpoint has any dependencies preventing it
        programming its rules.
        """
        return not self._missing_deps

    def _maybe_update(self, was_ready):
        """
        Update the relevant programming for this endpoint.

        :param bool was_ready: Whether this endpoint has already been
                               successfully configured.
        """
        is_ready = self._ready
        if not is_ready:
            _log.debug("%s not ready, waiting on %s", self, self._missing_deps)
        if self._failed or self._dirty or is_ready != was_ready:
            ifce_name = self._iface_name
            if is_ready:
                # We've got all the info and everything is active.
                if self._failed:
                    _log.warn("Retrying programming after a failure")
                self._failed = False  # Ready to try again...
                _log.info("%s became ready to program.", self)
                self._update_chains()
                self.dispatch_chains.on_endpoint_added(self._iface_name,
                                                       async=True)
            else:
                # We were active but now we're not; withdraw the dispatch rule
                # and our chain.  We must do this to allow iptables to remove
                # the profile chain.
                _log.info("%s became unready.", self)
                self._failed = False  # Don't care any more.

                self.dispatch_chains.on_endpoint_removed(ifce_name, async=True)
                self._remove_chains()
            self._dirty = False

    def _update_chains(self):
        updates, deps = _get_endpoint_rules(
            self.combined_id.endpoint, self._suffix, self.ip_version,
            self.endpoint.get("ipv%s_nets" % self.ip_version, []),
            self.endpoint["mac"], self.endpoint["profile_ids"])
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            self._failed = True
            self._remove_chains()

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(chain_names(self._suffix),
                                                async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
            self._failed = True

    def _configure_interface(self, mac_changed=True):
        """
        Applies sysctls and routes to the interface.

        :param bool mac_changed: Has the MAC address changed since it was last
                     configured? If so, we reconfigure ARP for the interface in
                     IPv4 (ARP does not exist for IPv6, which uses neighbour
                     solicitation instead).
        """
        try:
            if self.ip_type == IPV4:
                devices.configure_interface_ipv4(self._iface_name)
                reset_arp = mac_changed
            else:
                ipv6_gw = self.endpoint.get("ipv6_gateway", None)
                devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
                reset_arp = False

            ips = set()
            for ip in self.endpoint.get(self.nets_key, []):
                ips.add(futils.net_to_ip(ip))
            devices.set_routes(self.ip_type,
                               ips,
                               self._iface_name,
                               self.endpoint["mac"],
                               reset_arp=reset_arp)

        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                _log.info("Interface %s for %s does not exist yet",
                          self._iface_name, self.combined_id)
            elif not devices.interface_up(self._iface_name):
                _log.info("Interface %s for %s is not up yet",
                          self._iface_name, self.combined_id)
            else:
                # Interface flapped back up after we failed?
                _log.warning("Failed to configure interface %s for %s",
                             self._iface_name, self.combined_id)

    def _deconfigure_interface(self):
        """
        Removes routes from the interface.
        """
        try:
            devices.set_routes(self.ip_type, set(), self._iface_name, None)
        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                # Deleted under our feet - so the rules are gone.
                _log.debug("Interface %s for %s deleted", self._iface_name,
                           self.combined_id)
            else:
                # An error deleting the rules. Log and continue.
                _log.exception("Cannot delete rules for interface %s for %s",
                               self._iface_name, self.combined_id)

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references",
                  self.combined_id)

    def __str__(self):
        return (
            "Endpoint<%s,id=%s,iface=%s>" %
            (self.ip_type, self.combined_id, self._iface_name or "unknown"))
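
This older variant gates programming on _missing_deps/_ready rather than on
per-area sync flags.  The dependency check is a pure function of the endpoint
dictionary, as the short sketch below shows; the standalone helpers are
illustrative only and are not part of the class above.

def missing_deps(endpoint):
    # Same checks as LocalEndpoint._missing_deps above, pulled out as a plain
    # function so the readiness logic can be exercised on its own.
    deps = []
    if not endpoint:
        deps.append("endpoint")
    elif endpoint.get("state", "active") != "active":
        deps.append("endpoint active")
    elif not endpoint.get("profile_ids"):
        deps.append("profile")
    return deps


def is_ready(endpoint):
    return not missing_deps(endpoint)


assert not is_ready(None)
assert missing_deps({"state": "inactive", "profile_ids": ["p1"]}) == \
    ["endpoint active"]
assert is_ready({"state": "active", "profile_ids": ["p1"]})
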
Exemplo n.º 10
0
class LocalEndpoint(RefCountedActor):

    def __init__(self, config, combined_id, ip_type, iptables_updater,
                 dispatch_chains, rules_manager, fip_manager, status_reporter):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        :param fip_manager: FloatingIPManager to use
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
                                             (combined_id.endpoint, ip_type))
        assert isinstance(rules_manager, RulesManager)

        self.config = config
        self.iptables_generator = config.plugins["iptables_generator"]

        self.combined_id = combined_id
        self.ip_type = ip_type

        # Other actors we need to talk to.
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.status_reporter = status_reporter
        self.fip_manager = fip_manager

        # Helper for acquiring/releasing profiles.
        self._rules_ref_helper = RefHelper(self, rules_manager,
                                           self._on_profiles_ready)

        # List of global policies that we care about.
        self._pol_ids_by_tier = OrderedDict()

        # List of explicit profile IDs that we've processed.
        self._explicit_profile_ids = None

        # Per-batch state.
        self._pending_endpoint = None
        self._endpoint_update_pending = False
        self._mac_changed = False
        # IPs that no longer belong to this endpoint and need cleaning up.
        self._removed_ips = set()

        # Current endpoint data.
        self.endpoint = None

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track the success/failure of our dataplane programming.
        self._chains_programmed = False
        self._iptables_in_sync = False
        self._device_in_sync = False
        self._profile_ids_dirty = False

        # Oper-state of the Linux interface.
        self._device_is_up = None  # Unknown

        # Our last status report.  Used for de-dupe.
        self._last_status = None

        # One-way flags to indicate that we should clean up/have cleaned up.
        self._unreferenced = False
        self._added_to_dispatch_chains = False
        self._cleaned_up = False

    @property
    def nets_key(self):
        if self.ip_type == IPV4:
            return "ipv4_nets"
        else:
            return "ipv6_nets"

    @property
    def nat_key(self):
        return nat_key(self.ip_type)

    @property
    def _admin_up(self):
        return (not self._unreferenced and
                self.endpoint and
                self.endpoint.get("state", "active") == "active")

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        assert not self._unreferenced, "Update after being unreferenced"

        # Store off the update, to be handled in _finish_msg_batch.
        self._pending_endpoint = endpoint
        self._endpoint_update_pending = True
        if force_reprogram:
            self._iptables_in_sync = False
            self._device_in_sync = False

    @actor_message()
    def on_tiered_policy_update(self, pols_by_tier):
        """Called to update the ordered set of tiered policies that apply.

        :param OrderedDict pols_by_tier: Ordered mapping from tier name to
               list of policies to apply in that tier.
        """
        _log.debug("New policy IDs for %s: %s", self.combined_id,
                   pols_by_tier)
        if pols_by_tier != self._pol_ids_by_tier:
            self._pol_ids_by_tier = pols_by_tier
            self._iptables_in_sync = False
            self._profile_ids_dirty = True

    @actor_message()
    def on_interface_update(self, iface_up):
        """
        Actor event to report that the interface has gone up or down, or
        has otherwise changed.
        """
        _log.info("Endpoint %s received interface kick: %s",
                  self.combined_id, iface_up)
        assert not self._unreferenced, "Interface kick after unreference"

        # Use a flag so that we coalesce any duplicate updates in
        # _finish_msg_batch.
        self._device_in_sync = False
        self._device_is_up = iface_up

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._unreferenced, "Duplicate on_unreferenced() call"

        # We should be deleted before being unreferenced.
        assert self.endpoint is None or (self._pending_endpoint is None and
                                         self._endpoint_update_pending)

        # Defer the processing to _finish_msg_batch.
        self._unreferenced = True

    def _finish_msg_batch(self, batch, results):
        if self._cleaned_up:
            # This can occur if we get a callback from a profile via the
            # RefHelper after we've already been deleted.
            _log.warn("_finish_msg_batch() called after being unreferenced,"
                      "ignoring.  Batch: %s", batch)
            return

        if self._endpoint_update_pending:
            # Copy the pending update into our data structures.  May work out
            # that iptables or the device is now out of sync.
            _log.debug("Endpoint update pending: %s", self._pending_endpoint)
            self._apply_endpoint_update()

        if self._profile_ids_dirty:
            _log.debug("Profile references need updating")
            self._update_profile_references()

        if not self._iptables_in_sync:
            # Try to update iptables; if successful, this will set the
            # _iptables_in_sync flag.
            _log.debug("iptables is out-of-sync, trying to update it")
            if self._admin_up:
                _log.info("%s is 'active', (re)programming chains.", self)
                self._update_chains()
            elif self._chains_programmed:
                # No longer active but our chains are still in place.  Remove
                # them.
                _log.info("%s is not 'active', removing chains.", self)
                self._remove_chains()

        if not self._device_in_sync and self._iface_name:
            # Try to update the device configuration.  If successful, will set
            # the _device_in_sync flag.
            if self._admin_up:
                # Endpoint is supposed to be live, try to configure it.
                _log.debug("Device is out-of-sync, trying to configure it")
                self._configure_interface()
            else:
                # We've been deleted, de-configure the interface.
                _log.debug("Device is out-of-sync, trying to de-configure it")
                self._deconfigure_interface()

        if self._removed_ips:
            # Some IPs have been removed, clean up conntrack.
            _log.debug("Some IPs were removed, cleaning up conntrack")
            self._clean_up_conntrack_entries()

        if self._unreferenced:
            # Endpoint is being removed, clean up...
            _log.debug("Cleaning up after endpoint unreferenced")
            self.dispatch_chains.on_endpoint_removed(self._iface_name,
                                                     async=True)
            self._rules_ref_helper.discard_all()
            self._notify_cleanup_complete()
            self._cleaned_up = True
        elif not self._added_to_dispatch_chains and self._iface_name:
            # This must be the first batch; add ourselves to the dispatch
            # chains.
            _log.debug("Adding endpoint to dispatch chain")
            self.dispatch_chains.on_endpoint_added(self._iface_name,
                                                   async=True)
            self._added_to_dispatch_chains = True

        # If changed, report our status back to the datastore.
        self._maybe_update_status()

    def _maybe_update_status(self):
        if not self.config.REPORT_ENDPOINT_STATUS:
            _log.debug("Status reporting disabled. Not reporting status.")
            return

        status, reason = self.oper_status()

        if self._unreferenced or status != self._last_status:
            _log.info("%s: updating status to %s", reason, status)
            if self._unreferenced:
                _log.debug("Unreferenced, reporting status = None")
                status_dict = None
            else:
                _log.debug("Endpoint oper state changed to %s", status)
                status_dict = {"status": status}
            self.status_reporter.on_endpoint_status_changed(
                self.combined_id,
                self.ip_type,
                status_dict,
                async=True,
            )
            self._last_status = status

    def oper_status(self):
        """Calculate the oper status of the endpoint.

        :returns: a tuple containing the status and a human-readable reason."""
        if not self._device_is_up:
            # Check this first because we won't try to sync the device if it's
            # oper down.
            reason = "Interface is oper-down"
            status = ENDPOINT_STATUS_DOWN
        elif not self.endpoint:
            reason = "No endpoint data"
            status = ENDPOINT_STATUS_DOWN
        elif not self._iptables_in_sync:
            # Definitely an error, the iptables command failed.
            reason = "Failed to update iptables"
            status = ENDPOINT_STATUS_ERROR
        elif not self._device_in_sync:
            reason = "Failed to update device config"
            status = ENDPOINT_STATUS_ERROR
        elif not self._admin_up:
            # This check comes after the in-sync tests because we handle
            # admin down by removing the configuration from the dataplane.
            reason = "Endpoint is admin down"
            status = ENDPOINT_STATUS_DOWN
        else:
            # All checks passed.  We're up!
            reason = "In sync and device is up"
            status = ENDPOINT_STATUS_UP
        return status, reason

    def _apply_endpoint_update(self):
        pending_endpoint = self._pending_endpoint
        if pending_endpoint == self.endpoint:
            _log.debug("Endpoint hasn't changed, nothing to do")
            return

        # Calculate the set of IPs that we had before this update.  Needed on
        # the update and delete code paths below.
        if self.endpoint:
            old_ips = set(futils.net_to_ip(n) for n in
                          self.endpoint.get(self.nets_key, []))
            old_nat_mappings = self.endpoint.get(self.nat_key, [])
        else:
            old_ips = set()
            old_nat_mappings = []
        all_old_ips = old_ips | set([n["ext_ip"] for n in old_nat_mappings])

        if pending_endpoint:
            # Update/create.
            if pending_endpoint.get('mac') != self._mac:
                # Either we have not seen this MAC before, or it has changed.
                _log.debug("Endpoint MAC changed to %s",
                           pending_endpoint.get("mac"))
                self._mac = pending_endpoint.get('mac')
                self._mac_changed = True
                # MAC change requires refresh of iptables rules and ARP table.
                self._iptables_in_sync = False
                self._device_in_sync = False

            if self.endpoint is None:
                # This is the first time we have seen the endpoint, so extract
                # the interface name and endpoint ID.
                self._iface_name = pending_endpoint["name"]
                self._suffix = interface_to_chain_suffix(self.config,
                                                         self._iface_name)
                _log.debug("Learned interface name/suffix: %s/%s",
                           self._iface_name, self._suffix)
                # First time through, need to program everything.
                self._iptables_in_sync = False
                self._device_in_sync = False
                if self._device_is_up is None:
                    _log.debug("Learned interface name, checking if device "
                               "is up.")
                    self._device_is_up = (
                        devices.interface_exists(self._iface_name) and
                        devices.interface_up(self._iface_name)
                    )

            # Check if the profile ID or IP addresses have changed, requiring
            # a refresh of the dataplane.
            profile_ids = set(pending_endpoint.get("profile_ids", []))
            if profile_ids != self._explicit_profile_ids:
                # Profile ID update requires iptables update but not device
                # update.
                _log.debug("Profile IDs changed from %s to %s, need to update "
                           "iptables", self._rules_ref_helper.required_refs,
                           profile_ids)
                self._explicit_profile_ids = profile_ids
                self._iptables_in_sync = False
                self._profile_ids_dirty = True

            # Check for changes to values that require a device update.
            if self.endpoint:
                if self.endpoint.get("state") != pending_endpoint.get("state"):
                    _log.debug("Desired interface state updated.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                new_ips = set(futils.net_to_ip(n) for n in
                              pending_endpoint.get(self.nets_key, []))
                if old_ips != new_ips:
                    # IP addresses have changed, need to update the routing
                    # table.
                    _log.debug("IP addresses changed, need to update routing")
                    self._device_in_sync = False
                new_nat_mappings = pending_endpoint.get(self.nat_key, [])
                if old_nat_mappings != new_nat_mappings:
                    _log.debug("NAT mappings have changed, refreshing.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                all_new_ips = new_ips | set([n["ext_ip"] for n in
                                             new_nat_mappings])
                if all_old_ips != all_new_ips:
                    # Ensure we clean up any conntrack entries for IPs that
                    # have been removed.
                    _log.debug("Set of all IPs changed from %s to %s",
                               all_old_ips, all_new_ips)
                    self._removed_ips |= all_old_ips
                    self._removed_ips -= all_new_ips
        else:
            # Delete of the endpoint.  Need to resync everything.
            self._profile_ids_dirty = True
            self._iptables_in_sync = False
            self._device_in_sync = False
            self._removed_ips |= all_old_ips

        self.endpoint = pending_endpoint
        self._endpoint_update_pending = False
        self._pending_endpoint = None

    def _update_profile_references(self):
        if self.endpoint:
            # Combine the explicit profile IDs with the set of policy IDs
            # for our matching selectors.
            profile_ids = set(self._explicit_profile_ids)
            for pol_ids in self._pol_ids_by_tier.itervalues():
                profile_ids.update(pol_ids)
        else:
            profile_ids = set()
        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self._rules_ref_helper.replace_all(profile_ids)
        self._profile_ids_dirty = False

    def _update_chains(self):
        updates, deps = self._endpoint_updates()
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
            self.fip_manager.update_endpoint(
                self.combined_id,
                self.endpoint.get(self.nat_key, None),
                async=True
            )
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            try:
                self.iptables_updater.delete_chains(
                    self.iptables_generator.endpoint_chain_names(self._suffix),
                    async=False)
                self.fip_manager.update_endpoint(self.combined_id, None,
                                                 async=True)
            except FailedSystemCall:
                _log.exception("Failed to remove chains after original "
                               "failure")
        else:
            self._iptables_in_sync = True
            self._chains_programmed = True

    def _endpoint_updates(self):
        raise NotImplementedError()  # pragma: no cover

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(
                self.iptables_generator.endpoint_chain_names(self._suffix),
                async=False)
            self.fip_manager.update_endpoint(self.combined_id, None,
                                             async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
        else:
            self._iptables_in_sync = True
            self._chains_programmed = False

    def _configure_interface(self):
        """
        Called to apply IP/sysctl config to the interface.

        This base implementation does nothing apart from setting the
        _device_in_sync flag.
        """
        _log.info("Interface %s configured", self._iface_name)
        self._device_in_sync = True

    def _deconfigure_interface(self):
        """
        Called to remove IP/sysctl config from the interface.

        This base implementation does nothing apart from setting the
        _device_in_sync flag.
        """
        _log.info("Interface %s deconfigured", self._iface_name)
        self._device_in_sync = True

    def _clean_up_conntrack_entries(self):
        """Removes conntrack entries for all the IPs in self._removed_ips."""
        _log.debug("Cleaning up conntrack for old IPs: %s", self._removed_ips)
        devices.remove_conntrack_flows(
            self._removed_ips,
            IP_TYPE_TO_VERSION[self.ip_type]
        )
        # We could use self._removed_ips.clear() but it's hard to UT because
        # the UT sees the update.
        self._removed_ips = set()

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references",
                  self.combined_id)

    def __str__(self):
        return ("LocalEndpoint<%s,id=%s,iface=%s>" %
                (self.ip_type, self.combined_id,
                 self._iface_name or "unknown"))
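
One detail worth pulling out of _apply_endpoint_update() above is the set
arithmetic that feeds conntrack cleanup: all of the old IPs (including NAT
external IPs) are added to _removed_ips and all of the new ones subtracted,
so only addresses the endpoint has genuinely lost get flushed.  A worked
example with made-up addresses:

old_ips = {"10.0.0.1", "10.0.0.2"}
old_nat_ext_ips = {"192.0.2.10"}
new_ips = {"10.0.0.2", "10.0.0.3"}
new_nat_ext_ips = {"192.0.2.10"}

all_old_ips = old_ips | old_nat_ext_ips
all_new_ips = new_ips | new_nat_ext_ips

removed_ips = set()
removed_ips |= all_old_ips   # everything the endpoint used to own...
removed_ips -= all_new_ips   # ...minus everything it still owns.

assert removed_ips == {"10.0.0.1"}   # only this IP needs conntrack cleanup
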
Exemplo n.º 11
0
class LocalEndpoint(RefCountedActor):
    def __init__(self, config, combined_id, ip_type, iptables_updater,
                 dispatch_chains, rules_manager, fip_manager, status_reporter):
        """
        Controls a single local endpoint.

        :param combined_id: EndpointId for this endpoint.
        :param ip_type: IP type for this endpoint (IPv4 or IPv6)
        :param iptables_updater: IptablesUpdater to use
        :param dispatch_chains: DispatchChains to use
        :param rules_manager: RulesManager to use
        :param fip_manager: FloatingIPManager to use
        """
        super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
                                            (combined_id.endpoint, ip_type))
        assert isinstance(dispatch_chains, DispatchChains)
        assert isinstance(rules_manager, RulesManager)

        self.config = config
        self.iptables_generator = config.plugins["iptables_generator"]

        self.combined_id = combined_id
        self.ip_type = ip_type

        # Other actors we need to talk to.
        self.iptables_updater = iptables_updater
        self.dispatch_chains = dispatch_chains
        self.rules_mgr = rules_manager
        self.status_reporter = status_reporter
        self.fip_manager = fip_manager

        # Helper for acquiring/releasing profiles.
        self.rules_ref_helper = RefHelper(self, rules_manager,
                                          self._on_profiles_ready)

        # Per-batch state.
        self._pending_endpoint = None
        self._endpoint_update_pending = False
        self._mac_changed = False

        # Current endpoint data.
        self.endpoint = None

        # Will be filled in as we learn about the OS interface and the
        # endpoint config.
        self._mac = None
        self._iface_name = None
        self._suffix = None

        # Track the success/failure of our dataplane programming.
        self._chains_programmed = False
        self._iptables_in_sync = False
        self._device_in_sync = False

        # Oper-state of the Linux interface.
        self._device_is_up = None  # Unknown

        # Our last status report.  Used for de-dupe.
        self._last_status = None

        # One-way flags to indicate that we should clean up/have cleaned up.
        self._unreferenced = False
        self._added_to_dispatch_chains = False
        self._cleaned_up = False

    @property
    def nets_key(self):
        if self.ip_type == IPV4:
            return "ipv4_nets"
        else:
            return "ipv6_nets"

    @property
    def _admin_up(self):
        return (not self._unreferenced and self.endpoint
                and self.endpoint.get("state") == "active")

    @actor_message()
    def on_endpoint_update(self, endpoint, force_reprogram=False):
        """
        Called when this endpoint has received an update.
        :param dict[str]|NoneType endpoint: endpoint parameter dictionary.
        """
        _log.info("%s updated: %s", self, endpoint)
        assert not self._unreferenced, "Update after being unreferenced"

        # Store off the update, to be handled in _finish_msg_batch.
        self._pending_endpoint = endpoint
        self._endpoint_update_pending = True
        if force_reprogram:
            self._iptables_in_sync = False
            self._device_in_sync = False

    @actor_message()
    def on_interface_update(self, iface_up):
        """
        Actor event to report that the interface has gone up or down, or
        has otherwise changed.
        """
        _log.info("Endpoint %s received interface kick: %s", self.combined_id,
                  iface_up)
        assert not self._unreferenced, "Interface kick after unreference"

        # Use a flag so that we coalesce any duplicate updates in
        # _finish_msg_batch.
        self._device_in_sync = False
        self._device_is_up = iface_up

    @actor_message()
    def on_unreferenced(self):
        """
        Overrides RefCountedActor:on_unreferenced.
        """
        _log.info("%s now unreferenced, cleaning up", self)
        assert not self._unreferenced, "Duplicate on_unreferenced() call"

        # We should be deleted before being unreferenced.
        assert self.endpoint is None or (self._pending_endpoint is None
                                         and self._endpoint_update_pending)

        # Defer the processing to _finish_msg_batch.
        self._unreferenced = True

    def _finish_msg_batch(self, batch, results):
        if self._cleaned_up:
            # This can occur if we get a callback from a profile via the
            # RefHelper after we've already been deleted.
            _log.warning(
                "_finish_msg_batch() called after being unreferenced, "
                "ignoring.  Batch: %s", batch)
            return

        if self._endpoint_update_pending:
            # Copy the pending update into our data structures.  This may mark
            # iptables or the device as out of sync.
            _log.debug("Endpoint update pending: %s", self._pending_endpoint)
            self._apply_endpoint_update()

        if not self._iptables_in_sync:
            # Try to update iptables; if successful, this will set the
            # _iptables_in_sync flag.
            _log.debug("iptables is out-of-sync, trying to update it")
            if self._admin_up:
                _log.info("%s is 'active', (re)programming chains.", self)
                self._update_chains()
            elif self._chains_programmed:
                # No longer active but our chains are still in place.  Remove
                # them.
                _log.info("%s is not 'active', removing chains.", self)
                self._remove_chains()

        if not self._device_in_sync and self._iface_name:
            # Try to update the device configuration.  If successful, will set
            # the _device_in_sync flag.
            if self._admin_up:
                # Endpoint is supposed to be live, try to configure it.
                _log.debug("Device is out-of-sync, trying to configure it")
                self._configure_interface()
            else:
                # Admin-down or deleted; de-configure the interface.
                _log.debug("Device is out-of-sync, trying to de-configure it")
                self._deconfigure_interface()

        if self._unreferenced:
            # Endpoint is being removed, clean up...
            _log.debug("Cleaning up after endpoint unreferenced")
            self.dispatch_chains.on_endpoint_removed(self._iface_name,
                                                     async=True)
            self.rules_ref_helper.discard_all()
            self._notify_cleanup_complete()
            self._cleaned_up = True
        elif not self._added_to_dispatch_chains and self._iface_name:
            # This must be the first batch; add ourselves to the dispatch
            # chains.
            _log.debug("Adding endpoint to dispatch chain")
            self.dispatch_chains.on_endpoint_added(self._iface_name,
                                                   async=True)
            self._added_to_dispatch_chains = True

        # If changed, report our status back to the datastore.
        self._maybe_update_status()

    def _maybe_update_status(self):
        if not self.config.REPORT_ENDPOINT_STATUS:
            _log.debug("Status reporting disabled. Not reporting status.")
            return

        if not self._device_is_up:
            # Check this first because we won't try to sync the device if it's
            # oper down.
            reason = "Interface is oper-down"
            status = ENDPOINT_STATUS_DOWN
        elif not self.endpoint:
            reason = "No endpoint data"
            status = ENDPOINT_STATUS_DOWN
        elif not self._iptables_in_sync:
            # Definitely an error, the iptables command failed.
            reason = "Failed to update iptables"
            status = ENDPOINT_STATUS_ERROR
        elif not self._device_in_sync:
            reason = "Failed to update device config"
            status = ENDPOINT_STATUS_ERROR
        elif not self._admin_up:
            # Checked after the in-sync tests because we handle admin-down
            # by removing the configuration from the dataplane.
            reason = "Endpoint is admin down"
            status = ENDPOINT_STATUS_DOWN
        else:
            # All checks passed.  We're up!
            reason = "In sync and device is up"
            status = ENDPOINT_STATUS_UP

        if self._unreferenced or status != self._last_status:
            _log.info("%s: updating status to %s", reason, status)
            if self._unreferenced:
                _log.debug("Unreferenced, reporting status = None")
                status_dict = None
            else:
                _log.debug("Endpoint oper state changed to %s", status)
                status_dict = {"status": status}
            self.status_reporter.on_endpoint_status_changed(
                self.combined_id,
                self.ip_type,
                status_dict,
                async=True,
            )
            self._last_status = status

    def _apply_endpoint_update(self):
        pending_endpoint = self._pending_endpoint
        if pending_endpoint == self.endpoint:
            _log.debug("Endpoint hasn't changed, nothing to do")
            return

        if pending_endpoint:
            # Update/create.
            if pending_endpoint['mac'] != self._mac:
                # Either we have not seen this MAC before, or it has changed.
                _log.debug("Endpoint MAC changed to %s",
                           pending_endpoint["mac"])
                self._mac = pending_endpoint['mac']
                self._mac_changed = True
                # MAC change requires refresh of iptables rules and ARP table.
                self._iptables_in_sync = False
                self._device_in_sync = False

            if self.endpoint is None:
                # This is the first time we have seen the endpoint, so extract
                # the interface name and chain suffix.
                self._iface_name = pending_endpoint["name"]
                self._suffix = interface_to_suffix(self.config,
                                                   self._iface_name)
                _log.debug("Learned interface name/suffix: %s/%s",
                           self._iface_name, self._suffix)
                # First time through, need to program everything.
                self._iptables_in_sync = False
                self._device_in_sync = False
                if self._device_is_up is None:
                    _log.debug("Learned interface name, checking if device "
                               "is up.")
                    self._device_is_up = (
                        devices.interface_exists(self._iface_name)
                        and devices.interface_up(self._iface_name))

            # Check if the profile ID or IP addresses have changed, requiring
            # a refresh of the dataplane.
            profile_ids = set(pending_endpoint.get("profile_ids", []))
            if profile_ids != self.rules_ref_helper.required_refs:
                # A profile ID update requires an iptables update but not a
                # device update.
                _log.debug("Profile IDs changed, need to update iptables")
                self._iptables_in_sync = False

            # Check for changes to values that require a device update.
            if self.endpoint:
                if self.endpoint.get("state") != pending_endpoint.get("state"):
                    _log.debug("Desired interface state updated.")
                    self._device_in_sync = False
                    self._iptables_in_sync = False
                if (self.endpoint[self.nets_key] !=
                        pending_endpoint[self.nets_key]):
                    # IP addresses have changed, need to update the routing
                    # table.
                    _log.debug("IP addresses changed, need to update routing")
                    self._device_in_sync = False
                for key in "ipv4_nat", "ipv6_nat":
                    if (self.endpoint.get(key, None) != pending_endpoint.get(
                            key, None)):
                        _log.debug("NAT mappings have changed, refreshing.")
                        self._device_in_sync = False
                        self._iptables_in_sync = False
        else:
            # Deletion of the endpoint; need to resync everything.
            profile_ids = set()
            self._iptables_in_sync = False
            self._device_in_sync = False

        # Note: we don't actually need to wait for the activation to finish
        # due to the dependency management in the iptables layer.
        self.rules_ref_helper.replace_all(profile_ids)

        self.endpoint = pending_endpoint
        self._endpoint_update_pending = False
        self._pending_endpoint = None

    def _update_chains(self):
        updates, deps = self.iptables_generator.endpoint_updates(
            IP_TYPE_TO_VERSION[self.ip_type], self.combined_id.endpoint,
            self._suffix, self._mac, self.endpoint["profile_ids"])
        try:
            self.iptables_updater.rewrite_chains(updates, deps, async=False)
            self.fip_manager.update_endpoint(self.combined_id,
                                             self.endpoint.get(
                                                 nat_key(self.ip_type), None),
                                             async=True)
        except FailedSystemCall:
            _log.exception("Failed to program chains for %s. Removing.", self)
            try:
                self.iptables_updater.delete_chains(
                    self.iptables_generator.endpoint_chain_names(self._suffix),
                    async=False)
                self.fip_manager.update_endpoint(self.combined_id,
                                                 None,
                                                 async=True)
            except FailedSystemCall:
                _log.exception("Failed to remove chains after original "
                               "failure")
        else:
            self._iptables_in_sync = True
            self._chains_programmed = True

    def _remove_chains(self):
        try:
            self.iptables_updater.delete_chains(
                self.iptables_generator.endpoint_chain_names(self._suffix),
                async=False)
            self.fip_manager.update_endpoint(self.combined_id,
                                             None,
                                             async=True)
        except FailedSystemCall:
            _log.exception("Failed to delete chains for %s", self)
        else:
            self._iptables_in_sync = True
            self._chains_programmed = False

    def _configure_interface(self):
        """
        Applies sysctls and routes to the interface.
        """
        if not self._device_is_up:
            _log.debug("Device is known to be down, skipping attempt to "
                       "configure it.")
            return
        try:
            if self.ip_type == IPV4:
                devices.configure_interface_ipv4(self._iface_name)
                reset_arp = self._mac_changed
            else:
                ipv6_gw = self.endpoint.get("ipv6_gateway", None)
                devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
                reset_arp = False

            ips = set()
            for ip in self.endpoint.get(self.nets_key, []):
                ips.add(futils.net_to_ip(ip))
            for nat_map in self.endpoint.get(nat_key(self.ip_type), []):
                ips.add(nat_map['ext_ip'])
            devices.set_routes(self.ip_type,
                               ips,
                               self._iface_name,
                               self.endpoint["mac"],
                               reset_arp=reset_arp)

        except (IOError, FailedSystemCall) as e:
            if not devices.interface_exists(self._iface_name):
                _log.info("Interface %s for %s does not exist yet",
                          self._iface_name, self.combined_id)
            elif not devices.interface_up(self._iface_name):
                _log.info("Interface %s for %s is not up yet",
                          self._iface_name, self.combined_id)
            else:
                # Either the interface flapped back up after the failure (in
                # which case we'll retry when the event reaches us) or there
                # was a genuine failure due to bad data or some other factor.
                #
                # Since the former is fairly common, we log at warning level
                # rather than error, which avoids false positives.
                _log.warning(
                    "Failed to configure interface %s for %s: %r.  "
                    "Either the interface is flapping or it is "
                    "misconfigured.", self._iface_name, self.combined_id, e)
        else:
            _log.info("Interface %s configured", self._iface_name)
            self._device_in_sync = True

    def _deconfigure_interface(self):
        """
        Removes routes from the interface.
        """
        try:
            devices.set_routes(self.ip_type, set(), self._iface_name, None)
        except (IOError, FailedSystemCall):
            if not devices.interface_exists(self._iface_name):
                # Deleted under our feet - so the rules are gone.
                _log.info("Interface %s for %s already deleted",
                          self._iface_name, self.combined_id)
            else:
                # An error deleting the routes. Log and continue.
                _log.exception("Cannot delete routes for interface %s for %s",
                               self._iface_name, self.combined_id)
        else:
            _log.info("Interface %s deconfigured", self._iface_name)
            self._device_in_sync = True

    def _on_profiles_ready(self):
        # We don't actually need to talk to the profiles, just log.
        _log.info("Endpoint %s acquired all required profile references",
                  self.combined_id)

    def __str__(self):
        return (
            "LocalEndpoint<%s,id=%s,iface=%s>" %
            (self.ip_type, self.combined_id, self._iface_name or "unknown"))
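
The status logic in _maybe_update_status() above is a strict priority ladder: device state is checked first, programming failures next, and admin-down only once the dataplane is known to be in sync. The following is a hypothetical, self-contained sketch of that ordering; the status constants are assumed string values and none of this is part of the module above.

ENDPOINT_STATUS_UP = "up"        # assumed values, for illustration only
ENDPOINT_STATUS_DOWN = "down"
ENDPOINT_STATUS_ERROR = "error"


def compute_status(device_is_up, have_endpoint, iptables_in_sync,
                   device_in_sync, admin_up):
    """Return (status, reason) using the same check order as the actor."""
    if not device_is_up:
        return ENDPOINT_STATUS_DOWN, "Interface is oper-down"
    if not have_endpoint:
        return ENDPOINT_STATUS_DOWN, "No endpoint data"
    if not iptables_in_sync:
        return ENDPOINT_STATUS_ERROR, "Failed to update iptables"
    if not device_in_sync:
        return ENDPOINT_STATUS_ERROR, "Failed to update device config"
    if not admin_up:
        return ENDPOINT_STATUS_DOWN, "Endpoint is admin down"
    return ENDPOINT_STATUS_UP, "In sync and device is up"


# A programming failure outranks admin-down; a fully-synced, admin-up
# endpoint reports "up".
assert compute_status(True, True, False, True, False)[0] == ENDPOINT_STATUS_ERROR
assert compute_status(True, True, True, True, True)[0] == ENDPOINT_STATUS_UP
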
Exemplo n.º 12
0
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, profile_id, ip_version, iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.id = profile_id
        self.ip_version = ip_version
        self.ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self.notified_ready = False

        self.ipset_refs = RefHelper(self, ipset_mgr, self._maybe_update)

        self._profile = None
        """
        :type dict|None: filled in by first update.  Reset to None on delete.
        """
        self.dead = False

        self.chain_names = {
            "inbound": profile_to_chain_name("inbound", profile_id),
            "outbound": profile_to_chain_name("outbound", profile_id),
        }
        _log.info("Profile %s has chain names %s",
                  profile_id, self.chain_names)

    @actor_message()
    def on_profile_update(self, profile):
        """
        Update the programmed iptables configuration with the new
        profile.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert profile is None or profile["id"] == self.id
        assert not self.dead, "Shouldn't receive updates after we're dead."

        old_tags = extract_tags_from_profile(self._profile)
        new_tags = extract_tags_from_profile(profile)

        removed_tags = old_tags - new_tags
        added_tags = new_tags - old_tags
        for tag in removed_tags:
            _log.debug("Queueing ipset for tag %s for decref", tag)
            self.ipset_refs.discard_ref(tag)
        for tag in added_tags:
            _log.debug("Requesting ipset for tag %s", tag)
            self.ipset_refs.acquire_ref(tag)

        self._profile = profile
        self._maybe_update()

    def _maybe_update(self):
        if self.dead:
            _log.info("Not updating: profile is dead.")
        elif not self.ipset_refs.ready:
            _log.info("Can't program rules %s yet, waiting on ipsets",
                      self.id)
        else:
            _log.info("Ready to program rules for %s", self.id)
            self._update_chains()

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.  Removes
        our iptables configuration.
        """
        try:
            _log.info("%s unreferenced, removing our chains", self)
            self.dead = True
            chains = []
            for direction in ["inbound", "outbound"]:
                chain_name = self.chain_names[direction]
                chains.append(chain_name)
            self._iptables_updater.delete_chains(chains, async=False)
            self.ipset_refs.discard_all()
            self.ipset_refs = None  # Break ref cycle.
            self._profile = None
        finally:
            self._notify_cleanup_complete()

    def _update_chains(self):
        """
        Updates the chains in the dataplane.
        """
        _log.info("%s Programming iptables with our chains.", self)
        updates = {}
        for direction in ("inbound", "outbound"):
            _log.debug("Updating %s chain for profile %s", direction,
                       self.id)
            new_profile = self._profile or {}
            _log.debug("Profile %s: %s", self.id, self._profile)
            rules_key = "%s_rules" % direction
            new_rules = new_profile.get(rules_key, [])
            chain_name = self.chain_names[direction]
            tag_to_ip_set_name = {}
            for tag, ipset in self.ipset_refs.iteritems():
                tag_to_ip_set_name[tag] = ipset.name
            updates[chain_name] = rules_to_chain_rewrite_lines(
                chain_name,
                new_rules,
                self.ip_version,
                tag_to_ip_set_name,
                on_allow="RETURN")
        _log.debug("Queueing programming for rules %s: %s", self.id,
                   updates)
        self._iptables_updater.rewrite_chains(updates, {}, async=False)
        # TODO Isolate exceptions from programming the chains to this profile.
        # Radical thought: could we just say that the profile should be OK,
        # and therefore we don't care?  In other words, do we need to handle
        # the error cleverly, or can we assume that, since we built the rules
        # ourselves, they should always work?
        if not self.notified_ready:
            self._notify_ready()
            self.notified_ready = True
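
on_profile_update() above keeps ipset references in step with the profile by diffing the old and new tag sets. Below is a hypothetical, stand-alone sketch of that diffing pattern; RecordingRefHelper and extract_tags() are simplified stand-ins for RefHelper and extract_tags_from_profile(), not the real implementations.

class RecordingRefHelper(object):
    """Records acquire/discard calls; a stand-in for RefHelper."""
    def __init__(self):
        self.acquired = set()

    def acquire_ref(self, obj_id):
        self.acquired.add(obj_id)

    def discard_ref(self, obj_id):
        self.acquired.discard(obj_id)


def extract_tags(profile):
    # Placeholder: the real helper pulls the tags referenced by the
    # profile's rules.
    return set(profile.get("tags", [])) if profile else set()


def apply_tag_diff(refs, old_profile, new_profile):
    old_tags = extract_tags(old_profile)
    new_tags = extract_tags(new_profile)
    for tag in old_tags - new_tags:
        refs.discard_ref(tag)       # queue unneeded ipsets for decref
    for tag in new_tags - old_tags:
        refs.acquire_ref(tag)       # request newly-needed ipsets


refs = RecordingRefHelper()
apply_tag_diff(refs, None, {"tags": ["a", "b"]})                  # acquire a, b
apply_tag_diff(refs, {"tags": ["a", "b"]}, {"tags": ["b", "c"]})  # drop a, add c
assert refs.acquired == {"b", "c"}
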
Exemplo n.º 13
0
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, iptables_generator, profile_id, ip_version,
                 iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.iptables_generator = iptables_generator
        self.id = profile_id
        self.ip_version = ip_version
        self._ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self._ipset_refs = RefHelper(self, ipset_mgr, self._on_ipsets_acquired)

        # Latest profile update - a profile dictionary.
        self._pending_profile = None
        # Currently-programmed profile dictionary.
        self._profile = None
        # The IDs of the tags and selector ipsets it requires.
        self._required_ipsets = set()

        # State flags.
        self._notified_ready = False
        self._cleaned_up = False
        self._dead = False
        self._dirty = True

    @actor_message()
    def on_profile_update(self, profile, force_reprogram=False):
        """
        Update the programmed iptables configuration with the new
        profile.

        :param dict[str]|NoneType profile: Dictionary of all profile data or
            None if profile is to be deleted.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert not self._dead, "Shouldn't receive updates after we're dead."
        self._pending_profile = profile
        self._dirty |= force_reprogram

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.
        """
        # Flag that we're dead and then let _finish_msg_batch() do the cleanup.
        self._dead = True

    def _on_ipsets_acquired(self):
        """
        Callback from the RefHelper once it's acquired all the ipsets we
        need.

        This is called from an actor_message on our greenlet.
        """
        # Nothing to do here; if this is being called, we're already in a
        # message batch, so _finish_msg_batch() will be called next.
        _log.info("All required ipsets acquired.")

    def _finish_msg_batch(self, batch, results):
        # Due to dependency management in IptablesUpdater, we don't need to
        # worry about programming the dataplane before notifying so do it on
        # this common code path.
        if not self._notified_ready:
            self._notify_ready()
            self._notified_ready = True

        if self._dead:
            # Only want to clean up once.  Note: we can get here a second time
            # if we had a pending ipset incref in-flight when we were asked
            # to clean up.
            if not self._cleaned_up:
                try:
                    _log.info("%s unreferenced, removing our chains", self)
                    self._delete_chains()
                    self._ipset_refs.discard_all()
                    self._ipset_refs = None  # Break ref cycle.
                    self._profile = None
                    self._pending_profile = None
                finally:
                    self._cleaned_up = True
                    self._notify_cleanup_complete()
        else:
            if self._pending_profile != self._profile:
                _log.debug("Profile data changed, updating ipset references.")
                # Make sure that all the new tags and selectors are active.
                # We can't discard unneeded ones until we've updated iptables.
                new_tags_and_sels = extract_tags_and_selectors_from_profile(
                    self._pending_profile)
                for tag_or_sel in new_tags_and_sels:
                    _log.debug("Requesting ipset for tag %s", tag_or_sel)
                    # Note: acquire_ref() is a no-op if already acquired.
                    self._ipset_refs.acquire_ref(tag_or_sel)

                self._dirty = True
                self._profile = self._pending_profile
                self._required_ipsets = new_tags_and_sels

            if (self._dirty and self._ipset_refs.ready
                    and self._pending_profile is not None):
                _log.info("Ready to program rules for %s", self.id)
                try:
                    self._update_chains()
                except FailedSystemCall as e:
                    _log.error("Failed to program profile chain %s; error: %r",
                               self, e)
                else:
                    # Now we've updated iptables, we can tell the RefHelper
                    # to discard the tags we no longer need.
                    self._ipset_refs.replace_all(self._required_ipsets)
                    self._dirty = False
            elif not self._dirty:
                _log.debug("No changes to program.")
            elif self._pending_profile is None:
                _log.info("Profile is None, removing our chains")
                try:
                    self._delete_chains()
                except FailedSystemCall:
                    _log.exception("Failed to delete chains for profile %s",
                                   self.id)
                else:
                    self._ipset_refs.discard_all()
                    self._dirty = False
            else:
                assert not self._ipset_refs.ready
                _log.info("Can't program rules %s yet, waiting on ipsets",
                          self.id)

    def _delete_chains(self):
        """
        Removes our chains from the dataplane, blocks until complete.
        """
        # Need to block here: have to wait for chains to be deleted
        # before we can decref our ipsets.
        self._iptables_updater.delete_chains(
            self.iptables_generator.profile_chain_names(self.id), async=False)

    def _update_chains(self):
        """
        Updates the chains in the dataplane.

        Blocks until the update is complete.

        On entry, self._pending_profile must not be None.

        :raises FailedSystemCall: if the update fails.
        """
        _log.info("%s Programming iptables with our chains.", self)
        assert self._pending_profile is not None, \
            "_update_chains called with no _pending_profile"
        tag_to_ip_set_name = {}
        sel_to_ip_set_name = {}
        for tag_or_sel, ipset in self._ipset_refs.iteritems():
            if isinstance(tag_or_sel, SelectorExpression):
                sel_to_ip_set_name[tag_or_sel] = ipset.ipset_name
            else:
                tag_to_ip_set_name[tag_or_sel] = ipset.ipset_name

        _log.info("Updating chains for profile %s", self.id)
        _log.debug("Profile %s: %s", self.id, self._profile)

        updates, deps = self.iptables_generator.profile_updates(
            self.id,
            self._pending_profile,
            self.ip_version,
            tag_to_ipset=tag_to_ip_set_name,
            selector_to_ipset=sel_to_ip_set_name,
            comment_tag=self.id)

        _log.debug("Queueing programming for rules %s: %s", self.id, updates)

        self._iptables_updater.rewrite_chains(updates, deps, async=False)
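
The ordering in _finish_msg_batch() above matters: new ipset references are acquired before reprogramming, and replace_all() is only called afterwards so that obsolete ipsets stay alive until the chains that use them have been rewritten. A minimal, hypothetical tracker illustrating that ordering follows; it is not the real RefHelper, whose acquisition is asynchronous.

class MiniRefTracker(object):
    def __init__(self):
        self.required = set()   # ids we want references to
        self.acquired = set()   # ids we currently hold

    @property
    def ready(self):
        return self.required <= self.acquired

    def acquire_ref(self, obj_id):
        self.required.add(obj_id)
        self.acquired.add(obj_id)   # the real helper acquires asynchronously

    def replace_all(self, new_required):
        new_required = set(new_required)
        for obj_id in self.acquired - new_required:
            self.acquired.discard(obj_id)
        self.required = new_required


refs = MiniRefTracker()
for ipset_id in ("tag-a", "tag-b"):
    refs.acquire_ref(ipset_id)       # acquire everything the new profile needs
assert refs.ready                    # only reprogram once they are all held
refs.replace_all({"tag-b"})          # after reprogramming, drop what is unused
assert refs.acquired == {"tag-b"}
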
Exemplo n.º 14
0
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, profile_id, ip_version, iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.id = profile_id
        self.ip_version = ip_version
        self._ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self._ipset_refs = RefHelper(self, ipset_mgr, self._on_ipsets_acquired)

        # Latest profile update.
        self._pending_profile = None
        # Currently-programmed profile.
        self._profile = None

        # State flags.
        self._notified_ready = False
        self._cleaned_up = False
        self._dead = False
        self._dirty = True

        self.chain_names = {
            "inbound": profile_to_chain_name("inbound", profile_id),
            "outbound": profile_to_chain_name("outbound", profile_id),
        }
        _log.info("Profile %s has chain names %s",
                  profile_id, self.chain_names)

    @actor_message()
    def on_profile_update(self, profile):
        """
        Update the programmed iptables configuration with the new
        profile.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert not self._dead, "Shouldn't receive updates after we're dead."
        self._pending_profile = profile

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.
        """
        # Flag that we're dead and then let _finish_msg_batch() do the cleanup.
        self._dead = True

    def _on_ipsets_acquired(self):
        """
        Callback from the RefHelper once it's acquired all the ipsets we
        need.

        This is called from an actor_message on our greenlet.
        """
        # Nothing to do here; if this is being called, we're already in a
        # message batch, so _finish_msg_batch() will be called next.
        _log.info("All required ipsets acquired.")

    def _finish_msg_batch(self, batch, results):
        # Due to dependency management in IptablesUpdater, we don't need to
        # worry about programming the dataplane before notifying so do it on
        # this common code path.
        if not self._notified_ready:
            self._notify_ready()
            self._notified_ready = True

        if self._dead:
            # Only want to clean up once.  Note: we can get here a second time
            # if we had a pending ipset incref in-flight when we were asked
            # to clean up.
            if not self._cleaned_up:
                try:
                    _log.info("%s unreferenced, removing our chains", self)
                    chains = set(self.chain_names.values())
                    # Need to block here: have to wait for chains to be deleted
                    # before we can decref our ipsets.
                    self._iptables_updater.delete_chains(chains, async=False)
                    self._ipset_refs.discard_all()
                    self._ipset_refs = None  # Break ref cycle.
                    self._profile = None
                    self._pending_profile = None
                finally:
                    self._cleaned_up = True
                    self._notify_cleanup_complete()
        else:
            if self._pending_profile != self._profile:
                _log.debug("Profile data changed, updating ipset references.")
                old_tags = extract_tags_from_profile(self._profile)
                new_tags = extract_tags_from_profile(self._pending_profile)
                removed_tags = old_tags - new_tags
                added_tags = new_tags - old_tags
                for tag in removed_tags:
                    _log.debug("Queueing ipset for tag %s for decref", tag)
                    self._ipset_refs.discard_ref(tag)
                for tag in added_tags:
                    _log.debug("Requesting ipset for tag %s", tag)
                    self._ipset_refs.acquire_ref(tag)
                self._dirty = True
                self._profile = self._pending_profile

            if self._dirty and self._ipset_refs.ready:
                _log.info("Ready to program rules for %s", self.id)
                try:
                    self._update_chains()
                except CalledProcessError as e:
                    _log.error("Failed to program profile chain %s; error: %r",
                               self, e)
                else:
                    self._dirty = False
            elif not self._dirty:
                _log.debug("No changes to program.")
            elif not self._ipset_refs.ready:
                _log.info("Can't program rules %s yet, waiting on ipsets",
                          self.id)

    def _update_chains(self):
        """
        Updates the chains in the dataplane.
        """
        _log.info("%s Programming iptables with our chains.", self)
        updates = {}
        for direction in ("inbound", "outbound"):
            chain_name = self.chain_names[direction]
            _log.info("Updating %s chain %r for profile %s",
                      direction, chain_name, self.id)
            _log.debug("Profile %s: %s", self.id, self._profile)
            new_profile = self._pending_profile or {}
            rules_key = "%s_rules" % direction
            new_rules = new_profile.get(rules_key, [])
            tag_to_ip_set_name = {}
            for tag, ipset in self._ipset_refs.iteritems():
                tag_to_ip_set_name[tag] = ipset.name
            updates[chain_name] = rules_to_chain_rewrite_lines(
                chain_name,
                new_rules,
                self.ip_version,
                tag_to_ip_set_name,
                on_allow="RETURN",
                comment_tag=self.id)
        _log.debug("Queueing programming for rules %s: %s", self.id,
                   updates)
        self._iptables_updater.rewrite_chains(updates, {}, async=False)
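
This variant also shows the batch-coalescing idiom: on_profile_update() only stores the latest profile, and _finish_msg_batch() reprograms at most once per batch by comparing the pending profile with the currently-programmed one. Below is a minimal sketch of that idiom under assumed names; program_calls stands in for the rewrite_chains() call.

class BatchedProgrammer(object):
    def __init__(self):
        self._pending_profile = None
        self._profile = None
        self.program_calls = 0       # stands in for rewrite_chains()

    def on_profile_update(self, profile):
        # Cheap: just remember the most recent update.
        self._pending_profile = profile

    def finish_batch(self):
        if self._pending_profile != self._profile:
            self.program_calls += 1          # would rewrite the chains here
            self._profile = self._pending_profile


prog = BatchedProgrammer()
for version in (1, 2, 3):
    prog.on_profile_update({"id": "prof-1", "version": version})
prog.finish_batch()
assert prog.program_calls == 1   # three updates coalesced into one reprogram
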
Exemplo n.º 15
0
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, profile_id, ip_version, iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None

        self.id = profile_id
        self.ip_version = ip_version
        self.ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        self.notified_ready = False

        self.ipset_refs = RefHelper(self, ipset_mgr, self._maybe_update)

        self._profile = None
        """
        :type dict|None: filled in by first update.  Reset to None on delete.
        """
        self.dead = False

        self.chain_names = {
            "inbound": profile_to_chain_name("inbound", profile_id),
            "outbound": profile_to_chain_name("outbound", profile_id),
        }
        _log.info("Profile %s has chain names %s",
                  profile_id, self.chain_names)

    @actor_message()
    def on_profile_update(self, profile):
        """
        Update the programmed iptables configuration with the new
        profile.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert profile is None or profile["id"] == self.id
        assert not self.dead, "Shouldn't receive updates after we're dead."

        old_tags = extract_tags_from_profile(self._profile)
        new_tags = extract_tags_from_profile(profile)

        removed_tags = old_tags - new_tags
        added_tags = new_tags - old_tags
        for tag in removed_tags:
            _log.debug("Queueing ipset for tag %s for decref", tag)
            self.ipset_refs.discard_ref(tag)
        for tag in added_tags:
            _log.debug("Requesting ipset for tag %s", tag)
            self.ipset_refs.acquire_ref(tag)

        self._profile = profile
        self._maybe_update()

    def _maybe_update(self):
        if self.dead:
            _log.info("Not updating: profile is dead.")
        elif not self.ipset_refs.ready:
            _log.info("Can't program rules %s yet, waiting on ipsets",
                      self.id)
        else:
            _log.info("Ready to program rules for %s", self.id)
            self._update_chains()

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.  Removes
        our iptables configuration.
        """
        try:
            _log.info("%s unreferenced, removing our chains", self)
            self.dead = True
            chains = []
            for direction in ["inbound", "outbound"]:
                chain_name = self.chain_names[direction]
                chains.append(chain_name)
            self._iptables_updater.delete_chains(chains, async=False)
            self.ipset_refs.discard_all()
            self.ipset_refs = None  # Break ref cycle.
            self._profile = None
        finally:
            self._notify_cleanup_complete()

    def _update_chains(self):
        """
        Updates the chains in the dataplane.
        """
        _log.info("%s Programming iptables with our chains: %s")
        updates = {}
        for direction in ("inbound", "outbound"):
            _log.debug("Updating %s chain for profile %s", direction,
                       self.id)
            new_profile = self._profile or {}
            _log.debug("Profile %s: %s", self.id, self._profile)
            rules_key = "%s_rules" % direction
            new_rules = new_profile.get(rules_key, [])
            chain_name = self.chain_names[direction]
            tag_to_ip_set_name = {}
            for tag, ipset in self.ipset_refs.iteritems():
                tag_to_ip_set_name[tag] = ipset.name
            updates[chain_name] = rules_to_chain_rewrite_lines(
                chain_name,
                new_rules,
                self.ip_version,
                tag_to_ip_set_name,
                on_allow="RETURN")
        _log.debug("Queueing programming for rules %s: %s", self.id,
                   updates)
        self._iptables_updater.rewrite_chains(updates, {}, async=False)
        # TODO Isolate exceptions from programming the chains to this profile.
        # PLW: Radical thought: could we just say that the profile should be
        # OK, and therefore we don't care?  In other words, do we need to
        # handle the error cleverly in the short term, or can we assume that,
        # since we built the rules ourselves, they should always work?
        if not self.notified_ready:
            self._notify_ready()
            self.notified_ready = True
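
The try/finally in on_unreferenced() above guarantees that _notify_cleanup_complete() runs even if deleting the chains raises, so the reference-counting machinery never waits forever on this actor. The following is a hedged, self-contained sketch of that idiom; every name in it is a placeholder, not part of the real API.

class FlakyUpdater(object):
    def delete_chains(self, chains):
        raise RuntimeError("simulated iptables failure")


class MiniProfile(object):
    def __init__(self, updater):
        self._updater = updater
        self.cleanup_reported = False

    def on_unreferenced(self):
        try:
            # Placeholder chain names; the real ones come from
            # profile_to_chain_name().
            self._updater.delete_chains(["demo-inbound", "demo-outbound"])
        finally:
            # Stands in for _notify_cleanup_complete().
            self.cleanup_reported = True


prof = MiniProfile(FlakyUpdater())
try:
    prof.on_unreferenced()
except RuntimeError:
    pass   # the failure still propagates, but cleanup was reported first
assert prof.cleanup_reported
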