Example #1
PER_ENDPOINT_KEY = ENDPOINT_DIR + "/<endpoint_id>"
CONFIG_PARAM_KEY = CONFIG_DIR + "/<config_param>"
PER_HOST_CONFIG_PARAM_KEY = PER_HOST_DIR + "/config/<config_param>"
TIER_DATA = POLICY_DIR + "/tier/<tier>/metadata"
TIERED_PROFILE = POLICY_DIR + "/tier/<tier>/policy/<policy_id>"

IPAM_DIR = VERSION_DIR + "/ipam"
IPAM_V4_DIR = IPAM_DIR + "/v4"
POOL_V4_DIR = IPAM_V4_DIR + "/pool"
CIDR_V4_KEY = POOL_V4_DIR + "/<pool_id>"
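# For example, assuming VERSION_DIR resolves to something like "/calico/v1"
# (its definition isn't shown in this snippet), a concrete pool key would
# look like "/calico/v1/ipam/v4/pool/10.1.0.0-16", with the pool's CIDR
# encoded as the <pool_id> segment.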

# Max number of events from the driver process before we yield to another
# greenlet.
MAX_EVENTS_BEFORE_YIELD = 200

# Global diagnostic counters.
_stats = StatCounter("Etcd counters")


class EtcdAPI(EtcdClientOwner, Actor):
    """
    Our API to etcd.

    Since the python-etcd API is blocking, we defer API watches to
    a worker greenlet and communicate with it via Events.

    We use a second worker for writing our status back to etcd.  This
    avoids sharing the etcd client between reads and writes, which is
    problematic because we need to handle EtcdClusterIdChanged for polls
    but not for writes.
    """
    def __init__(self, config, hosts_ipset):
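
The EtcdAPI docstring above describes pushing the blocking python-etcd watch
onto a dedicated worker greenlet.  A minimal, self-contained sketch of that
pattern follows; it is not Felix's actual code, the helper names, key and
endpoint are illustrative, and it assumes gevent monkey-patching so the
blocking long-poll only parks the worker greenlet:

from gevent import monkey
monkey.patch_all()  # Make blocking sockets cooperative under gevent.

import etcd            # python-etcd: its read() API is blocking.
import gevent
import gevent.queue


def watch_etcd(client, key, event_queue):
    """Worker greenlet: long-poll etcd and forward each event over a queue."""
    while True:
        # Blocking watch; with monkey-patching only this greenlet waits here.
        response = client.read(key, wait=True, recursive=True)
        event_queue.put(response)


event_queue = gevent.queue.Queue()
etcd_client = etcd.Client(host="127.0.0.1", port=4001)
gevent.spawn(watch_etcd, etcd_client, "/calico/v1", event_queue)

# The main greenlet consumes events with event_queue.get() and is never
# blocked by etcd itself.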
Example #2
import collections
import logging

import gevent.local

from calico.felix.futils import StatCounter

_log = logging.getLogger(__name__)

# Minimum gevent scheduling delay.  A delay of 0 should mean "yield" but
# gevent has a known issue whereby a greenlet that sleeps for 0 may be
# rescheduled immediately.  Any small positive value is enough to truly yield.
MIN_DELAY = 0.000001
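# In practice, code that needs a true yield therefore calls
# gevent.sleep(MIN_DELAY) rather than gevent.sleep(0).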

ResultOrExc = collections.namedtuple("ResultOrExc", ("result", "exception"))
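# Normally one of the two fields is None; for example (illustrative values):
#   ResultOrExc(result=42, exception=None)   # the call succeeded
#   ResultOrExc(result=None, exception=exc)  # the call raised exc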

# Local storage to allow diagnostics.
actor_storage = gevent.local.local()

# Global diagnostic counters.
_stats = StatCounter("Actor framework counters")


class Actor(object):
    """
    Class that contains a queue and a greenlet serving that queue.
    """

    max_ops_before_yield = 1000
    """Number of calls to self._maybe_yield before it yields"""

    batch_delay = 0.01
    """
    Minimum delay between schedules of this Actor.  Larger values encourage
    more batching of messages and reduce starvation (but introduce more
    latency when we're under load).
    """
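
To make the queue-plus-greenlet pattern that the Actor docstring describes
concrete, here is a minimal, self-contained sketch (a toy, not Felix's actual
Actor): one greenlet serves a gevent queue, and a short batch delay lets
messages accumulate so they can be handled together.

import gevent
import gevent.queue


class MiniActor(object):
    """Toy actor: a queue plus a greenlet that serves it in batches."""

    batch_delay = 0.01  # Let messages build up briefly before processing.

    def __init__(self):
        self._msg_queue = gevent.queue.Queue()
        self.greenlet = gevent.spawn(self._loop)

    def send(self, msg):
        self._msg_queue.put(msg)

    def _loop(self):
        while True:
            batch = [self._msg_queue.get()]    # Block until work arrives.
            gevent.sleep(self.batch_delay)     # Encourage batching.
            while not self._msg_queue.empty():
                batch.append(self._msg_queue.get_nowait())
            for msg in batch:
                print("Processing %s" % msg)


actor = MiniActor()
actor.send("hello")
gevent.sleep(0.1)  # Give the actor's greenlet a chance to run.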
Example #3
    def __init__(self, table, config, ip_version=4):
        super(IptablesUpdater, self).__init__(qualifier="v%d-%s" %
                                                        (ip_version, table))
        self.table = table
        self.refresh_interval = config.REFRESH_INTERVAL
        self.iptables_generator = config.plugins["iptables_generator"]
        self.ip_version = ip_version
        if ip_version == 4:
            self._restore_cmd = "iptables-restore"
            self._save_cmd = "iptables-save"
            self._iptables_cmd = "iptables"
        else:
            assert ip_version == 6
            self._restore_cmd = "ip6tables-restore"
            self._save_cmd = "ip6tables-save"
            self._iptables_cmd = "ip6tables"

        self._chains_in_dataplane = None
        """
        Set of chains that we know are actually in the dataplane.  Loaded
        at start of day and then kept in sync.
        """
        self._grace_period_finished = False
        """
        Flag that is set after the graceful restart window is over.
        """

        self._programmed_chain_contents = {}
        """Map from chain name to chain contents, only contains chains that
        have been explicitly programmed."""
        self._inserted_rule_fragments = set()
        """Special-case rule fragments that we've explicitly inserted."""
        self._removed_rule_fragments = set()
        """Special-case rule fragments that we've explicitly removed.
        We need to cache this to defend against other processes accidentally
        reverting our removal."""
        self._missing_chain_overrides = {}
        """Overrides for chain contents when we need to program a chain but
        it's missing."""

        self._required_chains = defaultdict(set)
        """Map from chain name to the set of names of chains that it
        depends on."""
        self._requiring_chains = defaultdict(set)
        """Map from chain to the set of chains that depend on it.
        Inverse of self._required_chains."""
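        # For example (chain names are illustrative): if chain "felix-FORWARD"
        # jumps to chain "felix-from-1234", then
        #     self._required_chains["felix-FORWARD"] == {"felix-from-1234"}
        #     self._requiring_chains["felix-from-1234"] == {"felix-FORWARD"}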

        # Since it's fairly complex to keep track of the changes required
        # for a particular batch and still be able to roll-back the changes
        # to our data structures, we delegate to a per-batch object that
        # does that calculation.
        self._txn = None
        """:type: _Transaction; per-batch object used to track index changes
        for this batch."""
        self._completion_callbacks = None
        """List of callbacks to issue once the current batch completes."""

        # Diagnostic counters.
        self._stats = StatCounter("IPv%s %s iptables updater" %
                                  (ip_version, table))

        # Avoid duplicating init logic.
        self._reset_batched_work()
        self._load_chain_names_from_iptables(async=True)

        # Optionally, start periodic refresh timer.
        if self.refresh_interval > 0:
            _log.info("Periodic iptables refresh enabled, starting "
                      "resync greenlet")
            refresh_greenlet = gevent.spawn(self._periodic_refresh)
            refresh_greenlet.link_exception(self._on_worker_died)
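
The last two lines above use a common gevent idiom: spawn a long-lived worker
greenlet and attach link_exception() so that an unexpected crash in the worker
is reported rather than silently swallowed.  A minimal sketch of that idiom,
with illustrative names rather than Felix's real _periodic_refresh or
_on_worker_died:

import gevent


def periodic_refresh(interval_secs, do_refresh):
    """Worker loop: sleep for the refresh interval, then trigger a refresh."""
    while True:
        gevent.sleep(interval_secs)
        do_refresh()


def on_worker_died(greenlet):
    # gevent calls this with the dead greenlet if it raised an exception.
    print("Worker greenlet died: %r" % greenlet.exception)


def do_refresh():
    print("Refreshing iptables state...")  # Stand-in for the real resync work.


refresh_greenlet = gevent.spawn(periodic_refresh, 30, do_refresh)
refresh_greenlet.link_exception(on_worker_died)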