def test_mainline(self, m_sleep):
    m_snap_response = Mock()
    m_snap_response.etcd_index = 1
    m_poll_response = Mock()
    m_poll_response.modifiedIndex = 2
    responses = [
        m_snap_response, m_poll_response, ResyncRequired(),  # Loop 1
        EtcdException(),      # Loop 2
        ExpectedException(),  # Loop 3, break out of loop.
    ]
    self.m_client.read.side_effect = iter(responses)
    with patch.object(self.watcher, "_on_pre_resync",
                      autospec=True) as m_pre_r:
        with patch.object(self.watcher, "_on_snapshot_loaded",
                          autospec=True) as m_snap_load:
            self.assertRaises(ExpectedException, self.watcher.loop)
    # _on_pre_resync() called once per loop.
    self.assertEqual(m_pre_r.mock_calls, [call(), call(), call()])
    # The snapshot only loads successfully the first time.
    self.assertEqual(m_snap_load.mock_calls, [call(m_snap_response)])
    self.assertEqual(self.m_dispatcher.handle_event.mock_calls,
                     [call(m_poll_response)])
    # Should sleep after exception.
    m_sleep.assert_called_once_with(1)
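
# For context, the behaviour the test above pins down can be sketched as
# follows.  This is a minimal illustration, not the real watcher loop: the
# names `client`, `dispatcher`, `key_to_poll` and `RETRY_DELAY` are
# assumptions, and the ResyncRequired class below is a stand-in for the
# project's own exception.
import time

from etcd import EtcdException  # python-etcd's base error class.


class ResyncRequired(Exception):
    """Stand-in for the project's resync-signalling exception."""


RETRY_DELAY = 1  # Seconds to back off after an unexpected etcd error.


class WatcherLoopSketch(object):
    def loop(self):
        while True:
            try:
                self._on_pre_resync()  # Once per pass, as asserted above.
                # Load a recursive snapshot of the watched subtree.
                snapshot = self.client.read(self.key_to_poll, recursive=True)
                self._on_snapshot_loaded(snapshot)  # May raise ResyncRequired.
                wait_index = snapshot.etcd_index + 1
                while True:
                    # Long-poll for the next change, hand it to the
                    # dispatcher, then advance past its modifiedIndex.
                    event = self.client.read(self.key_to_poll, wait=True,
                                             recursive=True,
                                             waitIndex=wait_index)
                    self.dispatcher.handle_event(event)
                    wait_index = event.modifiedIndex + 1
            except ResyncRequired:
                continue  # Go straight back around for a fresh snapshot.
            except EtcdException:
                time.sleep(RETRY_DELAY)  # Back off before resyncing.
# Anything that is neither ResyncRequired nor an EtcdException (such as the
# test's ExpectedException) escapes loop(), which is how the test terminates.
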
def _resync(self, response, **kwargs):
    """
    Force a resync.

    :raises ResyncRequired: always.
    """
    _log.warning("Resync triggered due to change to %s", response.key)
    raise ResyncRequired()
def _force_resync(self, response, **kwargs):
    """
    Force a resync in response to a watched etcd event.

    :raises ResyncRequired: always.
    """
    LOG.warning("Forcing a resync due to %s to key %s",
                response.action, response.key)
    raise ResyncRequired()
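
# Handlers such as _resync() and _force_resync() take **kwargs because the
# dispatcher passes any captured path segments through as keyword arguments.
# A sketch of how such a handler might be wired up; the register()/on_set/
# on_del API and the key pattern here are assumptions, not the project's
# confirmed interface:
dispatcher.register("/calico/v1/config/<config_param>",
                    on_set=watcher._force_resync,
                    on_del=watcher._force_resync)
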
def on_ready_flag_set(self, response):
    """
    Handler for the global Ready flag: any value other than "true"
    invalidates the data model and forces a resync.

    :raises ResyncRequired: if the flag's value is not "true".
    """
    if response.value != "true":
        raise ResyncRequired()
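
# The handler's contract in isolation (hypothetical snippet; `watcher` is an
# assumed instance name, exercised here with mock responses):
from mock import Mock

watcher.on_ready_flag_set(Mock(value="true"))   # No-op: flag still set.
watcher.on_ready_flag_set(Mock(value="false"))  # Raises ResyncRequired.
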
def _on_snapshot_loaded(self, etcd_snapshot_response):
    """
    Parses a snapshot response from etcd and passes the contents to the
    update splitter.

    :raises ResyncRequired: if the Ready flag is not set in the snapshot.
    """
    rules_by_id = {}
    tags_by_id = {}
    endpoints_by_id = {}
    ipv4_pools_by_id = {}
    self.endpoint_ids_per_host.clear()
    self.ipv4_by_hostname.clear()
    still_ready = False
    for child in etcd_snapshot_response.children:
        profile_id, rules = parse_if_rules(child)
        if profile_id:
            rules_by_id[profile_id] = rules
            continue
        profile_id, tags = parse_if_tags(child)
        if profile_id:
            tags_by_id[profile_id] = tags
            continue
        endpoint_id, endpoint = parse_if_endpoint(self._config, child)
        if endpoint_id and endpoint:
            endpoints_by_id[endpoint_id] = endpoint
            self.endpoint_ids_per_host[endpoint_id.host].add(endpoint_id)
            continue
        pool_id, pool = parse_if_ipam_v4_pool(child)
        if pool_id and pool:
            ipv4_pools_by_id[pool_id] = pool
            continue
        if self._config.IP_IN_IP_ENABLED:
            hostname, ip = parse_if_host_ip(child)
            if hostname and ip:
                self.ipv4_by_hostname[hostname] = ip
                continue
        # Double-check the flag hasn't changed since we read it before.
        if child.key == READY_KEY:
            if child.value == "true":
                still_ready = True
            else:
                _log.warning("Aborting resync because ready flag was "
                             "unset since we read it.")
                raise ResyncRequired()
    if not still_ready:
        _log.warning("Aborting resync; ready flag no longer present.")
        raise ResyncRequired()

    # We now know exactly which endpoints are on this host, use that to
    # clean up any endpoint statuses that should now be gone.
    our_endpoints_ids = self.endpoint_ids_per_host[self._config.HOSTNAME]
    self.clean_up_endpoint_statuses(our_endpoints_ids)

    # Actually apply the snapshot. This does not return anything, but
    # just sends the relevant messages to the relevant threads to make
    # all the processing occur.
    _log.info("Snapshot parsed, passing to update splitter")
    self.splitter.apply_snapshot(rules_by_id,
                                 tags_by_id,
                                 endpoints_by_id,
                                 ipv4_pools_by_id,
                                 async=True)
    if self._config.IP_IN_IP_ENABLED:
        # We only support IPv4 for host tracking right now so there's not
        # much point in going via the splitter.
        # FIXME Support IP-in-IP for IPv6.
        _log.info("Sending (%d) host IPs to ipset.",
                  len(self.ipv4_by_hostname))
        self.hosts_ipset.replace_members(self.ipv4_by_hostname.values(),
                                         async=True)
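
# All of the parse_if_*() helpers above share one contract: they return a
# (key_id, parsed_value) pair when the node matches their key pattern, and
# (None, None) otherwise, which is what lets the snapshot loop try each
# parser in turn and fall through on a falsy id.  A minimal sketch of one
# such helper; the exact key regex and JSON payload are assumptions:
import json
import re

RULES_KEY_RE = re.compile(
    r'^/calico/v1/policy/profile/(?P<profile_id>[^/]+)/rules$')


def parse_if_rules(etcd_node):
    """Return (profile_id, rules) for a profile rules node, else (None, None)."""
    m = RULES_KEY_RE.match(etcd_node.key)
    if not m:
        return None, None
    try:
        rules = json.loads(etcd_node.value)
    except (TypeError, ValueError):
        rules = None  # Key matched but value was unparseable; id still reported.
    return m.group("profile_id"), rules
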