Example #1
    def add_etcd_gpe_remote_mapping(self, segmentation_id, mac_address, ip):
        """Create a remote GPE overlay to underlay mapping

        Overlay = mac_address + ip_address of the VM's port
        Underlay = IP address of the VPP's underlay interface
        """
        underlay_ip = self.gpe.gpe_underlay_addr
        gpe_key = GPE_KEY_SPACE + '/%s/%s/%s' % (segmentation_id, self.host,
                                                 ip)
        gpe_data = {'mac': mac_address, 'host': underlay_ip}
        LOG.debug('Writing GPE key to etcd %s with gpe_data %s', gpe_key,
                  gpe_data)
        etcdutils.json_writer(self.client_factory.client()).write(
            gpe_key, gpe_data)
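
For reference, a minimal standalone sketch of the key/value pair this method ends up writing to etcd. The GPE_KEY_SPACE prefix and all addresses below are placeholders, not the driver's real values:

    # Placeholder values for illustration only; in the agent these come from
    # configuration, self.host and the VM's port.
    GPE_KEY_SPACE = '/example/gpe'        # hypothetical prefix
    segmentation_id = 5000                # the GPE VNI
    host = 'compute-1'                    # self.host
    ip = '10.0.0.5'                       # overlay IP of the VM's port
    mac_address = 'fa:16:3e:aa:bb:cc'     # overlay MAC of the VM's port
    underlay_ip = '192.168.10.2'          # self.gpe.gpe_underlay_addr

    gpe_key = GPE_KEY_SPACE + '/%s/%s/%s' % (segmentation_id, host, ip)
    # gpe_key  -> '/example/gpe/5000/compute-1/10.0.0.5'
    gpe_data = {'mac': mac_address, 'host': underlay_ip}
    # gpe_data -> {'mac': 'fa:16:3e:aa:bb:cc', 'host': '192.168.10.2'}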
Example #2
    def add_etcd_gpe_remote_mapping(self, segmentation_id, mac_address, ip):
        """Create a remote GPE overlay to underlay mapping

        Overlay = mac_address + ip_address of the VM's port
        Underlay = IP address of the VPP's underlay interface
        """
        underlay_ip = self.gpe.gpe_underlay_addr
        gpe_key = GPE_KEY_SPACE + '/%s/%s/%s' % (
            segmentation_id, self.host, ip)
        gpe_data = {'mac': mac_address, 'host': underlay_ip}
        LOG.debug('Writing GPE key to etcd %s with gpe_data %s',
                  gpe_key, gpe_data)
        etcdutils.json_writer(self.client_factory.client()).write(
            gpe_key, gpe_data)
Example #3
    def fetch_remote_gpe_mappings(self, vni):
        """Fetch and add all remote mappings from etcd for the vni

        Thread-safe: creates its own client every time
        """
        key_space = GPE_KEY_SPACE + "/%s" % vni
        LOG.debug("Fetching remote gpe mappings for vni:%s", vni)
        try:
            rv = etcdutils.json_writer(self.client_factory.client()).read(
                key_space, recursive=True)

            for child in rv.children:
                m = re.match(key_space + '/([^/]+)' + '/([^/]+)' + '/([^/]+)',
                             child.key)
                if m:
                    hostname = m.group(1)
                    mac = m.group(2)
                    ip = m.group(3)
                    if self.is_valid_remote_map(vni, hostname):
                        self.gpe.ensure_remote_gpe_mapping(vni, mac, ip,
                                                           child.value)
        except etcd.EtcdKeyNotFound:
            # The remote gpe key is not found. The agent may not have
            # added it to etcd yet. We will be told to read it later.
            # Continue and don't exit.
            pass
        except etcd.EtcdException as e:
            # Log any other etcd exception as an error
            LOG.error("Etcd exception %s while fetching GPE mappings", e)
            LOG.exception("etcd exception in fetch-gpe-mappings")
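
In this older variant the MAC address is part of the key itself, so a key under key_space has the shape <hostname>/<mac>/<ip>, and child.value (presumably the remote underlay address; compare the newer variant in Example #4) is passed straight through. A minimal sketch of the split, with a hypothetical key and a placeholder prefix:

    import re

    key_space = '/example/gpe/5000'   # placeholder for GPE_KEY_SPACE + '/vni'
    child_key = key_space + '/compute-1/fa:16:3e:aa:bb:cc/10.0.0.5'

    m = re.match(key_space + '/([^/]+)' + '/([^/]+)' + '/([^/]+)', child_key)
    hostname, mac, ip = m.group(1), m.group(2), m.group(3)
    # -> ('compute-1', 'fa:16:3e:aa:bb:cc', '10.0.0.5')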
Example #4
    def fetch_remote_gpe_mappings(self, vni):
        """Fetch and add all remote mappings from etcd for the vni

        Thread-safe: creates its own client every time
        """
        key_space = GPE_KEY_SPACE + "/%s" % vni
        LOG.debug("Fetching remote gpe mappings for vni:%s", vni)
        try:
            rv = etcdutils.json_writer(self.client_factory.client()).read(
                key_space, recursive=True)

            for child in rv.children:
                m = re.match(key_space + '/([^/]+)' + '/([^/]+)', child.key)
                if m:
                    hostname = m.group(1)
                    ip = m.group(2)
                    data = jsonutils.loads(child.value)
                    mac = mac_str_t(data["mac"])
                    remote_ip = data["host"]
                    if self.is_valid_remote_map(vni, hostname):
                        self.gpe.ensure_remote_gpe_mapping(
                            vni, mac, ip, remote_ip)
        except etcd.EtcdKeyNotFound:
            # The remote gpe key is not found. The agent may not have
            # added it to etcd yet. We will be told to read it later.
            # Continue and don't exit.
            pass
        except etcd.EtcdException as e:
            # Log any other etcd exception as an error
            LOG.error("Etcd exception %s while fetching GPE mappings", e)
            LOG.exception("etcd exception in fetch-gpe-mappings")
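
This newer variant reads back exactly what the writer in Examples #1 and #2 stores: the key carries <hostname>/<ip> and the JSON value carries the MAC and the remote underlay address (the mac_str_t conversion is omitted here). A minimal standalone sketch, with a hypothetical key/value and a placeholder prefix:

    import re

    from oslo_serialization import jsonutils

    GPE_KEY_SPACE = '/example/gpe'    # hypothetical prefix
    vni = 5000
    key_space = GPE_KEY_SPACE + "/%s" % vni

    # Key/value shaped like the output of add_etcd_gpe_remote_mapping()
    child_key = key_space + '/compute-1/10.0.0.5'
    child_value = '{"mac": "fa:16:3e:aa:bb:cc", "host": "192.168.10.2"}'

    m = re.match(key_space + '/([^/]+)' + '/([^/]+)', child_key)
    hostname, ip = m.group(1), m.group(2)        # 'compute-1', '10.0.0.5'
    data = jsonutils.loads(child_value)
    mac, remote_ip = data["mac"], data["host"]   # overlay MAC, underlay IP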
Example #5
    def delete_etcd_gpe_remote_mapping(self, segmentation_id, mac_address):
        """Delete a remote GPE overlay to underlay mapping."""
        gpe_dir = GPE_KEY_SPACE + '/%s/%s' % (segmentation_id, self.host)

        def get_child_keys():
            child_keys = etcdutils.json_writer(
                self.client_factory.client()).read(gpe_dir, recursive=True)
            return child_keys

        for result in get_child_keys().children:
            # TODO(najoy): Fix the type of result. It must be a
            # ParsedEtcdResult that works with signed keys instead
            # of the EtcdResult, and json.loads is not required.
            data = jsonutils.loads(result.value)
            if data['mac'] == mac_address:
                etcdutils.json_writer(self.client_factory.client()
                                      ).delete(result.key)
        # Delete the etcd directory if it's empty
        if len(list(get_child_keys().children)) == 1:
            for result in get_child_keys().children:
                if result.dir:
                    etcdutils.json_writer(
                        self.client_factory.client()).delete(result.key)
Example #6
    def delete_etcd_gpe_remote_mapping(self, segmentation_id, mac_address):
        """Delete a remote GPE overlay to underlay mapping."""
        gpe_dir = GPE_KEY_SPACE + '/%s/%s' % (segmentation_id, self.host)

        def get_child_keys():
            child_keys = etcdutils.json_writer(
                self.client_factory.client()).read(gpe_dir, recursive=True)
            return child_keys

        for result in get_child_keys().children:
            # TODO(najoy): Fix the type of result. It must be a
            # ParsedEtcdResult that works with signed keys instead
            # of the EtcdResult, and json.loads is not required.
            data = jsonutils.loads(result.value)
            if data['mac'] == mac_address:
                etcdutils.json_writer(self.client_factory.client()).delete(
                    result.key)
        # Delete the etcd directory if it's empty
        if len(list(get_child_keys().children)) == 1:
            for result in get_child_keys().children:
                if result.dir:
                    etcdutils.json_writer(self.client_factory.client()).delete(
                        result.key)
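
A note on the final check: with python-etcd, a result's children iterator yields the node itself when there are no child nodes, so a recursive read of an empty directory returns exactly one entry, the directory node, which is what the len(...) == 1 test detects. A minimal sketch of the same check against a plain etcd client (hypothetical client and path, outside the json_writer wrapper):

    import etcd

    client = etcd.Client()            # hypothetical local etcd endpoint
    gpe_dir = '/example/gpe/5000/compute-1'

    result = client.read(gpe_dir, recursive=True)
    children = list(result.children)
    # An empty directory yields only its own node, which reports dir=True.
    if len(children) == 1 and children[0].dir:
        client.delete(children[0].key, dir=True)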
Example #7
    def get_child_keys():
        child_keys = etcdutils.json_writer(
            self.client_factory.client()).read(gpe_dir, recursive=True)
        return child_keys
Example #8
    def _forward_worker(self):
        LOG.debug('forward worker begun')
        etcd_client = self.client_factory.client()
        etcd_writer = etcdutils.json_writer(etcd_client)
        lease_time = cfg.CONF.ml2_vpp.forward_worker_master_lease_time
        recovery_time = cfg.CONF.ml2_vpp.forward_worker_recovery_time

        etcd_election = etcdutils.EtcdElection(etcd_client,
                                               'forward_worker',
                                               self.election_key_space,
                                               work_time=lease_time,
                                               recovery_time=recovery_time)
        while True:
            # Wait indefinitely to gain (or regain) mastership of this
            # thread pool. Most threads will be sitting here.
            etcd_election.wait_until_elected()
            try:
                # Master loop - as long as we are master and can
                # maintain it, process incoming events.

                # Every long running section is preceded by extending
                # mastership of the thread pool and followed by
                # confirmation that we still have mastership (usually
                # by a further extension).

                def work(k, v):
                    self.do_etcd_update(etcd_writer, k, v)

                # We will try to empty the pending rows in the DB
                while True:
                    etcd_election.extend_election(
                        cfg.CONF.ml2_vpp.db_query_time)
                    session = n_context.get_admin_context().session
                    maybe_more = db.journal_read(session, work)
                    if not maybe_more:
                        LOG.debug('forward worker has emptied journal')
                        etcd_election.extend_election(lease_time)
                        break

                # work queue is now empty.

                # Wait to be kicked or, as a failsafe, wake every few
                # seconds in case another thread or process added work
                # but failed to notify us to process it.
                with eventlet.Timeout(lease_time + 1, False):
                    etcd_election.extend_election(lease_time)
                    try:
                        etcd_client.watch(self.journal_kick_key,
                                          timeout=lease_time)
                    except etcd.EtcdException:
                        # Check the DB queue now, anyway
                        pass
            except etcdutils.EtcdElectionLost:
                # We are no longer master
                pass
            except Exception as e:
                # TODO(ijw): log exception properly
                LOG.warning(
                    "Problems in forward worker - error name is %s. "
                    "Proceeding without quitting.",
                    type(e).__name__)
                LOG.warning("Exception in forward_worker: %s", e)
                # something went bad; breathe, in case we end
                # up in a tight loop
                time.sleep(1)
                # never quit
                pass
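
The watch at the end is guarded twice: etcd's own watch timeout and an outer eventlet.Timeout, so the loop wakes up and re-polls the journal even if the watch call itself hangs. A minimal sketch of that pattern with a plain etcd client (hypothetical key and lease time):

    import etcd
    import eventlet

    client = etcd.Client()            # hypothetical local etcd endpoint
    lease_time = 30                   # seconds, illustrative only

    # Passing False makes the timeout silent: the with-block is simply
    # abandoned, which here just means "go back and poll the journal".
    with eventlet.Timeout(lease_time + 1, False):
        try:
            client.watch('/example/journal-kick', timeout=lease_time)
        except etcd.EtcdException:
            # The watch timed out or failed; check the work queue anyway.
            pass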