Example no. 1
    def ensure_amphora_exists(self, load_balancer_id):
        """
        Octavia health manager makes some assumptions about the existence of amphorae.
        That's why even the F5 provider driver has to care about amphora DB entries.
        Otherwise status updates won't work correctly.

        This function creates an amphora entry in the database, if it doesn't already exist.
        """
        device_entry = self._amphora_repo.get(
            db_apis.get_session(), load_balancer_id=load_balancer_id)

        # create amphora mapping if missing
        if not device_entry:
            self._amphora_repo.create(db_apis.get_session(),
                                      id=load_balancer_id,
                                      load_balancer_id=load_balancer_id,
                                      compute_flavor=CONF.host,
                                      status=lib_consts.ACTIVE)
            return

        # update host if not updated yet
        if device_entry.compute_flavor != CONF.host:
            self._amphora_repo.update(db_apis.get_session(),
                                      id=device_entry.id,
                                      compute_flavor=CONF.host)
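
The amphora row doubles as a host mapping: its id is the load balancer id and compute_flavor stores the handling host. A minimal sketch of a call site, assuming a worker instance named `worker` (a hypothetical name; compare create_load_balancer in Example no. 23, which follows the same pattern):

    # Hypothetical call site: make sure the health manager can resolve
    # status updates for this load balancer before any device sync runs.
    lb = worker._loadbalancer_repo.get(db_apis.get_session(), id=lb_id)
    worker.ensure_amphora_exists(lb.id)
    worker.queue.put((lb.vip.network_id, None))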
Example no. 2
    def _get_all_loadbalancer(self, network_id):
        LOG.debug("Get load balancers from DB for network id: %s", network_id)
        vips = self._vip_repo.get_all(db_apis.get_session(),
                                      network_id=network_id)
        loadbalancers = []
        for vip in vips[0]:
            loadbalancers.append(
                self._loadbalancer_repo.get(db_apis.get_session(),
                                            show_deleted=False,
                                            id=vip.load_balancer_id,
                                            server_group_id=CONF.host))
        return [lb for lb in loadbalancers if lb]
Example no. 3
    def sync_loadbalancers(self):
        """Sync loadbalancers that are in a PENDING state"""
        lbs = []
        pending_create_lbs = self._loadbalancer_repo.get_all(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_CREATE,
            show_deleted=False)[0]
        for lb in pending_create_lbs:
            # bind load balancer to this host if it is scheduled here
            if CONF.host == self.network_driver.get_scheduled_host(
                    lb.vip.port_id):
                self.ensure_host_set(lb)
                self.ensure_amphora_exists(lb.id)
                lbs.append(lb)

        lbs.extend(
            self._loadbalancer_repo.get_all_from_host(
                db_apis.get_session(),
                provisioning_status=lib_consts.PENDING_UPDATE))

        pools = self._pool_repo.get_pending_from_host(db_apis.get_session())
        lbs.extend([pool.load_balancer for pool in pools])

        l7policies = self._l7policy_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend(
            [l7policy.listener.load_balancer for l7policy in l7policies])

        pending_networks = collections.defaultdict(list)
        for lb in lbs:
            if lb not in pending_networks[lb.vip.network_id]:
                pending_networks[lb.vip.network_id].append(lb)

        for network_id, loadbalancers in pending_networks.items():
            LOG.info("Found pending tenant network %s, syncing...", network_id)
            try:
                if self._refresh(network_id).ok:
                    self.status.update_status(loadbalancers)
            except exceptions.RetryException as e:
                LOG.warning("Device is busy, retrying with next sync: %s", e)
            except o_exceptions.CertificateRetrievalException as e:
                LOG.warning("Could not retrieve certificate for tenant %s: %s",
                            network_id, e)
            except exceptions.AS3Exception as e:
                LOG.error("AS3 exception while syncing tenant %s: %s",
                          network_id, e)
                for lb in loadbalancers:
                    self.status.set_error(lb)
Example no. 4
    def update_listener(self, listener_id, listener_updates):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if self._refresh(listener.load_balancer.vip.network_id).ok:
            self.status.set_active(listener)
        else:
            self.status.set_error(listener)
Example no. 5
    def ensure_host_set(self, loadbalancer):
        """Assigns the current host to the load balancer by writing it
        into the server_group_id column of the load_balancer table."""
        if CONF.host[:36] != loadbalancer.server_group_id:
            self._loadbalancer_repo.update(db_apis.get_session(),
                                           id=loadbalancer.id,
                                           server_group_id=CONF.host[:36])
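
The slice to 36 characters presumably matches the UUID-sized server_group_id column in the Octavia load_balancer table, so longer hostnames are stored truncated; the comparison uses the same truncation to stay idempotent. A tiny illustration with a hypothetical host name:

    host = 'lb-agent-with-a-rather-long-hostname.example.org'  # hypothetical CONF.host
    assert len(host[:36]) == 36
    # Once written, CONF.host[:36] == loadbalancer.server_group_id,
    # so repeated calls skip the DB update.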
Example no. 6
    def _reset_in_use_quota(self, project_id):
        """ reset in_use quota to None, so it will be recalculated the next time
        :param project_id: project id
        """
        reset_dict = {
            'in_use_load_balancer': None,
            'in_use_listener': None,
            'in_use_pool': None,
            'in_use_health_monitor': None,
            'in_use_member': None,
        }

        lock_session = db_apis.get_session(autocommit=False)
        try:
            self._quota_repo.update(lock_session,
                                    project_id=project_id,
                                    quota=reset_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    'Failed to reset quota for project %(proj)s; '
                    'the project may have excess quota in use.',
                    {'proj': project_id})
                lock_session.rollback()
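
A typical caller would be a delete path, clearing the cached usage so Octavia recounts the in_use_* columns on the next quota check. A minimal sketch, assuming a worker instance `worker` and a `project_id` in scope (hypothetical names):

    # Hypothetical delete path: after removing a project's resources,
    # drop the cached counters so they get recalculated on demand.
    worker._reset_in_use_quota(project_id)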
Example no. 7
    def full_sync_reappearing_devices(self):
        session = db_apis.get_session(autocommit=False)

        # Get all pending devices
        booting_devices = self._amphora_repo.get_all(
            session,
            status=constants.AMPHORA_BOOTING,
            compute_flavor=CONF.host,
            load_balancer_id=None)

        for device in booting_devices[0]:
            if CONF.f5_agent.migration and device.role != constants.ROLE_BACKUP:
                LOG.warning(
                    "[Migration Mode] Skipping full sync of active device %s",
                    device.cached_zone)
                continue

            LOG.info("Device reappeared: %s. Doing a full sync.",
                     device.cached_zone)

            # get all load balancers (of this host)
            lbs = self._loadbalancer_repo.get_all_from_host(session,
                                                            show_deleted=False)

            # deduplicate
            for network_id in set([lb.vip.network_id for lb in lbs]):
                self.queue.put((network_id, device.cached_zone))

            # Set device ready
            self._amphora_repo.update(session,
                                      device.id,
                                      status=constants.AMPHORA_READY)
            session.commit()
Example no. 8
    def cleanup_orphaned_tenants(self):
        LOG.info("Running (24h) tenant cleanup")
        session = db_apis.get_session(autocommit=False)

        for device in self.sync.devices():
            try:
                # Fetch all Tenants
                tenants = self.sync.get_tenants(device)

                # Get all loadbalancers of this host
                for tenant_name, applications in tenants.items():
                    # Convert tenant_name to network_id
                    network_id = tenant_name.replace(constants.PREFIX_NETWORK,
                                                     '').replace('_', '-')

                    # Fetch active loadbalancers for this network
                    octavia_lb_ids = [
                        lb.id
                        for lb in self._loadbalancer_repo.get_all_by_network(
                            session, network_id, show_deleted=False)
                    ]
                    if not octavia_lb_ids:
                        LOG.info("Found orphaned tenant '%s' for device '%s'",
                                 tenant_name, device)
                        self.queue.put((network_id, device))
            except HTTPError:
                # Ignore AS3 errors
                pass
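
The tenant name is just the network UUID with a prefix added and dashes swapped for underscores, so the replace() chain above inverts that mapping. A round-trip sketch, assuming constants.PREFIX_NETWORK is 'net-' (an assumed value, for illustration only):

    PREFIX_NETWORK = 'net-'  # assumed value of constants.PREFIX_NETWORK
    network_id = 'ae3eb288-3324-4b6c-a9b8-b48bbcb39f5b'
    tenant_name = PREFIX_NETWORK + network_id.replace('-', '_')
    # -> 'net-ae3eb288_3324_4b6c_a9b8_b48bbcb39f5b'
    assert tenant_name.replace(PREFIX_NETWORK, '').replace('_', '-') == network_id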
Example no. 9
    def delete_l7policy(self, l7policy_id):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        # could have been deleted by the sync loop in the meantime
        if l7policy:
            self.queue.put(
                (l7policy.listener.load_balancer.vip.network_id, None))
Example no. 10
    def update_l7rule(self, l7rule_id, l7rule_updates):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if self._refresh(
                l7rule.l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7rule)
        else:
            self.status.set_error(l7rule)
Example no. 11
    def tenant_update(self, network_id, device=None):
        """Synchronous call to update F5s with all load balancers of a
        tenant (network_id).

        :param network_id: the AS3 tenant
        :param device: hostname of the BigIP device; if None, use the
                       active device
        :return: True if successful, else False
        """

        loadbalancers = self._loadbalancer_repo.get_all_by_network(
            db_apis.get_session(), network_id=network_id, show_deleted=False)
        if not loadbalancers:
            return False
        decl = self._declaration_manager.get_declaration(
            {network_id: loadbalancers})

        if CONF.f5_agent.dry_run:
            decl.set_action('dry-run')

        # No config syncing if we are in migration mode or specifically syncing one device
        if not CONF.f5_agent.migration and not device and CONF.f5_agent.sync_to_group:
            decl.set_sync_to_group(CONF.f5_agent.sync_to_group)

        return self.bigip(device).post(tenants=[m_part.get_name(network_id)],
                                       payload=decl)
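
A minimal sketch of how callers might use it, with hypothetical names (`worker`, `network_id`, and the device hostname):

    # Sync one tenant network to the active device...
    if worker.tenant_update(network_id):
        LOG.info('Tenant %s synced', network_id)
    # ...or pin the update to a specific device, e.g. during migration:
    worker.tenant_update(network_id, device='bigip-standby.example.org')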
Example no. 12
    def delete_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_deleted(health_mon)
Example no. 13
    def delete_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        # could have been deleted by the sync loop in the meantime
        if health_mon:
            self.queue.put(
                (health_mon.pool.load_balancer.vip.network_id, None))
Example no. 14
    def create_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        self.queue.put((pool.load_balancer.vip.network_id, None))
Example no. 15
    def update_health_monitor(self, health_monitor_id,
                              health_monitor_updates):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_active(health_mon)
        else:
            self.status.set_error(health_mon)
Example no. 16
    def create_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(member.pool.load_balancer.id)
        self.queue.put((member.pool.load_balancer.vip.network_id, None))
Example no. 17
    def create_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        self.queue.put(
            (l7rule.l7policy.listener.load_balancer.vip.network_id, None))
Example no. 18
    def create_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        self.queue.put((health_mon.pool.load_balancer.vip.network_id, None))
Example no. 19
    def create_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        if self._refresh(pool.load_balancer.vip.network_id).ok:
            self.status.set_active(pool)
        else:
            self.status.set_error(pool)
Example no. 20
    def create_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        if self._refresh(
                l7rule.l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7rule)
        else:
            self.status.set_error(l7rule)
Example no. 21
    def pending_sync(self):
        """
        Reconciliation loop that
        - synchronizes load balancers that are in a PENDING state
        - deletes load balancers that are PENDING_DELETE
        - executes a full sync on F5 devices that were offline but are now back online
        """

        # delete load balancers that are PENDING_DELETE
        session = db_apis.get_session()
        lbs_to_delete = self._loadbalancer_repo.get_all_from_host(
            session, provisioning_status=lib_consts.PENDING_DELETE)
        for lb in lbs_to_delete:
            LOG.info("Found pending deletion of lb %s", lb.id)
            self.delete_load_balancer(lb.id)

        # Find pending load balancers not yet assigned to this host
        lbs = []
        pending_create_lbs = self._loadbalancer_repo.get_all(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_CREATE,
            show_deleted=False)[0]
        for lb in pending_create_lbs:
            # bind load balancer to this host if it is scheduled here
            if CONF.host == self.network_driver.get_scheduled_host(
                    lb.vip.port_id):
                self.ensure_host_set(lb)
                lbs.append(lb)

        # Find pending load balancers
        lbs.extend(
            self._loadbalancer_repo.get_all_from_host(
                db_apis.get_session(),
                provisioning_status=lib_consts.PENDING_UPDATE))

        # Make the Octavia health manager happy by creating DB amphora entries
        for lb in lbs:
            self.ensure_amphora_exists(lb.id)

        # Find pending listeners
        listeners = self._listener_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend([listener.load_balancer for listener in listeners])

        # Find pending pools
        pools = self._pool_repo.get_pending_from_host(db_apis.get_session())
        lbs.extend([pool.load_balancer for pool in pools])

        # Find pending l7policies
        l7policies = self._l7policy_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend(
            [l7policy.listener.load_balancer for l7policy in l7policies])

        # Deduplicate into networks
        # because each network is synced separately
        pending_networks = set([lb.vip.network_id for lb in lbs])
        for network_id in pending_networks:
            self.queue.put_nowait((network_id, None))
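
The snippets don't show how this loop is scheduled; a minimal sketch of one way to drive it periodically, assuming a worker instance `worker` and an interval in seconds (both hypothetical):

    import threading
    import time

    def run_pending_sync(worker, interval=60):
        # Re-run the reconciliation loop every `interval` seconds.
        def _loop():
            while True:
                try:
                    worker.pending_sync()
                except Exception:
                    LOG.exception('pending_sync failed, retrying next cycle')
                time.sleep(interval)
        threading.Thread(target=_loop, daemon=True).start()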
Example no. 22
    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in old_member_ids
        ]
        new_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in new_member_ids
        ]
        updated_members = [(self._member_repo.get(db_apis.get_session(),
                                                  id=m.get('id')), m)
                           for m in updated_members]
        # All members belong to the same pool, so take it from whichever
        # list is non-empty; bail out if the whole batch is empty.
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        elif updated_members:
            pool = updated_members[0][0].pool
        else:
            return
        self.queue.put((pool.load_balancer.vip.network_id, None))
Example no. 23
    def create_load_balancer(self, load_balancer_id, flavor=None):
        lb = self._loadbalancer_repo.get(db_apis.get_session(),
                                         id=load_balancer_id)
        # We retry fetching the load balancer, since the API could
        # still be busy inserting the LB into the database.
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(lb.id)
        self.ensure_host_set(lb)
        self.queue.put((lb.vip.network_id, None))
Example no. 24
    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in old_member_ids
        ]
        new_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in new_member_ids
        ]
        updated_members = [(self._member_repo.get(db_apis.get_session(),
                                                  id=m.get('id')), m)
                           for m in updated_members]
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        elif updated_members:
            pool = updated_members[0][0].pool
        else:
            # nothing to do for an empty batch
            return
        load_balancer = pool.load_balancer
        network_id = load_balancer.vip.network_id
        if self._refresh(network_id).ok:
            self.status.update_status([load_balancer])
Example no. 25
    def create_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_active(health_mon)
        else:
            self.status.set_error(health_mon)
Example no. 26
    def pending_sync(self):
        """
        Reconciliation loop that
        - schedules unscheduled load balancers to this worker
        - deletes load balancers that are PENDING_DELETE
        """

        # schedule unscheduled load balancers to this worker
        self.sync_loadbalancers()

        # delete load balancers that are PENDING_DELETE
        lbs_to_delete = self._loadbalancer_repo.get_all_from_host(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_DELETE)
        for lb in lbs_to_delete:
            LOG.info("Found pending deletion of lb %s", lb.id)
            self.delete_load_balancer(lb.id)
Example no. 27
    def create_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(member.pool.load_balancer.id)

        # Fast path for regular members: try a targeted AS3 member create;
        # on failure, fall through and leave the member for a later sync.
        if not member.backup:
            try:
                if member_create(self.bigip, member).ok:
                    self.status.set_active(member)
                    return
            except exceptions.AS3Exception:
                pass
        # Backup members require a full tenant refresh.
        elif self._refresh(member.pool.load_balancer.vip.network_id).ok:
            self.status.set_active(member)
        else:
            self.status.set_error(member)
Example no. 28
    def delete_load_balancer(self, load_balancer_id, cascade=False):
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        existing_lbs = [
            loadbalancer
            for loadbalancer in self._get_all_loadbalancer(lb.vip.network_id)
            if loadbalancer.id != lb.id
        ]

        if not existing_lbs:
            # Delete whole tenant
            ret = tenant_delete(self.bigip, lb.vip.network_id)
        else:
            # Don't delete whole tenant
            segmentation_id = self.network_driver.get_segmentation_id(
                lb.vip.network_id)
            ret = tenant_update(self.bigip, self.cert_manager,
                                lb.vip.network_id, existing_lbs,
                                segmentation_id)

        if ret.ok:
            self.status.set_deleted(lb)
Example no. 29
    def delete_amphora(self, amphora_id):
        self._amphora_repo.delete(db_apis.get_session(), id=amphora_id)
Example no. 30
    def update_l7rule(self, l7rule_id, l7rule_updates):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        self.queue.put(
            (l7rule.l7policy.listener.load_balancer.vip.network_id, None))