Example #1
def retry_on_db_error(func, retry=None):
    """Decorates the given function so that it retries on DB errors.

    Note that the decorator retries the function/method only on some
    of the DB errors that are considered to be worth retrying, like
    deadlocks and disconnections.

    :param func: Function to decorate.
    :param retry: a Retrying object
    :return: Decorated function.
    """
    if not retry:
        retry = tenacity.Retrying(
            retry=tenacity.retry_if_exception_type(_RETRY_ERRORS),
            stop=tenacity.stop_after_attempt(50),
            wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2)
        )

    # The `assigned` arg should be empty as some of the default values are not
    # supported by simply initialized MagicMocks. The consequence may
    # be that the representation will contain the wrapper and not the
    # wrapped function.
    @functools.wraps(func, assigned=[])
    def decorate(*args, **kw):
        # Retrying library decorator might potentially run a decorated
        # function within a new thread so it's safer not to apply the
        # decorator directly to a target method/function because we can
        # lose an authentication context.
        # The solution is to create one more function and explicitly set
        # auth context before calling it (potentially in a new thread).
        auth_ctx = context.ctx() if context.has_ctx() else None

        return retry.call(_with_auth_context, auth_ctx, func, *args, **kw)

    return decorate
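For orientation, the wait policy above sleeps 0s after the first failure, then 0.1s more after each subsequent one, capped at 2s per sleep, for at most 50 attempts. A few lines of plain Python (illustrative only, not part of the module above) make the worst-case retry budget explicit:

def schedule(attempts, start=0.0, increment=0.1, maximum=2.0):
    # sleep taken after failed attempt k: min(start + increment * (k - 1), maximum)
    return [min(start + increment * (k - 1), maximum) for k in range(1, attempts)]

waits = schedule(50)
print(waits[:5])             # [0.0, 0.1, 0.2, 0.3, 0.4] (float rounding aside)
print(round(sum(waits), 1))  # 77.0 -> worst-case total sleep across 50 attempts, in seconds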
Example #2
def retry_on_db_error(func, retry=None):
    """Decorates the given function so that it retries on DB errors.

    Note that the decorator retries the function/method only on some
    of the DB errors that are considered to be worth retrying, like
    deadlocks and disconnections.

    :param func: Function to decorate.
    :param retry: a Retrying object
    :return: Decorated function.
    """
    if not retry:
        retry = tenacity.Retrying(
            retry=tenacity.retry_if_exception_type(_RETRY_ERRORS),
            stop=tenacity.stop_after_attempt(50),
            wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2))

    # The `assigned` arg should be empty as some of the default values are not
    # supported by simply initialized MagicMocks. The consequence may
    # be that the representation will contain the wrapper and not the
    # wrapped function.
    @functools.wraps(func, assigned=[])
    def decorate(*args, **kw):
        # Retrying library decorator might potentially run a decorated
        # function within a new thread so it's safer not to apply the
        # decorator directly to a target method/function because we can
        # lose an authentication context.
        # The solution is to create one more function and explicitly set
        # auth context before calling it (potentially in a new thread).
        auth_ctx = context.ctx() if context.has_ctx() else None

        return retry.call(_with_auth_context, auth_ctx, func, *args, **kw)

    return decorate
Example #3
def create_db_retry_object():
    return MistralRetrying(
        retry=tenacity.retry_if_exception_type(
            (sa.exc.OperationalError, db_exc.DBConnectionError)),
        stop=tenacity.stop_after_attempt(10),
        wait=tenacity.wait_incrementing(increment=0.2)  # 0.2 seconds
    )
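MistralRetrying above is a project-specific subclass of tenacity.Retrying; the returned object can wrap any callable. Depending on the tenacity release this is spelled r(fn, ...) (6.x and newer) or r.call(fn, ...) (older versions, as in the decorators above). A minimal, self-contained sketch with a stand-in exception and query (all names here are hypothetical):

import tenacity


class TransientDBError(Exception):
    """Stand-in for sa.exc.OperationalError / db_exc.DBConnectionError."""


retrying = tenacity.Retrying(
    retry=tenacity.retry_if_exception_type(TransientDBError),
    stop=tenacity.stop_after_attempt(10),
    wait=tenacity.wait_incrementing(increment=0.2))

state = {"attempts": 0}


def flaky_query():
    state["attempts"] += 1
    if state["attempts"] < 3:
        raise TransientDBError("connection lost")
    return "rows"


# Newer tenacity: a Retrying instance is directly callable.
assert retrying(flaky_query) == "rows"
assert state["attempts"] == 3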
Example #4
def retry_on_db_error(func):
    """Decorates the given function so that it retries on DB deadlock errors.

    :param func: Function to decorate.
    :return: Decorated function.
    """
    @functools.wraps(func)
    @tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_exception_type(_RETRY_ERRORS),
        stop=tenacity.stop_after_attempt(10),
        wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2)
    )
    def decorate(*args, **kw):
        try:
            return func(*args, **kw)
        except db_exc.DBDeadlock:
            LOG.exception(
                "DB error detected, operation will be retried: %s", func)
            raise
    return decorate
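A quick way to see what reraise=True changes (a self-contained toy, not project code): once stop_after_attempt is exhausted, the caller gets the original exception back instead of a tenacity.RetryError.

import tenacity


@tenacity.retry(
    reraise=True,
    retry=tenacity.retry_if_exception_type(ValueError),
    stop=tenacity.stop_after_attempt(3),
    wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2))
def always_deadlocks():
    raise ValueError("pretend deadlock")  # stand-in for db_exc.DBDeadlock


try:
    always_deadlocks()
except ValueError:
    # With reraise=True the original error surfaces here;
    # without it, tenacity.RetryError would be raised instead.
    print("gave up after 3 attempts")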
Example #5
class F5vCMPBackend(object):
    def __init__(self, cfg):
        self.conf = cfg
        self.mgmt = None
        self.vcmp_host = parse.urlparse(self.conf.F5_VCMP.host)
        self.vcmp_guest = self.conf.F5_VCMP.guest or self.conf.host
        self.interface_link = self.conf.F5_VCMP.interface
        self._login()

    def _login(self):
        if not self.vcmp_host.username or not self.vcmp_host.password:
            LOG.error("Need to specifcy valid F5_VCMP.host configuration: "
                      "http(s)://<username>:<password>@hostname")
            sys.exit(1)

        self.mgmt = ManagementRoot(self.vcmp_host.hostname,
                                   self.vcmp_host.username,
                                   self.vcmp_host.password,
                                   token=True)

    REQUEST_TIME_SYNC_VLAN = Summary('sync_vcmp_vlans_seconds',
                                     'Time spent processing vcmp vlans')

    @retry(retry=retry_if_exception_type((Timeout, ConnectionError)),
           wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                  RETRY_MAX),
           stop=stop_after_attempt(RETRY_ATTEMPTS))
    @REQUEST_TIME_SYNC_VLAN.time()
    def sync_vlan(self, vlans):
        v = self.mgmt.tm.net.vlans
        orig_vlans = {
            constants.PREFIX_VLAN + name: val
            for name, val in vlans.items()
        }
        for old_vlan in v.get_collection():

            # Not managed by agent
            if not old_vlan.name.startswith(constants.PREFIX_VLAN):
                pass

            # Update
            elif old_vlan.name in orig_vlans:
                vlan = orig_vlans.pop(old_vlan.name)
                if old_vlan.tag != vlan['tag'] or old_vlan.mtu != vlan['mtu']:
                    old_vlan.tag = vlan['tag']
                    old_vlan.mtu = vlan['mtu']
                    old_vlan.hardwareSynccookie = 'enabled'
                    old_vlan.update()

                if not old_vlan.interfaces_s.interfaces.exists(
                        name=self.interface_link):
                    old_vlan.interfaces_s.interfaces.create(
                        tagged=True,
                        name=self.interface_link,
                        tagMode='service')

            # orphaned, try to delete but could be used by another vcmp guest
            else:
                try:
                    old_vlan.delete()
                except iControlUnexpectedHTTPError:
                    pass

        # New ones
        for name, vlan in orig_vlans.items():
            new_vlan = v.vlan.create(name=constants.PREFIX_VLAN + name,
                                     partition='Common',
                                     tag=vlan['tag'],
                                     mtu=vlan['mtu'],
                                     hardwareSynccookie='enabled')
            new_vlan.interfaces_s.interfaces.create(tagged=True,
                                                    name=self.interface_link,
                                                    tagMode='service')

        # Assign VLANs to the correct guest, but keep mgmt networks
        try:
            guest = self.mgmt.tm.vcmp.guests.guest.load(name=self.vcmp_guest)
            # Build a list (not a map object) so .extend() below works on Python 3.
            new_vlans = [u'/Common/net-{0}'.format(name) for name in vlans]
            new_vlans.extend([
                six.text_type(mgmt_vlan) for mgmt_vlan in guest.vlans
                if not mgmt_vlan.startswith('/Common/' + constants.PREFIX_VLAN)
            ])
            if collections.Counter(new_vlans) != collections.Counter(
                    guest.vlans):
                guest.vlans = new_vlans
                guest.update()
        except iControlUnexpectedHTTPError as e:
            LOG.error("Failure configuring guest VLAN", e)
Example #6
class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
    def __init__(self):

        self._amphora_flows = amphora_flows.AmphoraFlows()
        self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows()
        self._lb_flows = load_balancer_flows.LoadBalancerFlows()
        self._listener_flows = listener_flows.ListenerFlows()
        self._member_flows = member_flows.MemberFlows()
        self._pool_flows = pool_flows.PoolFlows()
        self._l7policy_flows = l7policy_flows.L7PolicyFlows()
        self._l7rule_flows = l7rule_flows.L7RuleFlows()

        self._amphora_repo = repo.AmphoraRepository()
        self._amphora_health_repo = repo.AmphoraHealthRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = repo.PoolRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()

        super(ControllerWorker, self).__init__()

    @tenacity.retry(
        retry=(tenacity.retry_if_result(_is_provisioning_status_pending_update)
               | tenacity.retry_if_exception_type()),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _get_db_obj_until_pending_update(self, repo, id):

        return repo.get(db_apis.get_session(), id=id)

    def create_amphora(self):
        """Creates an Amphora.

        This is used to create spare amphora.

        :returns: amphora_id
        """
        try:
            create_amp_tf = self._taskflow_load(
                self._amphora_flows.get_create_amphora_flow(),
                store={
                    constants.BUILD_TYPE_PRIORITY:
                    constants.LB_CREATE_SPARES_POOL_PRIORITY,
                    constants.FLAVOR: None
                })
            with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
                create_amp_tf.run()

            return create_amp_tf.storage.fetch('amphora')
        except Exception as e:
            LOG.error('Failed to create an amphora due to: {}'.format(str(e)))

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        amphora = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        delete_amp_tf = self._taskflow_load(
            self._amphora_flows.get_delete_amphora_flow(),
            store={constants.AMPHORA: amphora})
        with tf_logging.DynamicLoggingListener(delete_amp_tf, log=LOG):
            delete_amp_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_health_monitor(self, health_monitor_id):
        """Creates a health monitor.

        :param health_monitor_id: ID of the health monitor to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
            create_hm_tf.run()

    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)

        pool = health_mon.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()

    def update_health_monitor(self, health_monitor_id, health_monitor_updates):
        """Updates a health monitor.

        :param health_monitor_id: ID of the health monitor to update
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = None
        try:
            health_mon = self._get_db_obj_until_pending_update(
                self._health_mon_repo, health_monitor_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            health_mon = e.last_attempt.result()

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        update_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_update_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: health_monitor_updates
            })
        with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG):
            update_hm_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_listener(self, listener_id):
        """Creates a listener.

        :param listener_id: ID of the listener to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        load_balancer = listener.load_balancer

        create_listener_tf = self._taskflow_load(
            self._listener_flows.get_create_listener_flow(),
            store={
                constants.LOADBALANCER: load_balancer,
                constants.LISTENERS: [listener]
            })
        with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG):
            create_listener_tf.run()

    def delete_listener(self, listener_id):
        """Deletes a listener.

        :param listener_id: ID of the listener to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        load_balancer = listener.load_balancer

        delete_listener_tf = self._taskflow_load(
            self._listener_flows.get_delete_listener_flow(),
            store={
                constants.LOADBALANCER: load_balancer,
                constants.LISTENER: listener
            })
        with tf_logging.DynamicLoggingListener(delete_listener_tf, log=LOG):
            delete_listener_tf.run()

    def update_listener(self, listener_id, listener_updates):
        """Updates a listener.

        :param listener_id: ID of the listener to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        listener = None
        try:
            listener = self._get_db_obj_until_pending_update(
                self._listener_repo, listener_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Listener did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            listener = e.last_attempt.result()

        load_balancer = listener.load_balancer

        update_listener_tf = self._taskflow_load(
            self._listener_flows.get_update_listener_flow(),
            store={
                constants.LISTENER: listener,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: listener_updates,
                constants.LISTENERS: [listener]
            })
        with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
            update_listener_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self, load_balancer_id, flavor=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        # TODO(johnsom) convert this to octavia_lib constant flavor
        # once octavia is transitioned to use octavia_lib
        store = {
            constants.LOADBALANCER_ID: load_balancer_id,
            constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
            constants.FLAVOR: flavor
        }

        topology = lb.topology

        store[constants.UPDATE_DICT] = {constants.TOPOLOGY: topology}

        create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
            topology=topology, listeners=lb.listeners)

        create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
        with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
            create_lb_tf.run()

    def delete_load_balancer(self, load_balancer_id, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer_id: ID of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)

        if cascade:
            (flow,
             store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb)
        else:
            (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb)
        store.update({
            constants.LOADBALANCER: lb,
            constants.SERVER_GROUP_ID: lb.server_group_id
        })
        delete_lb_tf = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(delete_lb_tf, log=LOG):
            delete_lb_tf.run()

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        """Updates a load balancer.

        :param load_balancer_id: ID of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = None
        try:
            lb = self._get_db_obj_until_pending_update(self._lb_repo,
                                                       load_balancer_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Load balancer did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            lb = e.last_attempt.result()

        listeners, _ = self._listener_repo.get_all(
            db_apis.get_session(), load_balancer_id=load_balancer_id)

        update_lb_tf = self._taskflow_load(
            self._lb_flows.get_update_load_balancer_flow(),
            store={
                constants.LOADBALANCER: lb,
                constants.LISTENERS: listeners,
                constants.UPDATE_DICT: load_balancer_updates
            })

        with tf_logging.DynamicLoggingListener(update_lb_tf, log=LOG):
            update_lb_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_member_tf = self._taskflow_load(
            self._member_flows.get_create_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.POOL: pool
            })
        with tf_logging.DynamicLoggingListener(create_member_tf, log=LOG):
            create_member_tf.run()

    def delete_member(self, member_id):
        """Deletes a pool member.

        :param member_id: ID of the member to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_member_tf = self._taskflow_load(
            self._member_flows.get_delete_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.POOL: pool
            })
        with tf_logging.DynamicLoggingListener(delete_member_tf, log=LOG):
            delete_member_tf.run()

    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in old_member_ids
        ]
        new_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in new_member_ids
        ]
        updated_members = [(self._member_repo.get(db_apis.get_session(),
                                                  id=m.get('id')), m)
                           for m in updated_members]
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        else:
            pool = updated_members[0][0].pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        batch_update_members_tf = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                old_members, new_members, updated_members),
            store={
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.POOL: pool
            })
        with tf_logging.DynamicLoggingListener(batch_update_members_tf,
                                               log=LOG):
            batch_update_members_tf.run()

    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = None
        try:
            member = self._get_db_obj_until_pending_update(
                self._member_repo, member_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Member did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            member = e.last_attempt.result()

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_member_tf = self._taskflow_load(
            self._member_flows.get_update_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.POOL: pool,
                constants.UPDATE_DICT: member_updates
            })
        with tf_logging.DynamicLoggingListener(update_member_tf, log=LOG):
            update_member_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_pool_tf = self._taskflow_load(
            self._pool_flows.get_create_pool_flow(),
            store={
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_pool_tf, log=LOG):
            create_pool_tf.run()

    def delete_pool(self, pool_id):
        """Deletes a node pool.

        :param pool_id: ID of the pool to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)

        load_balancer = pool.load_balancer
        listeners = pool.listeners

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(),
            store={
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_pool_tf, log=LOG):
            delete_pool_tf.run()

    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = None
        try:
            pool = self._get_db_obj_until_pending_update(
                self._pool_repo, pool_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Pool did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            pool = e.last_attempt.result()

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_pool_tf = self._taskflow_load(
            self._pool_flows.get_update_pool_flow(),
            store={
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: pool_updates
            })
        with tf_logging.DynamicLoggingListener(update_pool_tf, log=LOG):
            update_pool_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7policy(self, l7policy_id):
        """Creates an L7 Policy.

        :param l7policy_id: ID of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_create_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_l7policy_tf, log=LOG):
            create_l7policy_tf.run()

    def delete_l7policy(self, l7policy_id):
        """Deletes an L7 policy.

        :param l7policy_id: ID of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)

        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_delete_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_l7policy_tf, log=LOG):
            delete_l7policy_tf.run()

    def update_l7policy(self, l7policy_id, l7policy_updates):
        """Updates an L7 policy.

        :param l7policy_id: ID of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = None
        try:
            l7policy = self._get_db_obj_until_pending_update(
                self._l7policy_repo, l7policy_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'L7 policy did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            l7policy = e.last_attempt.result()

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: l7policy_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7policy_tf, log=LOG):
            update_l7policy_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7rule(self, l7rule_id):
        """Creates an L7 Rule.

        :param l7rule_id: ID of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_create_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_l7rule_tf, log=LOG):
            create_l7rule_tf.run()

    def delete_l7rule(self, l7rule_id):
        """Deletes an L7 rule.

        :param l7rule_id: ID of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        l7policy = l7rule.l7policy
        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_delete_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_l7rule_tf, log=LOG):
            delete_l7rule_tf.run()

    def update_l7rule(self, l7rule_id, l7rule_updates):
        """Updates an L7 rule.

        :param l7rule_id: ID of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = None
        try:
            l7rule = self._get_db_obj_until_pending_update(
                self._l7rule_repo, l7rule_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'L7 rule did not go into %s in 60 seconds. '
                'This either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            l7rule = e.last_attempt.result()

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_update_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: l7rule_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7rule_tf, log=LOG):
            update_l7rule_tf.run()

    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """

        stored_params = {
            constants.FAILED_AMPHORA: amp,
            constants.LOADBALANCER_ID: amp.load_balancer_id,
            constants.BUILD_TYPE_PRIORITY: priority,
        }

        if amp.status == constants.DELETED:
            LOG.warning(
                'Amphora %s is marked DELETED in the database but '
                'was submitted for failover. Deleting it from the '
                'amphora health table to exclude it from health '
                'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        if (CONF.house_keeping.spare_amphora_pool_size
                == 0) and (CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amp.id)
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        if lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(role=amp.role,
                                                  load_balancer=lb),
            store=stored_params)

        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """
        try:
            amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
            if not amp:
                LOG.warning(
                    "Could not fetch Amphora %s from DB, ignoring "
                    "failover request.", amphora_id)
                return
            self._perform_amphora_failover(
                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
            if amp.load_balancer_id:
                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                         amp.load_balancer_id)
                self._lb_repo.update(db_apis.get_session(),
                                     amp.load_balancer_id,
                                     provisioning_status=constants.ACTIVE)
        except Exception as e:
            try:
                self._lb_repo.update(db_apis.get_session(),
                                     amp.load_balancer_id,
                                     provisioning_status=constants.ERROR)
            except Exception:
                LOG.error("Unable to revert LB status to ERROR.")
            with excutils.save_and_reraise_exception():
                LOG.error("Failover exception: %s", e)

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        # Note: This expects that the load balancer is already in
        #       provisioning_status=PENDING_UPDATE state
        try:
            lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)

            # Exclude amphora already deleted
            amps = [a for a in lb.amphorae if a.status != constants.DELETED]
            for amp in amps:
                # failover amphora in backup role
                # Note: this amp may not currently be the backup
                # TODO(johnsom) Change this to query the amp state
                #               once the amp API supports it.
                if amp.role == constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            for amp in amps:
                # failover everything else
                if amp.role != constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            self._lb_repo.update(db_apis.get_session(),
                                 load_balancer_id,
                                 provisioning_status=constants.ACTIVE)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("LB %(lbid)s failover exception: %(exc)s", {
                    'lbid': load_balancer_id,
                    'exc': e
                })
                self._lb_repo.update(db_apis.get_session(),
                                     load_balancer_id,
                                     provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id)

        certrotation_amphora_tf = self._taskflow_load(
            self._amphora_flows.cert_rotate_amphora_flow(),
            store={
                constants.AMPHORA: amp,
                constants.AMPHORA_ID: amp.id
            })

        with tf_logging.DynamicLoggingListener(certrotation_amphora_tf,
                                               log=LOG):
            certrotation_amphora_tf.run()

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
              update the running configuration for mutatable configuration
              items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info(
            "Start amphora agent configuration update, amphora's id "
            "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        update_amphora_tf = self._taskflow_load(
            self._amphora_flows.update_amphora_config_flow(),
            store={
                constants.AMPHORA: amp,
                constants.FLAVOR: flavor
            })

        with tf_logging.DynamicLoggingListener(update_amphora_tf, log=LOG):
            update_amphora_tf.run()
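_get_db_obj_until_pending_update above combines two retry conditions with |: keep retrying while a predicate on the returned object is true, and also on any exception. A minimal, self-contained sketch of that combination (the predicate and fake fetch below are hypothetical stand-ins):

import tenacity

attempts = {"n": 0}


def _still_pending(result):
    # Stand-in for _is_provisioning_status_pending_update.
    return result == "PENDING_UPDATE"


@tenacity.retry(
    retry=(tenacity.retry_if_result(_still_pending)
           | tenacity.retry_if_exception_type()),
    wait=tenacity.wait_incrementing(start=0, increment=0),
    stop=tenacity.stop_after_attempt(5))
def fetch_status():
    attempts["n"] += 1
    return "PENDING_UPDATE" if attempts["n"] < 3 else "ACTIVE"


assert fetch_status() == "ACTIVE"
assert attempts["n"] == 3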
Example #7
        hasattr(ex, 'output')

    return ex


def _get_workflow_execution_resource(wf_ex):
    _load_deferred_output_field(wf_ex)

    return resources.Execution.from_db_model(wf_ex)


# Use retries to prevent possible failures.
@tenacity.retry(
    retry=tenacity.retry_if_exception_type(sa.exc.OperationalError),
    stop=tenacity.stop_after_attempt(10),
    wait=tenacity.wait_incrementing(increment=100)  # 0.1 seconds
)
def _get_workflow_execution(id, must_exist=True):
    with db_api.transaction():
        if must_exist:
            wf_ex = db_api.get_workflow_execution(id)
        else:
            wf_ex = db_api.load_workflow_execution(id)

        return _load_deferred_output_field(wf_ex)


# TODO(rakhmerov): Make sure to make all needed renaming on public API.


class ExecutionsController(rest.RestController):
Example #8
class StatusManager(object):
    def __init__(self):
        # On Stein we don't have the get_socket option yet. We can't just pass None though, because while
        # DriverLibrary.__init__() doesn't have a problem with it due to taking **kwargs, it passes **kwargs to its
        # super class, which is object, which does not take any arguments...
        try:
            self._octavia_driver_lib = driver_lib.DriverLibrary(
                status_socket=CONF.driver_agent.status_socket_path,
                stats_socket=CONF.driver_agent.stats_socket_path,
                get_socket=CONF.driver_agent.get_socket_path,
            )
        except cfg.NoSuchOptError as e:
            if e.opt_name != 'get_socket_path':
                raise e
            self._octavia_driver_lib = driver_lib.DriverLibrary(
                status_socket=CONF.driver_agent.status_socket_path,
                stats_socket=CONF.driver_agent.stats_socket_path,
            )

    def status_dict(self, obj, cascade=False):
        """ Returns status dict for octavia object,
         deleted if status was PENDING_DELETE, else active.
         Ignores error status.

        :param obj: octavia object
        """

        # Cascade Delete: force deleted
        if cascade:
            return [self._status_obj(obj, lib_consts.DELETED)]

        # Don't update errored objects
        if obj.provisioning_status == lib_consts.ERROR:
            return []

        # Don't update already active objects:
        if obj.provisioning_status == lib_consts.ACTIVE:
            return []

        if utils.pending_delete(obj):
            return [self._status_obj(obj, lib_consts.DELETED)]
        else:
            return [self._status_obj(obj, lib_consts.ACTIVE)]

    def update_status(self, loadbalancers):
        """For each load balancer set the provisioning_status of it and all its children to ACTIVE if it is
        PENDING_UPDATE or PENDING_CREATE, or to DELETED if it is PENDING_DELETE. Ignore ERROR status.

        :param loadbalancers: octavia loadbalancers list
        """

        status = defaultdict(list)

        # Load Balancers
        for loadbalancer in loadbalancers:
            cascade = False
            status[lib_consts.LOADBALANCERS].extend(
                self.status_dict(loadbalancer))

            # Cascade?
            if loadbalancer.provisioning_status == lib_consts.PENDING_DELETE:
                cascade = True

            # Listeners
            for listener in loadbalancer.listeners:
                status[lib_consts.LISTENERS].extend(
                    self.status_dict(listener, cascade))

                # L7Policies
                for l7policy in listener.l7policies:
                    status[lib_consts.L7POLICIES].extend(
                        self.status_dict(l7policy, cascade))

                    # L7Rules
                    for l7rule in l7policy.l7rules:
                        status[lib_consts.L7RULES].extend(
                            self.status_dict(l7rule, cascade))

            # Pools
            for pool in loadbalancer.pools:
                status[lib_consts.POOLS].extend(self.status_dict(
                    pool, cascade))

                # Members
                for member in pool.members:
                    status[lib_consts.MEMBERS].extend(
                        self.status_dict(member, cascade))

                # Health Monitors
                if pool.health_monitor:
                    status[lib_consts.HEALTHMONITORS].extend(
                        self.status_dict(pool.health_monitor, cascade))

        self._update_status_to_octavia(status)

    @staticmethod
    def _status_obj(obj, provisioning_status):
        """Return status object for statup update api consumption

        :param obj: octavia object containing ID
        :param provisioning_status: provisioning status
        :return: status object
        """
        status_obj = {
            lib_consts.ID: obj.id,
            lib_consts.PROVISIONING_STATUS: provisioning_status
        }

        if isinstance(obj, data_models.LoadBalancer
                      ) and provisioning_status == lib_consts.ACTIVE:
            status_obj[lib_consts.OPERATING_STATUS] = lib_consts.ONLINE
        if isinstance(obj, data_models.HealthMonitor
                      ) and provisioning_status == lib_consts.ACTIVE:
            status_obj[lib_consts.OPERATING_STATUS] = lib_consts.ONLINE
        if isinstance(obj, data_models.L7Policy
                      ) and provisioning_status == lib_consts.ACTIVE:
            status_obj[lib_consts.OPERATING_STATUS] = lib_consts.ONLINE
        if isinstance(obj, data_models.L7Rule
                      ) and provisioning_status == lib_consts.ACTIVE:
            status_obj[lib_consts.OPERATING_STATUS] = lib_consts.ONLINE

        return status_obj

    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    wait=tenacity.wait_incrementing(start=1, increment=10),
                    stop=tenacity.stop_after_attempt(max_attempt_number=3))
    def _update_status_to_octavia(self, status):
        try:
            self._octavia_driver_lib.update_loadbalancer_status(status)
        except driver_exceptions.UpdateStatusError as e:
            msg = ("Error while updating status to octavia: "
                   "%s") % e.fault_string
            LOG.error(msg)
            raise driver_exceptions.UpdateStatusError(msg)
        finally:
            # Update amphora to DELETED if LB is DELETED, so that they get cleaned up together
            amp_repo = AmphoraRepository()
            session = db_apis.get_session()
            lbs = status.get('loadbalancers') or []
            for lb in lbs:
                if lb['provisioning_status'] == lib_consts.DELETED:
                    amp_repo.update(session,
                                    lb['id'],
                                    status=lib_consts.DELETED,
                                    force_provisioning_status=True)

    @staticmethod
    def get_obj_type(obj):
        if isinstance(obj, data_models.LoadBalancer):
            return lib_consts.LOADBALANCERS

        # Listener
        if isinstance(obj, data_models.Listener):
            return lib_consts.LISTENERS

        # Pool
        if isinstance(obj, data_models.Pool):
            return lib_consts.POOLS

        # Member
        if isinstance(obj, data_models.Member):
            return lib_consts.MEMBERS

        # Health Monitor
        if isinstance(obj, data_models.HealthMonitor):
            return lib_consts.HEALTHMONITORS

        # L7Policy
        if isinstance(obj, data_models.L7Policy):
            return lib_consts.L7POLICIES

        # L7Rule
        if isinstance(obj, data_models.L7Rule):
            return lib_consts.L7RULES

    def set_error(self, obj):
        """Set provisioning_state of octavia object to ERROR
        :param obj: octavia object like loadbalancer, pools, etc.
        """
        obj.provisioning_status = lib_consts.ERROR
        self._update_status_to_octavia({
            self.get_obj_type(obj): [self._status_obj(obj, lib_consts.ERROR)]
        })
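_update_status_to_octavia retries on any exception (retry_if_exception_type() with no arguments) and, because it does not pass reraise=True, exhausting the attempts raises tenacity.RetryError; the original failure stays reachable through last_attempt, the same pattern Example #6 uses with e.last_attempt.result(). A self-contained sketch (the error message is made up):

import tenacity


@tenacity.retry(retry=tenacity.retry_if_exception_type(),
                wait=tenacity.wait_fixed(0),
                stop=tenacity.stop_after_attempt(3))
def always_fails():
    raise RuntimeError("octavia status socket unreachable")  # hypothetical failure


try:
    always_fails()
except tenacity.RetryError as err:
    # Without reraise=True the last failure is wrapped in RetryError.
    assert isinstance(err.last_attempt.exception(), RuntimeError)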
Example #9
def test_incrementing_sleep(self):
    r = Retrying(wait=tenacity.wait_incrementing(start=500, increment=100))
    self.assertEqual(500, r.wait(1, 6546))
    self.assertEqual(600, r.wait(2, 6546))
    self.assertEqual(700, r.wait(3, 6546))
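The values the test expects follow directly from wait_incrementing's formula, start + increment * (attempt_number - 1); a plain-Python helper (illustrative only, the units depend on the installed tenacity/retrying version) reproduces them:

def incrementing_wait(attempt_number, start, increment, maximum=None):
    delay = start + increment * (attempt_number - 1)
    if maximum is not None:
        delay = min(delay, maximum)
    return max(delay, 0)


assert [incrementing_wait(n, start=500, increment=100) for n in (1, 2, 3)] == [500, 600, 700]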
Example #10
def main(msg: func.QueueMessage) -> None:
    """
    Main function, triggered by Azure Storage Queue, parsed queue content and call ingest_to_ADX
    :param msg: func.QueueMessage
    :return: None
    """
    logging.info('Python queue trigger function processed a queue item: %s',
                 msg.get_body().decode('utf-8'))
    # Set the logging level for all azure-* libraries
    logging.getLogger('azure').setLevel(logging.WARNING)
    modification_time = None
    get_config_values()
    #get message content
    #queue from checkpoint function
    content_json = json.loads(msg.get_body().decode('utf-8'))
    file_url = content_json['data']['url']
    logging.info(f"{LOG_MESSAGE_HEADER} file_url:{file_url}")
    msg_time = p.parse(content_json['eventTime'])
    try:
        #modification time is the time databricks processed finished
        modification_time = p.parse(content_json['modificationTime'])
    except Exception:
        modification_time = msg_time

    #get file size from storage queue directly
    file_size = content_json['data']['contentLength']
    # Sharing: New logic based on new schema
    target_database, target_table = get_target_info(file_url)
    logging.info(f"{LOG_MESSAGE_HEADER} target_database:{target_database}, target_table:{target_table}")
    
    #set up if log table connection when duplicate check enable
    if IS_DUPLICATE_CHECK:
        #get log table name
        get_table_connection()
        log_table_name = get_logtable_name()
        check_create_table(log_table_name)
        #get table connection
        get_table_connection()
        processed_Flag, log_table_key = check_file_process_log(log_table_name, file_url, target_database)
    else:
        processed_Flag = False
    #use regexp to check file
    regexp = re.compile(EVENT_SUBJECT_FILTER_REGEX)
    if regexp.search(file_url):  # Check if file path match criteria
        if processed_Flag is False:
            # initialize kusto client
            initialize_kusto_client()
            # Retry max RETRY_MAX_ATTEMPT_NUMBER times
            # Wait starting from a random 0-3 secs, incrementing until the max wait time
            retryer = Retrying(
                stop=stop_after_attempt(RETRY_MAX_ATTEMPT_NUMBER),
                wait=wait_random(0, 3) + wait_incrementing(
                    start=1,
                    increment=RETRY_WAIT_INCREMENT_VALUE,
                    max=RETRY_MAX_WAIT_TIME),
                before_sleep=before_sleep_log(logging, logging.WARNING),
                reraise=True)
            ingest_source_id = retryer(
                ingest_to_adx, file_url, file_size, target_database,
                target_table, msg_time, modification_time)
            logging.info(f"ingest_source_id:{ingest_source_id}")

            if IS_DUPLICATE_CHECK:
                # Add a log entry to the log table
                insert_log_table(log_table_name, target_database, log_table_key, msg_time,
                                 modification_time, file_url, ingest_source_id)
        else:
            logging.warning(f"{DUPLICATE_EVENT_NAME} DUPLICATE DATA Subject : {file_url} "
                            "has been processed already. Skip process.")
    else:
        logging.warning(
            "%s Subject : %s does not match regular expression %s. Skip process.",
            LOG_MESSAGE_HEADER, file_url, EVENT_SUBJECT_FILTER_REGEX)
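
A minimal, self-contained sketch of the retry policy used above, with hypothetical stand-ins for the RETRY_* configuration values: a Retrying object that adds random jitter to a linear increment, logs a warning before each sleep, and re-raises the last error once the attempts are exhausted. Recent tenacity releases allow calling the Retrying object directly with the target function and its arguments; older releases exposed the same behaviour through a .call() method.

import logging
import tenacity

RETRY_MAX_ATTEMPTS = 5        # hypothetical stand-in for RETRY_MAX_ATTEMPT_NUMBER
RETRY_INCREMENT_SECONDS = 2   # hypothetical stand-in for RETRY_WAIT_INCREMENT_VALUE
RETRY_MAX_WAIT_SECONDS = 30   # hypothetical stand-in for RETRY_MAX_WAIT_TIME

retryer = tenacity.Retrying(
    stop=tenacity.stop_after_attempt(RETRY_MAX_ATTEMPTS),
    wait=tenacity.wait_random(0, 3) + tenacity.wait_incrementing(
        start=1, increment=RETRY_INCREMENT_SECONDS, max=RETRY_MAX_WAIT_SECONDS),
    before_sleep=tenacity.before_sleep_log(logging.getLogger(__name__), logging.WARNING),
    reraise=True)

def ingest(url):
    # Placeholder for the real ingestion call.
    return "ingested %s" % url

print(retryer(ingest, "https://example.invalid/data.csv"))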
Example #11
class A10ControllerWorker(base_taskflow.BaseTaskFlowEngine):
    def __init__(self):
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._pool_repo = repo.PoolRepository()
        self._member_repo = a10repo.MemberRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._lb_flows = a10_load_balancer_flows.LoadBalancerFlows()
        self._listener_flows = a10_listener_flows.ListenerFlows()
        self._pool_flows = a10_pool_flows.PoolFlows()
        self._member_flows = a10_member_flows.MemberFlows()
        self._health_monitor_flows = a10_health_monitor_flows.HealthMonitorFlows(
        )
        self._l7policy_flows = a10_l7policy_flows.L7PolicyFlows()
        self._l7rule_flows = a10_l7rule_flows.L7RuleFlows()
        self._vthunder_flows = vthunder_flows.VThunderFlows()
        self._vthunder_repo = a10repo.VThunderRepository()
        self._exclude_result_logging_tasks = ()
        super(A10ControllerWorker, self).__init__()

    def create_amphora(self):
        """Creates an Amphora.

        This is used to create spare amphora.

        :returns: amphora_id
        """
        create_vthunder_tf = self._taskflow_load(
            self._vthunder_flows.get_create_vthunder_flow(),
            store={
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_SPARES_POOL_PRIORITY
            })
        with tf_logging.DynamicLoggingListener(create_vthunder_tf, log=LOG):
            create_vthunder_tf.run()

        return create_vthunder_tf.storage.fetch('amphora')

    def create_health_monitor(self, health_monitor_id):
        """Creates a health monitor.

        :param health_monitor_id: ID of the health monitor to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                a10constants.WRITE_MEM_SHARED_PART: True
            })
        with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
            create_hm_tf.run()

    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)

        pool = health_mon.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                a10constants.WRITE_MEM_SHARED_PART: True
            })
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()

    def update_health_monitor(self, health_monitor_id, health_monitor_updates):
        """Updates a health monitor.

        :param health_monitor_id: ID of the health monitor to update
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = None
        try:
            health_mon = self._get_db_obj_until_pending_update(
                self._health_mon_repo, health_monitor_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            health_mon = e.last_attempt.result()

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        update_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_update_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: health_monitor_updates,
                a10constants.WRITE_MEM_SHARED_PART: True
            })
        with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG):
            update_hm_tf.run()

    def create_listener(self, listener_id):
        """Function to create listener for A10 provider"""

        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        load_balancer = listener.load_balancer
        parent_project_list = utils.get_parent_project_list()
        listener_parent_proj = utils.get_parent_project(listener.project_id)

        if (listener.project_id in parent_project_list
                or (listener_parent_proj
                    and listener_parent_proj in parent_project_list)
                or listener.project_id in CONF.hardware_thunder.devices):
            create_listener_tf = self._taskflow_load(
                self._listener_flows.get_rack_vthunder_create_listener_flow(
                    listener.project_id),
                store={
                    constants.LOADBALANCER: load_balancer,
                    constants.LISTENER: listener
                })
        else:
            create_listener_tf = self._taskflow_load(
                self._listener_flows.get_create_listener_flow(),
                store={
                    constants.LOADBALANCER: load_balancer,
                    constants.LISTENER: listener
                })

        with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG):
            create_listener_tf.run()

    def delete_listener(self, listener_id):
        """Function to delete a listener for A10 provider"""

        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        load_balancer = listener.load_balancer

        if listener.project_id in CONF.hardware_thunder.devices:
            delete_listener_tf = self._taskflow_load(
                self._listener_flows.get_delete_rack_listener_flow(),
                store={
                    constants.LOADBALANCER: load_balancer,
                    constants.LISTENER: listener
                })
        else:
            delete_listener_tf = self._taskflow_load(
                self._listener_flows.get_delete_listener_flow(),
                store={
                    constants.LOADBALANCER: load_balancer,
                    constants.LISTENER: listener
                })
        with tf_logging.DynamicLoggingListener(delete_listener_tf, log=LOG):
            delete_listener_tf.run()

    def update_listener(self, listener_id, listener_updates):
        """Function to Update a listener for A10 provider"""

        listener = None
        try:
            listener = self._get_db_obj_until_pending_update(
                self._listener_repo, listener_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Listener did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            listener = e.last_attempt.result()

        load_balancer = listener.load_balancer

        update_listener_tf = self._taskflow_load(
            self._listener_flows.get_update_listener_flow(),
            store={
                constants.LISTENER: listener,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: listener_updates
            })
        with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
            update_listener_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self, load_balancer_id, flavor=None):
        """Function to create load balancer for A10 provider"""

        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        store = {
            constants.LOADBALANCER_ID: load_balancer_id,
            constants.VIP: lb.vip,
            constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
            constants.FLAVOR: flavor
        }

        topology = CONF.a10_controller_worker.loadbalancer_topology

        store[constants.UPDATE_DICT] = {constants.TOPOLOGY: topology}

        if lb.project_id in CONF.hardware_thunder.devices:
            create_lb_flow = self._lb_flows.get_create_rack_vthunder_load_balancer_flow(
                vthunder_conf=CONF.hardware_thunder.devices[lb.project_id],
                topology=topology,
                listeners=lb.listeners)
            create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
        else:
            create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
                topology=topology, listeners=lb.listeners)
            create_lb_tf = self._taskflow_load(create_lb_flow, store=store)

        with tf_logging.DynamicLoggingListener(
                create_lb_tf,
                log=LOG,
                hide_inputs_outputs_of=self._exclude_result_logging_tasks):
            create_lb_tf.run()

    def delete_load_balancer(self, load_balancer_id, cascade=False):
        """Function to delete load balancer for A10 provider"""

        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        vthunder = self._vthunder_repo.get_vthunder_from_lb(
            db_apis.get_session(), load_balancer_id)
        deleteCompute = False
        if vthunder:
            deleteCompute = self._vthunder_repo.get_delete_compute_flag(
                db_apis.get_session(), vthunder.compute_id)
        if cascade:
            (flow, store) = self._lb_flows.get_delete_load_balancer_flow(
                lb, deleteCompute, True)
        else:
            (flow, store) = self._lb_flows.get_delete_load_balancer_flow(
                lb, deleteCompute, False)

        store.update({
            constants.LOADBALANCER: lb,
            constants.VIP: lb.vip,
            constants.SERVER_GROUP_ID: lb.server_group_id
        })

        delete_lb_tf = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(delete_lb_tf, log=LOG):
            delete_lb_tf.run()

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        """Function to update load balancer for A10 provider"""

        lb = None
        try:
            lb = self._get_db_obj_until_pending_update(self._lb_repo,
                                                       load_balancer_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Load balancer did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            lb = e.last_attempt.result()

        listeners, _ = self._listener_repo.get_all(
            db_apis.get_session(), load_balancer_id=load_balancer_id)

        update_lb_tf = self._taskflow_load(
            self._lb_flows.get_update_load_balancer_flow(),
            store={
                constants.LOADBALANCER: lb,
                constants.VIP: lb.vip,
                constants.LISTENERS: listeners,
                constants.UPDATE_DICT: load_balancer_updates
            })

        with tf_logging.DynamicLoggingListener(update_lb_tf, log=LOG):
            update_lb_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """

        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        topology = CONF.a10_controller_worker.loadbalancer_topology
        parent_project_list = utils.get_parent_project_list()
        member_parent_proj = utils.get_parent_project(member.project_id)

        if (member.project_id in parent_project_list or
            (member_parent_proj and member_parent_proj in parent_project_list)
                or member.project_id in CONF.hardware_thunder.devices):
            create_member_tf = self._taskflow_load(
                self._member_flows.get_rack_vthunder_create_member_flow(),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool
                })
        else:
            create_member_tf = self._taskflow_load(
                self._member_flows.get_create_member_flow(topology=topology),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool
                })

        with tf_logging.DynamicLoggingListener(create_member_tf, log=LOG):
            create_member_tf.run()

    def delete_member(self, member_id):
        """Deletes a pool member.

        :param member_id: ID of the member to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        if member.project_id in CONF.hardware_thunder.devices:
            delete_member_tf = self._taskflow_load(
                self._member_flows.get_rack_vthunder_delete_member_flow(),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool
                })
        else:
            delete_member_tf = self._taskflow_load(
                self._member_flows.get_delete_member_flow(),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool
                })
        with tf_logging.DynamicLoggingListener(delete_member_tf, log=LOG):
            delete_member_tf.run()

    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):

        raise exceptions.NotImplementedError(
            user_fault_string='This provider does not support members yet',
            operator_fault_string='This provider does not support members yet')

    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = None
        try:
            member = self._get_db_obj_until_pending_update(
                self._member_repo, member_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Member did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            member = e.last_attempt.result()

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        if member.project_id in CONF.hardware_thunder.devices:
            update_member_tf = self._taskflow_load(
                self._member_flows.get_rack_vthunder_update_member_flow(),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool,
                    constants.UPDATE_DICT: member_updates
                })
        else:
            update_member_tf = self._taskflow_load(
                self._member_flows.get_update_member_flow(),
                store={
                    constants.MEMBER: member,
                    constants.LISTENERS: listeners,
                    constants.LOADBALANCER: load_balancer,
                    constants.POOL: pool,
                    constants.UPDATE_DICT: member_updates
                })

        with tf_logging.DynamicLoggingListener(update_member_tf, log=LOG):
            update_member_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        listeners = pool.listeners
        default_listener = None
        if listeners:
            default_listener = pool.listeners[0]
        load_balancer = pool.load_balancer

        create_pool_tf = self._taskflow_load(
            self._pool_flows.get_create_pool_flow(),
            store={
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LISTENER: default_listener,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_pool_tf, log=LOG):
            create_pool_tf.run()

    def delete_pool(self, pool_id):
        """Deletes a node pool.

        :param pool_id: ID of the pool to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)

        load_balancer = pool.load_balancer
        listeners = pool.listeners
        default_listener = None
        if listeners:
            default_listener = pool.listeners[0]
        members = pool.members
        health_monitor = pool.health_monitor
        mem_count = self._member_repo.get_member_count(
            db_apis.get_session(), project_id=pool.project_id)
        mem_count = mem_count - len(members) + 1
        store = {
            constants.POOL: pool,
            constants.LISTENERS: listeners,
            constants.LISTENER: default_listener,
            constants.LOADBALANCER: load_balancer,
            constants.HEALTH_MON: health_monitor,
            a10constants.MEMBER_COUNT: mem_count
        }

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(members, health_monitor,
                                                  store),
            store=store)
        with tf_logging.DynamicLoggingListener(delete_pool_tf, log=LOG):
            delete_pool_tf.run()

    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """

        pool = None
        try:
            pool = self._get_db_obj_until_pending_update(
                self._pool_repo, pool_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'Pool did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            pool = e.last_attempt.result()

        listeners = pool.listeners
        default_listener = None
        if listeners:
            default_listener = pool.listeners[0]
        load_balancer = pool.load_balancer

        update_pool_tf = self._taskflow_load(
            self._pool_flows.get_update_pool_flow(),
            store={
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LISTENER: default_listener,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: pool_updates
            })
        with tf_logging.DynamicLoggingListener(update_pool_tf, log=LOG):
            update_pool_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7policy(self, l7policy_id):
        """Creates an L7 Policy.

        :param l7policy_id: ID of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_create_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_l7policy_tf, log=LOG):
            create_l7policy_tf.run()

    def delete_l7policy(self, l7policy_id):
        """Deletes an L7 policy.
        :param l7policy_id: ID of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)

        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_delete_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_l7policy_tf, log=LOG):
            delete_l7policy_tf.run()

    def update_l7policy(self, l7policy_id, l7policy_updates):
        """Updates an L7 policy.

        :param l7policy_id: ID of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """

        l7policy = None
        try:
            l7policy = self._get_db_obj_until_pending_update(
                self._l7policy_repo, l7policy_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'L7 policy did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            l7policy = e.last_attempt.result()

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: l7policy_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7policy_tf, log=LOG):
            update_l7policy_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7rule(self, l7rule_id):
        """Creates an L7 Rule.

        :param l7rule_id: ID of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_create_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(create_l7rule_tf, log=LOG):
            create_l7rule_tf.run()

    def delete_l7rule(self, l7rule_id):
        """Deletes an L7 rule.
        :param l7rule_id: ID of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        l7policy = l7rule.l7policy
        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_delete_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_l7rule_tf, log=LOG):
            delete_l7rule_tf.run()

    def update_l7rule(self, l7rule_id, l7rule_updates):
        """Updates an L7 rule.

        :param l7rule_id: ID of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = None
        try:
            l7rule = self._get_db_obj_until_pending_update(
                self._l7rule_repo, l7rule_id)
        except tenacity.RetryError as e:
            LOG.warning(
                'L7 rule did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            l7rule = e.last_attempt.result()

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_update_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer,
                constants.UPDATE_DICT: l7rule_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7rule_tf, log=LOG):
            update_l7rule_tf.run()

    def _switch_roles_for_ha_flow(self, vthunder):
        lock_session = db_apis.get_session(autocommit=False)
        if vthunder.role == constants.ROLE_MASTER:
            LOG.info(
                "Master vThunder %s has failed, searching for existing backup vThunder.",
                vthunder.ip_address)
            backup_vthunder = self._vthunder_repo.get_backup_vthunder_from_lb(
                lock_session, lb_id=vthunder.loadbalancer_id)
            if backup_vthunder:
                LOG.info("Making Backup vThunder %s as MASTER NOW",
                         backup_vthunder.ip_address)
                self._vthunder_repo.update(lock_session,
                                           backup_vthunder.id,
                                           role=constants.ROLE_MASTER)

                LOG.info("Putting %s to failed vThunders", vthunder.ip_address)

                self._vthunder_repo.update(lock_session,
                                           vthunder.id,
                                           role=constants.ROLE_BACKUP,
                                           status=a10constants.FAILED)

                lock_session.commit()
                LOG.info("vThunder %s's status is FAILED", vthunder.ip_address)
                status = {
                    'vthunders': [{
                        "id": vthunder.vthunder_id,
                        "status": a10constants.FAILED,
                        "ip_address": vthunder.ip_address
                    }]
                }
                LOG.info(str(status))
            else:
                LOG.warning("No backup found for failed MASTER %s",
                            vthunder.ip_address)

        elif vthunder.role == constants.ROLE_BACKUP:
            LOG.info("BACKUP vThunder %s has failed", vthunder.ip_address)
            self._vthunder_repo.update(lock_session,
                                       vthunder.id,
                                       status=a10constants.FAILED)
            LOG.info("vThunder %s's status is FAILED", vthunder.ip_address)
            status = {
                'vthunders': [{
                    "id": vthunder.vthunder_id,
                    "status": a10constants.FAILED,
                    "ip_address": vthunder.ip_address
                }]
            }
            lock_session.commit()
            LOG.info(str(status))
        # TODO(ytsai-a10) check if we need to call lock_session.close() to release db lock

    def failover_amphora(self, vthunder_id):
        """Perform failover operations for an vThunder.
        :param vthunder_id: ID for vThunder to failover
        :returns: None
        """
        try:
            vthunder = self._vthunder_repo.get(db_apis.get_session(),
                                               vthunder_id=vthunder_id)
            if not vthunder:
                LOG.warning(
                    "Could not fetch vThunder %s from DB, ignoring "
                    "failover request.", vthunder.vthunder_id)
                return

            LOG.info("Starting Failover process on %s", vthunder.ip_address)
            # feature : db role switching for HA flow
            self._switch_roles_for_ha_flow(vthunder)

            # TODO(hthompson6) delete failed one
            # TODO(hthompson6) boot up new amps
            # TODO(hthompson6) vrrp sync

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("vThunder %(id)s failover exception: %(exc)s", {
                    'id': vthunder_id,
                    'exc': e
                })

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        raise exceptions.NotImplementedError(
            user_fault_string='This provider does not support loadbalancer '
            'failover yet.',
            operator_fault_string='This provider does not support loadbalancer '
            'failover yet.')

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        raise exceptions.NotImplementedError(
            user_fault_string='This provider does not support rotating Amphora '
            'certs.',
            operator_fault_string='This provider does not support rotating '
            'Amphora certs. We will use preconfigured '
            'devices.')

    def _get_db_obj_until_pending_update(self, repo, id):
        return repo.get(db_apis.get_session(), id=id)

    def perform_write_memory(self, thunders):
        """Perform write memory operations for a thunders

        :param thunders: group of thunder objects
        :returns: None
        """
        store = {a10constants.WRITE_MEM_SHARED_PART: True}

        for vthunder in thunders:
            delete_compute = False
            if vthunder.status == 'DELETED' and vthunder.compute_id is not None:
                delete_compute = self._vthunder_repo.get_delete_compute_flag(
                    db_apis.get_session(), vthunder.compute_id)
            try:
                write_mem_tf = self._taskflow_load(
                    self._vthunder_flows.get_write_memory_flow(
                        vthunder, store, delete_compute),
                    store=store)

                with tf_logging.DynamicLoggingListener(write_mem_tf, log=LOG):
                    write_mem_tf.run()
            except Exception:
                # continue on other thunders (assume exception is logged)
                pass

    def perform_reload_check(self, thunders):
        """Perform check for thunders see if thunder reload before write memory

        :param thunders: group of thunder objects
        :returns: None
        """
        store = {}
        for vthunder in thunders:
            try:
                reload_check_tf = self._taskflow_load(
                    self._vthunder_flows.get_reload_check_flow(
                        vthunder, store),
                    store=store)
                with tf_logging.DynamicLoggingListener(reload_check_tf,
                                                       log=LOG):
                    reload_check_tf.run()
            except Exception:
                # continue on other thunders (assume exception is logged)
                pass
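
A minimal sketch of the fetch-and-retry pattern the worker methods above rely on, with a hypothetical repository stub and made-up RETRY_* constants: the method raises NoResultFound while the DB row is not visible yet, and the tenacity decorator keeps re-running the whole method with a linearly increasing wait.

import tenacity

class NoResultFound(Exception):
    """Stand-in for the db_exceptions.NoResultFound used above."""

RETRY_ATTEMPTS = 15       # hypothetical stand-ins for the module-level constants
RETRY_INITIAL_DELAY = 1
RETRY_BACKOFF = 1
RETRY_MAX = 5

class Worker(object):
    def __init__(self, repo):
        self._repo = repo

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        pool = self._repo.get(pool_id)
        if not pool:
            # The row may not be committed yet; raising triggers another attempt.
            raise NoResultFound()
        return pool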
Example #12
class Notifier:
  _KEEP_ALIVE_INTERVAL = 10.0
  _TIME_TO_HANDLE_REQUESTS = 100e-3

  def __init__(self, port: int):
    self._configurations = []
    self._condition = asyncio.Condition()

    local_ip = self._get_local_ip()
    self._json = {
      'local_reg': {
        'ip': local_ip,
        'notify': 0,
        'port': port,
        'uri': "/local_lan"
      }
    }

  def _get_local_ip(self):
    sock = None
    try:
      sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
      sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
      sock.connect(('10.255.255.255', 1))
      return sock.getsockname()[0]
    finally:
      if sock:
        sock.close()

  def register_device(self, device: BaseDevice):
    if device not in self._configurations:
      headers = {
        'Accept': 'application/json',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json',
        'Host': device.ip_address,
        'Accept-Encoding': 'gzip'
      }
      self._configurations.append(_NotifyConfiguration(device, headers, False, 0))

  async def _notify(self):
    async with self._condition:
      self._condition.notify_all()

  def notify(self):
    loop = asyncio.get_event_loop()
    asyncio.run_coroutine_threadsafe(self._notify(), loop)

  async def start(self):
    async with aiohttp.ClientSession(conn_timeout=5.0) as session:
      async with self._condition:
        while True:
          queues_empty = True
          try:
            for entry in self._configurations:
              now = time.time()
              queue_size = entry.device.commands_queue.qsize()
              if queue_size > 1:
                queues_empty = False
              if now - entry.last_timestamp >= self._KEEP_ALIVE_INTERVAL or queue_size > 0:
                await self._perform_request(session, entry)
                entry.last_timestamp = now
          except Exception:
            logging.exception('[KeepAlive] Failed to send local_reg keep alive to the AC.')
          if queues_empty:
            logging.debug('[KeepAlive] Waiting for notification or timeout')
            try:
              await asyncio.wait_for(self._condition.wait(), timeout=self._KEEP_ALIVE_INTERVAL)
              #await self._wait_on_condition_with_timeout(self._condition, self._KEEP_ALIVE_INTERVAL)
            except concurrent.futures.TimeoutError:
              pass
          else:
            # give some time to clean up the queues
            await asyncio.sleep(self._TIME_TO_HANDLE_REQUESTS)

  @retry(retry=retry_if_exception_type(ConnectionError), wait=wait_incrementing(start=0.5, increment=1.5, max=10))
  async def _perform_request(self, session: aiohttp.ClientSession, config: _NotifyConfiguration) -> None:
    method = 'PUT' if config.alive else 'POST'
    self._json['local_reg']['notify'] = int(config.device.commands_queue.qsize() > 0)
    url = 'http://{}/local_reg.json'.format(config.device.ip_address)
    try:
      logging.debug('[KeepAlive] Sending {} {} {}'.format(method, url, json.dumps(self._json)))
      async with session.request(method, url, json=self._json, headers=config.headers) as resp:
        if resp.status != HTTPStatus.ACCEPTED.value:
          resp_data = await resp.text()
          logging.error('[KeepAlive] Sending local_reg failed: {}, {}'.format(resp.status, resp_data))
          raise ConnectionError('Sending local_reg failed: {}, {}'.format(resp.status, resp_data))
    except:
      config.alive = False
      raise
    config.alive = True
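
_perform_request above decorates a coroutine, which tenacity supports with the same @retry decorator. A minimal sketch of that usage with a hypothetical probe coroutine, retried on ConnectionError with an incrementing wait:

import asyncio
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_incrementing

@retry(retry=retry_if_exception_type(ConnectionError),
       wait=wait_incrementing(start=0.5, increment=1.5, max=10),
       stop=stop_after_attempt(4),
       reraise=True)
async def probe(host):
    # A real implementation would open a connection here.
    if host.startswith("bad"):
        raise ConnectionError("cannot reach %s" % host)
    return "ok"

print(asyncio.run(probe("good-host")))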
Example #13
def get_tasks_in_group(group_id):
    tasks = []

    def _save_tasks(response):
        tasks.extend(response["tasks"])

    queue = taskcluster.get_service("queue")
    queue.listTaskGroup(group_id, paginationHandler=_save_tasks)

    return tasks
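
The download_binary helper that follows sets reraise=True; without it, tenacity raises its own RetryError once the attempts are exhausted instead of the original exception. A minimal sketch of the difference, using a hypothetical always-failing function:

import tenacity

@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_incrementing(start=0, increment=0.1),
                reraise=True)
def always_fails():
    raise IOError("network hiccup")

try:
    always_fails()
except IOError:
    # With reraise=True the original IOError surfaces here; with the default
    # (reraise=False) a tenacity.RetryError would be raised instead.
    pass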


@tenacity.retry(
    stop=tenacity.stop_after_attempt(5),
    wait=tenacity.wait_incrementing(start=7, increment=7),
    reraise=True,
)
def download_binary(url, path):
    """Download a binary file from an url"""

    try:
        artifact = requests.get(url, stream=True)
        artifact.raise_for_status()

        with open(path, "wb") as f:
            for chunk in artifact.iter_content(chunk_size=8192):
                f.write(chunk)

    except Exception:
        try:
Example #14
class StatusManager(object):
    def __init__(self, bigip):
        self.bigip = bigip
        self._octavia_driver_lib = driver_lib.DriverLibrary(
            status_socket=CONF.driver_agent.status_socket_path,
            stats_socket=CONF.driver_agent.stats_socket_path
        )

    def set_active(self, obj):
        """Set provisioning_state of octavia object and all ancestors to
        ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj)

    def set_deleted(self, obj):
        """Set provisioning_state of octavia object to DELETED and all
        ancestors to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj, lib_consts.DELETED)

    def set_error(self, obj):
        """Set provisioning_state of octavia object to ERROR and all
        ancestors to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj, lib_consts.ERROR)

    def update_status(self, loadbalancers):
        """Set provisioning_state of loadbalancers and all it's
        children to ACTIVE if PENDING_UPDATE or PENDING_CREATE, else
        DELETED for PENDING_DELETED.

        :param loadbalancers: octavia loadbalancers list
        """

        def _set_deleted_or_active(obj):
            """Sets octavia object to deleted if status was PENDING_DELETE

            :param obj: octavia object
            """
            if utils.pending_delete(obj):
                self.set_deleted(obj)
            else:
                self.set_active(obj)

        for loadbalancer in loadbalancers:
            _set_deleted_or_active(loadbalancer)

            for listener in loadbalancer.listeners:
                _set_deleted_or_active(listener)

                for l7policy in listener.l7policies:
                    _set_deleted_or_active(l7policy)

                    for l7rule in l7policy.l7rules:
                        _set_deleted_or_active(l7rule)

            for pool in loadbalancer.pools:
                _set_deleted_or_active(pool)

                for member in pool.members:
                    _set_deleted_or_active(member)

                if pool.health_monitor:
                    _set_deleted_or_active(pool.health_monitor)

    def _set_obj_and_ancestors(self, obj, state=lib_consts.ACTIVE):
        """Set provisioning_state of octavia object to state and set all ancestors
        to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        obj_status = self._status_obj(obj, state)

        # Load Balancer
        if isinstance(obj, data_models.LoadBalancer):
            self._update_status_to_octavia({
                lib_consts.LOADBALANCERS: [obj_status]
            })

        # Listener
        if isinstance(obj, data_models.Listener):
            self._update_status_to_octavia({
                lib_consts.LISTENERS: [obj_status],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.load_balancer)]
            })

        # Pool
        if isinstance(obj, data_models.Pool):
            self._update_status_to_octavia({
                lib_consts.POOLS: [obj_status],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.load_balancer)]
            })

        # Member
        if isinstance(obj, data_models.Member):
            self._update_status_to_octavia({
                lib_consts.MEMBERS: [obj_status],
                lib_consts.POOLS: [self._status_obj(obj.pool)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.pool.load_balancer)]
            })

        # Health Monitor
        if isinstance(obj, data_models.HealthMonitor):
            self._update_status_to_octavia({
                lib_consts.HEALTHMONITORS: [obj_status],
                lib_consts.POOLS: [self._status_obj(obj.pool)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.pool.load_balancer)]
            })

        # L7Policy
        if isinstance(obj, data_models.L7Policy):
            self._update_status_to_octavia({
                lib_consts.L7POLICIES: [obj_status],
                lib_consts.LISTENERS: [self._status_obj(obj.listener)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.listener.load_balancer)]
            })

        # L7Rule
        if isinstance(obj, data_models.L7Rule):
            self._update_status_to_octavia({
                lib_consts.L7RULES: [obj_status],
                lib_consts.L7POLICIES: [self._status_obj(obj.l7policy)],
                lib_consts.LISTENERS: [self._status_obj(obj.l7policy.listener)],
                lib_consts.LOADBALANCERS: [self._status_obj(
                    obj.l7policy.listener.load_balancer)]
            })

    @staticmethod
    def _status_obj(obj,
                    provisioning_status=lib_consts.ACTIVE):
        """Return status object for statup update api consumption

        :param obj: octavia object containing ID
        :param provisioning_status: provisioning status
        :return: status object
        """
        return {
            lib_consts.ID: obj.id,
            lib_consts.PROVISIONING_STATUS: provisioning_status
        }

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(),
        wait=tenacity.wait_incrementing(start=1, increment=10),
        stop=tenacity.stop_after_attempt(max_attempt_number=3))
    def _update_status_to_octavia(self, status):
        try:
            self._octavia_driver_lib.update_loadbalancer_status(status)
        except driver_exceptions.UpdateStatusError as e:
            msg = ("Error while updating status to octavia: "
                   "%s") % e.fault_string
            LOG.error(msg)
            raise driver_exceptions.UpdateStatusError(msg)
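
_update_status_to_octavia above passes retry_if_exception_type() with no arguments, which defaults to retrying on any Exception. A minimal standalone sketch of that behaviour with a hypothetical status updater that only succeeds on its third attempt:

import tenacity

attempts = {"count": 0}

@tenacity.retry(
    retry=tenacity.retry_if_exception_type(),   # no argument defaults to Exception
    wait=tenacity.wait_incrementing(start=1, increment=10),
    stop=tenacity.stop_after_attempt(3))
def push_status(status):
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("driver library temporarily unavailable")
    return "updated %s" % status

# push_status({"loadbalancers": []})  # succeeds on the third attempt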
Example #15
import functools

from oslo_db import exception as db_exc
from oslo_log import log as logging
import tenacity

from mistral import context
from mistral import exceptions as exc
from mistral.services import security

LOG = logging.getLogger(__name__)


@tenacity.retry(retry=tenacity.retry_if_exception_type(db_exc.DBDeadlock),
                stop=tenacity.stop_after_attempt(50),
                wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2))
def _with_auth_context(auth_ctx, func, *args, **kw):
    """Runs the given function with the specified auth context.

    :param auth_ctx: Authentication context.
    :param func: Function to run with the specified auth context.
    :param args: Function positional arguments.
    :param kw: Function keyword arguments.
    :return: Function result.
    """
    old_auth_ctx = context.ctx() if context.has_ctx() else None

    context.set_ctx(auth_ctx)

    try:
        return func(*args, **kw)
Example #16
class F5DoClient(object):
    def __init__(self, bigip_url, enable_verify=True, enable_token=True):
        self.bigip = parse.urlsplit(bigip_url, allow_fragments=False)
        self.enable_verify = enable_verify
        self.enable_token = enable_token
        self.token = None
        self.s = self._create_session()

    def _url(self, path):
        return parse.urlunsplit(
            parse.SplitResult(scheme=self.bigip.scheme,
                              netloc=self.bigip.hostname,
                              path=path,
                              query='',
                              fragment=''))

    def _create_session(self):
        session = requests.Session()
        session.verify = self.enable_verify
        return session

    def authorized(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except HTTPError as e:
                if e.response.status_code == 401:
                    self.reauthorize()
                    return func(self, *args, **kwargs)
                else:
                    raise e

        return wrapper

    @retry(
        retry=retry_if_exception_type(HTTPError),
        wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=stop_after_attempt(RETRY_ATTEMPTS),
    )
    def reauthorize(self):
        # Login
        credentials = {
            "username": self.bigip.username,
            "password": self.bigip.password,
            "loginProviderName": "tmos"
        }
        basicauth = HTTPBasicAuth(self.bigip.username, self.bigip.password)
        r = self.s.post(self._url(DO_LOGIN_PATH),
                        json=credentials,
                        auth=basicauth)
        r.raise_for_status()
        self.token = r.json()['token']['token']

        self.s.headers.update({'X-F5-Auth-Token': self.token})

        patch_timeout = {"timeout": "36000"}
        r = self.s.patch(self._url(DO_TOKENS_PATH.format(self.token)),
                         json=patch_timeout)
        LOG.debug("Reauthorized!")

    @retry(retry=retry_if_exception_type(HTTPError),
           wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                  RETRY_MAX),
           stop=stop_after_attempt(RETRY_ATTEMPTS))
    @authorized
    def post(self, **kwargs):
        LOG.debug("Calling POST with JSON %s", kwargs.get('json'))
        response = self.s.post(self._url(DO_PATH), **kwargs)
        response.raise_for_status()
        LOG.debug("POST finished with %d: %s", response.status_code,
                  response.text)
        return response

    @retry(retry=retry_if_exception_type(HTTPError),
           wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                  RETRY_MAX),
           stop=stop_after_attempt(RETRY_ATTEMPTS))
    @authorized
    def get(self):
        response = self.s.get(self._url(DO_PATH))
        response.raise_for_status()
        LOG.debug("GET finished with %d: %s", response.status_code,
                  response.text)
        return response
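
In F5DoClient above, @retry is stacked on top of the @authorized wrapper, so a 401 first triggers a re-login and only HTTPErrors that survive the wrapper feed the retry loop. A minimal sketch of the same stacking order; the HTTPError class and the client are hypothetical stand-ins rather than the requests library's own types:

import functools
import tenacity

class HTTPError(Exception):
    """Simplified stand-in carrying only a status code."""
    def __init__(self, status_code):
        super().__init__(status_code)
        self.status_code = status_code

def authorized(func):
    """Re-authenticate once on a 401 before retrying the wrapped call."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except HTTPError as e:
            if e.status_code == 401:
                self.reauthorize()
                return func(self, *args, **kwargs)
            raise
    return wrapper

class Client(object):
    def reauthorize(self):
        self.token = "fresh-token"

    @tenacity.retry(retry=tenacity.retry_if_exception_type(HTTPError),
                    wait=tenacity.wait_incrementing(start=1, increment=1, max=5),
                    stop=tenacity.stop_after_attempt(3))
    @authorized
    def get(self):
        # A real client would issue the HTTP request here.
        return "ok"
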
class BaseTroveTest(test.BaseTestCase):
    credentials = ('admin', 'primary')
    datastore = None
    instance = None
    instance_id = None
    instance_ip = None
    password = ""
    create_user = True
    enable_root = False

    @classmethod
    def get_resource_name(cls, resource_type):
        prefix = "trove-tempest-%s" % cls.__name__
        return data_utils.rand_name(resource_type, prefix=prefix)

    @classmethod
    def skip_checks(cls):
        super(BaseTroveTest, cls).skip_checks()

        if not CONF.service_available.trove:
            raise cls.skipException("Database service is not available.")

        if cls.datastore not in CONF.database.enabled_datastores:
            raise cls.skipException(
                "Datastore %s is not enabled." % cls.datastore
            )

    @classmethod
    def get_swift_client(cls):
        auth_version = "3.0"
        auth_url = CONF.identity.uri_v3
        user = cls.os_primary.credentials.username
        key = cls.os_primary.credentials.password
        tenant_name = cls.os_primary.credentials.project_name
        region_name = cls.os_primary.region
        os_options = {'tenant_name': tenant_name, 'region_name': region_name}

        return swift_client.Connection(
            auth_url, user, key, auth_version=auth_version,
            os_options=os_options)

    @classmethod
    def get_swift_admin_client(cls):
        auth_version = "3.0"
        auth_url = CONF.identity.uri_v3
        user = cls.os_admin.credentials.username
        key = cls.os_admin.credentials.password
        tenant_name = cls.os_admin.credentials.project_name
        region_name = cls.os_admin.region
        os_options = {'tenant_name': tenant_name, 'region_name': region_name}

        return swift_client.Connection(
            auth_url, user, key, auth_version=auth_version,
            os_options=os_options)

    @classmethod
    def setup_clients(cls):
        super(BaseTroveTest, cls).setup_clients()

        cls.client = cls.os_primary.database.TroveClient()
        cls.admin_client = cls.os_admin.database.TroveClient()
        cls.admin_server_client = cls.os_admin.servers_client
        cls.account_client = cls.os_primary.account_client
        cls.container_client = cls.os_primary.container_client
        cls.object_client = cls.os_primary.object_client
        cls.admin_container_client = cls.os_admin.container_client
        cls.admin_object_client = cls.os_admin.object_client
        # Swift client is special, we want to re-use the log_generator func
        # in python-troveclient.
        cls.swift = cls.get_swift_client()
        cls.swift_admin = cls.get_swift_admin_client()

    @classmethod
    def setup_credentials(cls):
        # Do not create network resources automatically.
        cls.set_network_resources()
        super(BaseTroveTest, cls).setup_credentials()

    @classmethod
    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(exceptions.Conflict),
        wait=tenacity.wait_incrementing(1, 1, 5),
        stop=tenacity.stop_after_attempt(15)
    )
    def _delete_network(cls, net_id):
        """Make sure the network is deleted.

        Neutron can be slow to clean up ports from the subnets/networks.
        Retry this delete a few times if we get a "Conflict" error to give
        neutron time to fully cleanup the ports.
        """
        networks_client = cls.os_primary.networks_client
        try:
            networks_client.delete_network(net_id)
        except Exception:
            LOG.error('Unable to delete network %s', net_id)
            raise

    @classmethod
    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(exceptions.Conflict),
        wait=tenacity.wait_incrementing(1, 1, 5),
        stop=tenacity.stop_after_attempt(15)
    )
    def _delete_subnet(cls, subnet_id):
        """Make sure the subnet is deleted.

        Neutron can be slow to clean up ports from the subnets/networks.
        Retry this delete a few times if we get a "Conflict" error to give
        neutron time to fully clean up the ports.
        """
        subnets_client = cls.os_primary.subnets_client
        try:
            subnets_client.delete_subnet(subnet_id)
        except Exception:
            LOG.error('Unable to delete subnet %s', subnet_id)
            raise
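    # Note on the retry decorators above: tenacity.wait_incrementing(1, 1, 5)
    # sleeps 1s, 2s, 3s, 4s and then a capped 5s between attempts, so with
    # stop_after_attempt(15) Neutron gets roughly a minute to release the
    # ports before the Conflict is finally allowed to propagate.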

    @classmethod
    def _create_network(cls):
        """Create database instance network."""
        networks_client = cls.os_primary.networks_client
        subnets_client = cls.os_primary.subnets_client
        routers_client = cls.os_primary.routers_client

        if CONF.database.shared_network:
            private_network = CONF.database.shared_network
            if not uuidutils.is_uuid_like(private_network):
                networks = networks_client.list_networks()['networks']
                for net in networks:
                    if net['name'] == private_network:
                        private_network = net['id']
                        break
                else:
                    raise exceptions.NotFound(
                        'Shared network %s not found' % private_network
                    )

            cls.private_network = private_network
            return

        network_kwargs = {"name": cls.get_resource_name("network")}
        result = networks_client.create_network(**network_kwargs)
        LOG.info('Private network created: %s', result['network'])
        cls.private_network = result['network']["id"]
        cls.addClassResourceCleanup(
            utils.wait_for_removal,
            cls._delete_network,
            networks_client.show_network,
            cls.private_network
        )

        subnet_kwargs = {
            'name': cls.get_resource_name("subnet"),
            'network_id': cls.private_network,
            'cidr': CONF.database.subnet_cidr,
            'ip_version': 4
        }
        result = subnets_client.create_subnet(**subnet_kwargs)
        subnet_id = result['subnet']['id']
        LOG.info('Private subnet created: %s', result['subnet'])
        cls.addClassResourceCleanup(
            utils.wait_for_removal,
            cls._delete_subnet,
            subnets_client.show_subnet,
            subnet_id
        )

        # On a dev node, the Trove instance needs to connect to the control
        # host.
        router_params = {
            'name': cls.get_resource_name("router"),
            'external_gateway_info': {
                "network_id": CONF.network.public_network_id
            }
        }
        result = routers_client.create_router(**router_params)
        router_id = result['router']['id']
        LOG.info('Private router created: %s', result['router'])
        cls.addClassResourceCleanup(
            utils.wait_for_removal,
            routers_client.delete_router,
            routers_client.show_router,
            router_id
        )

        routers_client.add_router_interface(router_id, subnet_id=subnet_id)
        LOG.info('Subnet %s added to the router %s', subnet_id, router_id)
        cls.addClassResourceCleanup(
            routers_client.remove_router_interface,
            router_id,
            subnet_id=subnet_id
        )

    @classmethod
    def resource_setup(cls):
        super(BaseTroveTest, cls).resource_setup()

        # Create the network for the database instance; cls.private_network
        # holds the network ID.
        cls._create_network()

        instance = cls.create_instance(create_user=cls.create_user)
        cls.instance_id = instance['id']
        cls.wait_for_instance_status(cls.instance_id)
        cls.instance = cls.client.get_resource(
            "instances", cls.instance_id)['instance']
        cls.instance_ip = cls.get_instance_ip(cls.instance)

        if cls.enable_root:
            cls.password = cls.get_root_pass(cls.instance_id)

    def assert_single_item(self, items, **props):
        return self.assert_multiple_items(items, 1, **props)[0]

    def assert_multiple_items(self, items, count, **props):
        """Check if a object is in a list of objects.

        e.g. props is a sub-dict, items is a list of dicts.
        """

        def _matches(item, **props):
            for prop_name, prop_val in props.items():
                v = item[prop_name] if isinstance(
                    item, dict) else getattr(item, prop_name)

                if v != prop_val:
                    return False

            return True

        filtered_items = [
            item for item in items if _matches(item, **props)
        ]

        found = len(filtered_items)

        if found != count:
            LOG.info("[FAIL] items=%s, expected_props=%s", str(items), props)
            self.fail("Wrong number of items found [props=%s, "
                      "expected=%s, found=%s]" % (props, count, found))

        return filtered_items

    @classmethod
    def delete_swift_account(cls):
        LOG.info(f"Cleaning up Swift account")
        try:
            cls.account_client.delete('')
        except exceptions.NotFound:
            pass

    @classmethod
    def delete_swift_containers(cls, container_client, object_client,
                                containers):
        """Remove containers and all objects in them.

        The containers should be visible from the container_client given.
        Will not throw any error if the containers don't exist.
        Will not check that object and container deletions succeed.
        After deleting all the objects from a container, it will wait 3
        seconds before deleting the container itself, so that deployments
        using HAProxy can sync the deletion properly; otherwise, the
        container might fail to be deleted because it's not empty.
        """
        if isinstance(containers, str):
            containers = [containers]

        for cont in containers:
            try:
                params = {'limit': 9999, 'format': 'json'}
                _, objlist = container_client.list_container_objects(
                    cont,
                    params)
                # delete every object in the container
                for obj in objlist:
                    test_utils.call_and_ignore_notfound_exc(
                        object_client.delete_object, cont, obj['name'])

                # sleep 3 seconds to sync the deletion of the objects
                # in HA deployment
                time.sleep(3)

                container_client.delete_container(cont)
            except exceptions.NotFound:
                pass

    @classmethod
    def create_instance(cls, name=None, datastore_version=None,
                        database=constants.DB_NAME, username=constants.DB_USER,
                        password=constants.DB_PASS, backup_id=None,
                        replica_of=None, create_user=True):
        """Create database instance.

        Creating a database instance is time-consuming, so we define this
        method as a class method, which means the instance is shared within a
        single TestCase. According to
        https://docs.openstack.org/tempest/latest/write_tests.html#adding-a-new-testcase,
        all test methods within a TestCase are assumed to be executed serially.
        """
        name = name or cls.get_resource_name("instance")

        # Flavor, volume, datastore are not needed for creating replica.
        if replica_of:
            body = {
                "instance": {
                    "name": name,
                    "nics": [{"net-id": cls.private_network}],
                    "access": {"is_public": True},
                    "replica_of": replica_of,
                }
            }

        # Get datastore version. Get from API if the default ds version is not
        # configured.
        elif not datastore_version:
            default_versions = CONF.database.default_datastore_versions
            datastore_version = default_versions.get(cls.datastore)
            if not datastore_version:
                res = cls.client.list_resources("datastores")
                for d in res['datastores']:
                    if d['name'] == cls.datastore:
                        if d.get('default_version'):
                            datastore_version = d['default_version']
                        else:
                            datastore_version = d['versions'][0]['name']
                        break
            if not datastore_version:
                message = 'Failed to get available datastore version.'
                raise exceptions.TempestException(message)

        if not replica_of:
            body = {
                "instance": {
                    "name": name,
                    "datastore": {
                        "type": cls.datastore,
                        "version": datastore_version
                    },
                    "flavorRef": CONF.database.flavor_id,
                    "volume": {
                        "size": 1,
                        "type": CONF.database.volume_type
                    },
                    "nics": [{"net-id": cls.private_network}],
                    "access": {"is_public": True}
                }
            }
            if backup_id:
                body['instance'].update(
                    {'restorePoint': {'backupRef': backup_id}})
            if create_user:
                body['instance'].update({
                    'databases': [{"name": database}],
                    "users": [
                        {
                            "name": username,
                            "password": password,
                            "databases": [{"name": database}]
                        }
                    ]
                })

        res = cls.client.create_resource("instances", body)
        cls.addClassResourceCleanup(cls.wait_for_instance_status,
                                    res["instance"]["id"],
                                    need_delete=True,
                                    expected_status="DELETED")

        return res["instance"]

    @classmethod
    def restart_instance(cls, instance_id):
        """Restart database service and wait until it's healthy."""
        cls.client.create_resource(
            f"instances/{instance_id}/action",
            {"restart": {}},
            expected_status_code=202,
            need_response=False)
        cls.wait_for_instance_status(instance_id)

    @classmethod
    def wait_for_instance_status(cls, id,
                                 expected_status=["HEALTHY", "ACTIVE"],
                                 need_delete=False,
                                 timeout=CONF.database.database_build_timeout):
        def _wait():
            try:
                res = cls.client.get_resource("instances", id)
                cur_status = res["instance"]["status"]
            except exceptions.NotFound:
                if need_delete or "DELETED" in expected_status:
                    LOG.info('Instance %s is deleted', id)
                    raise loopingcall.LoopingCallDone()
                return

            if cur_status in expected_status:
                LOG.info('Instance %s becomes %s', id, cur_status)
                raise loopingcall.LoopingCallDone()
            elif "ERROR" not in expected_status and cur_status == "ERROR":
                # If instance status goes to ERROR but is not expected, stop
                # waiting

                res = cls.admin_client.get_resource("instances", id)
                LOG.info(f'Instance fault msg: {res["instance"].get("fault")}')

                # Show the trove-guestagent log for debugging purposes.
                # Only an admin user is able to publish and show the trove
                # guest agent log. Make sure the container is deleted after
                # fetching the log.
                try:
                    LOG.info(f"Publishing guest log for instance {id}")
                    cls.publish_log(id, 'guest')
                    LOG.info(f"Getting guest log content for instance {id}")
                    log_gen = cls.log_generator(id, 'guest', lines=0)
                    log_content = "".join([chunk for chunk in log_gen()])
                    LOG.info(
                        f"\n=============================================\n"
                        f"Trove guest agent log for instance {id}\n"
                        f"=============================================")
                    LOG.info(log_content)
                except Exception as err:
                    LOG.warning(f"Failed to get guest log for instance {id}, "
                                f"error: {str(err)}")
                finally:
                    # Remove the swift container of database logs.
                    LOG.info(f"Deleting swift container "
                             f"{CONF.database.database_log_container}")
                    cls.delete_swift_containers(
                        cls.admin_container_client, cls.admin_object_client,
                        CONF.database.database_log_container)

                message = "Instance status is ERROR."
                caller = test_utils.find_test_caller()
                if caller:
                    message = '({caller}) {message}'.format(caller=caller,
                                                            message=message)
                raise exceptions.UnexpectedResponseCode(message)

        if not isinstance(expected_status, list):
            expected_status = [expected_status]

        if need_delete:
            # If resource already removed, return
            try:
                cls.client.get_resource("instances", id)
            except exceptions.NotFound:
                LOG.info('Instance %s not found', id)
                return

            LOG.info(f"Deleting instance {id}")
            cls.admin_client.force_delete_instance(id)

        timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_wait)
        try:
            timer.start(interval=10, timeout=timeout, initial_delay=5).wait()
        except loopingcall.LoopingCallTimeOut:
            message = ("Instance %s is not in the expected status: %s" %
                       (id, expected_status))
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            raise exceptions.TimeoutException(message)
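    # The waiter above is driven by oslo.service loopingcall: _wait() runs
    # every `interval` seconds, raising LoopingCallDone stops the loop early,
    # and the loop itself raises LoopingCallTimeOut once `timeout` elapses,
    # which is then converted into a Tempest TimeoutException.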

    @classmethod
    def get_instance_ip(cls, instance=None):
        if not instance:
            instance = cls.client.get_resource(
                "instances", cls.instance_id)['instance']

        # TODO(lxkong): IPv6 needs to be tested.
        v4_ip = None

        if 'addresses' in instance:
            for addr_info in instance['addresses']:
                if addr_info['type'] == 'private':
                    v4_ip = addr_info['address']
                if addr_info['type'] == 'public':
                    v4_ip = addr_info['address']
                    break
        else:
            ips = instance.get('ip', [])
            for ip in ips:
                if netutils.is_valid_ipv4(ip):
                    v4_ip = ip

        if not v4_ip:
            message = 'Failed to get instance IP address.'
            raise exceptions.TempestException(message)

        return v4_ip

    def get_databases(self, instance_id, **kwargs):
        url = f'instances/{instance_id}/databases'
        ret = self.client.list_resources(url)
        return ret['databases']

    def get_users(self, instance_id):
        url = f'instances/{instance_id}/users'
        ret = self.client.list_resources(url)
        return ret['users']

    @classmethod
    def create_backup(cls, instance_id, backup_name, incremental=False,
                      parent_id=None, description=None):
        body = {
            "backup": {
                "name": backup_name,
                "instance": instance_id,
                "incremental": 1 if incremental else 0,
            }
        }
        if description:
            body['backup']['description'] = description
        if parent_id:
            body['backup']['parent_id'] = parent_id

        res = cls.client.create_resource("backups", body,
                                         expected_status_code=202)
        cls.addClassResourceCleanup(cls.wait_for_backup_status,
                                    res["backup"]['id'],
                                    expected_status='',
                                    need_delete=True)
        return res["backup"]

    @classmethod
    def delete_backup(cls, backup_id, ignore_notfound=False):
        cls.client.delete_resource('backups', backup_id,
                                   ignore_notfound=ignore_notfound)

    @classmethod
    def wait_for_backup_status(cls, id, expected_status=["COMPLETED"],
                               need_delete=False):
        def _wait():
            try:
                res = cls.client.get_resource("backups", id)
                cur_status = res["backup"]["status"]
            except exceptions.NotFound:
                if need_delete or "DELETED" in expected_status:
                    LOG.info('Backup %s is deleted', id)
                    raise loopingcall.LoopingCallDone()
                return

            if cur_status in expected_status:
                LOG.info('Backup %s becomes %s', id, cur_status)
                raise loopingcall.LoopingCallDone()
            elif "FAILED" not in expected_status and cur_status == "FAILED":
                # If backup status goes to FAILED but is not expected, stop
                # waiting
                message = "Backup status is FAILED."
                caller = test_utils.find_test_caller()
                if caller:
                    message = '({caller}) {message}'.format(caller=caller,
                                                            message=message)
                raise exceptions.UnexpectedResponseCode(message)

        if not isinstance(expected_status, list):
            expected_status = [expected_status]

        if need_delete:
            # If resource already removed, return
            try:
                cls.client.get_resource("backups", id)
            except exceptions.NotFound:
                LOG.info('Backup %s not found', id)
                return

            LOG.info(f"Deleting backup {id}")
            cls.delete_backup(id, ignore_notfound=True)

        timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_wait)
        try:
            timer.start(interval=10,
                        timeout=CONF.database.backup_wait_timeout).wait()
        except loopingcall.LoopingCallTimeOut:
            message = ("Backup %s is not in the expected status: %s" %
                       (id, expected_status))
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            raise exceptions.TimeoutException(message)

    @classmethod
    def get_root_pass(cls, instance_id):
        resp = cls.client.create_resource(f"instances/{instance_id}/root", {})
        return resp['user']['password']

    @classmethod
    def rebuild_instance(cls, instance_id, image_id):
        rebuild_req = {
            "rebuild": {
                "image_id": image_id
            }
        }
        cls.admin_client.create_resource(
            f"mgmt/instances/{instance_id}/action",
            rebuild_req, expected_status_code=202,
            need_response=False)
        cls.wait_for_instance_status(instance_id)

    @classmethod
    def create_config(cls, name, values, datastore, datastore_version):
        create_config = {
            "configuration": {
                "datastore": {
                    "type": datastore,
                    "version": datastore_version
                },
                "values": values,
                "name": name
            }
        }
        config = cls.client.create_resource('configurations', create_config)
        return config

    @classmethod
    def attach_config(cls, instance_id, config_id):
        attach_config = {
            "instance": {
                "configuration": config_id
            }
        }
        cls.client.put_resource(f'instances/{instance_id}', attach_config)

    @classmethod
    def detach_config(cls, instance_id):
        detach_config = {
            "instance": {
                "configuration": None
            }
        }
        cls.client.put_resource(f'instances/{instance_id}', detach_config)

    @classmethod
    def publish_log(cls, instance_id, name='guest'):
        client = cls.admin_client if name == 'guest' else cls.client
        req_body = {
            'name': name,
            'publish': 1
        }
        client.create_resource(f"instances/{instance_id}/log",
                               req_body)

    @classmethod
    def get_log_info(cls, instance_id, name='guest'):
        req_body = {
            'name': name,
        }
        return cls.admin_client.create_resource(
            f"instances/{instance_id}/log",
            req_body)

    @classmethod
    def _get_container_info(cls, instance_id, log_name):
        try:
            log_info = cls.get_log_info(instance_id, log_name)['log']
            container = log_info['container']
            prefix = log_info['prefix']
            metadata_file = log_info['metafile']
            return container, prefix, metadata_file
        except swift_client.ClientException as ex:
            if ex.http_status == 404:
                raise trove_exc.GuestLogNotFound()
            raise trove_exc.TroveTempestException()

    @classmethod
    def log_generator(cls, instance_id, log_name, lines=50):
        """Return generator to yield the last <lines> lines of guest log.

        This method is copied from python-troveclient.
        """
        swift_cli = cls.swift_admin if log_name == 'guest' else cls.swift

        def _log_generator(instance_id, log_name, lines):
            try:
                container, prefix, metadata_file = cls._get_container_info(
                    instance_id, log_name)

                head, body = swift_cli.get_container(container, prefix=prefix)
                log_obj_to_display = []

                if lines:
                    total_lines = lines
                    partial_results = False
                    parts = sorted(body, key=lambda obj: obj['last_modified'],
                                   reverse=True)

                    for part in parts:
                        obj_hdrs = swift_cli.head_object(
                            container,
                            part['name'])
                        obj_lines = int(obj_hdrs['x-object-meta-lines'])
                        log_obj_to_display.insert(0, part)
                        if obj_lines >= lines:
                            partial_results = True
                            break
                        lines -= obj_lines
                    if not partial_results:
                        lines = total_lines

                    part = log_obj_to_display.pop(0)
                    hdrs, log_obj = swift_cli.get_object(
                        container,
                        part['name'])
                    log_by_lines = log_obj.decode().splitlines()
                    yield "\n".join(log_by_lines[-1 * lines:]) + "\n"
                else:
                    # Show all the logs
                    log_obj_to_display = sorted(
                        body, key=lambda obj: obj['last_modified'])

                for log_part in log_obj_to_display:
                    headers, log_obj = swift_cli.get_object(
                        container,
                        log_part['name'])
                    yield log_obj.decode()
            except swift_client.ClientException as ex:
                if ex.http_status == 404:
                    raise trove_exc.GuestLogNotFound()
                raise trove_exc.TroveTempestException()

        return lambda: _log_generator(instance_id, log_name, lines)
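
A minimal standalone sketch of the network-cleanup retry pattern used by
_delete_network/_delete_subnet above, assuming only that tenacity is
installed; the Conflict exception and delete_port() helper below are
hypothetical stand-ins for the Tempest/Neutron pieces:

import tenacity


class Conflict(Exception):
    """Stand-in for the 409 'Conflict' Neutron returns while ports linger."""


_attempts = {"count": 0}


def delete_port(port_id):
    # Pretend the first two calls hit a Conflict, then the delete succeeds.
    _attempts["count"] += 1
    if _attempts["count"] < 3:
        raise Conflict("port %s still in use" % port_id)
    return True


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(Conflict),
    wait=tenacity.wait_incrementing(1, 1, 5),
    stop=tenacity.stop_after_attempt(15)
)
def delete_port_with_retry(port_id):
    return delete_port(port_id)


print(delete_port_with_retry("fake-port-id"))  # True after two retried attempts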
Exemple #18
0
class Message(object):
    """Zaqar message type of notification."""
    def __init__(self, queue_name, **kwargs):
        self.user = kwargs.get('user', '')
        self.project = kwargs.get('project', '')
        self.domain = kwargs.get('domain', '')

        self.queue_name = queue_name

        self._zaqarclient = None
        self._keystoneclient = None

    def zaqar(self):
        if self._zaqarclient is not None:
            return self._zaqarclient
        params = self._build_conn_params(self.user, self.project)
        self._zaqarclient = driver_base.SenlinDriver().message(params)
        return self._zaqarclient

    def _build_conn_params(self, user, project):
        """Build connection params for specific user and project.

        :param user: The ID of the user for which a trust will be used.
        :param project: The ID of the project for which a trust will be used.
        :returns: A dict containing the required parameters for connection
                  creation.
        """
        service_creds = senlin_context.get_service_credentials()
        params = {
            'username': service_creds.get('username'),
            'password': service_creds.get('password'),
            'auth_url': service_creds.get('auth_url'),
            'user_domain_name': service_creds.get('user_domain_name')
        }

        cred = co.Credential.get(oslo_context.get_current(), user, project)
        if cred is None:
            raise exception.TrustNotFound(trustor=user)
        params['trust_id'] = cred.cred['openstack']['trust']

        return params

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(exception.EResourceCreation),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def post_lifecycle_hook_message(self, lifecycle_action_token, node_id,
                                    resource_id, lifecycle_transition_type):
        message_list = [{
            "ttl": CONF.notification.ttl,
            "body": {
                "lifecycle_action_token": lifecycle_action_token,
                "node_id": node_id,
                "resource_id": resource_id,
                "lifecycle_transition_type": lifecycle_transition_type
            }
        }]
        try:
            if not self.zaqar().queue_exists(self.queue_name):
                kwargs = {
                    "_max_messages_post_size":
                    CONF.notification.max_message_size,
                    "description": "Senlin lifecycle hook notification",
                    "name": self.queue_name
                }
                self.zaqar().queue_create(**kwargs)

            return self.zaqar().message_post(self.queue_name, message_list)
        except exception.InternalError as ex:
            raise exception.EResourceCreation(type='queue', message=str(ex))
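
In the Senlin example above, RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX and
RETRY_ATTEMPTS are constants referenced from elsewhere in the original module
(not shown in the snippet). A self-contained sketch of the same "retry the
whole post on a creation error" shape, with made-up constant values and a
stubbed queue call:

import tenacity

# Hypothetical values; the real constants live elsewhere in the Senlin module.
RETRY_INITIAL_DELAY = 1
RETRY_BACKOFF = 1
RETRY_MAX = 5
RETRY_ATTEMPTS = 3


class EResourceCreation(Exception):
    """Stand-in for Senlin's exception.EResourceCreation."""


class MessageSketch(object):
    def __init__(self):
        self.calls = 0

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(EResourceCreation),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def post(self, body):
        self.calls += 1
        if self.calls < 2:
            # Simulate a transient failure while creating/posting the queue.
            raise EResourceCreation("queue not ready")
        return {"posted": body}


print(MessageSketch().post({"node_id": "fake-node"}))

If every attempt raises, tenacity wraps the last attempt in a
tenacity.RetryError once stop_after_attempt is reached (unless reraise=True is
passed to the decorator).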
Exemple #19
0
class F5iControlRestBackend(F5Backend):
    def __init__(self, cfg, uri):
        super(F5iControlRestBackend, self).__init__()
        self.conf = cfg
        self.device = parse.urlparse(uri)
        self.devices = []  # SelfIP Ports
        self.mgmt = None

        # Prometheus counters
        self.vlan_update = Counter('vlan_update', 'Updates of vlans')
        self.vlan_create = Counter('vlan_create', 'Creations of vlans')
        self.vlan_delete = Counter('vlan_delete', 'Deletions of vlans')
        self.selfip_update = Counter('selfip_update', 'Updates of selfips')
        self.selfip_create = Counter('selfip_create', 'Creations of selfips')
        self.selfip_delete = Counter('selfip_delete', 'Deletions of selfips')
        self._login()

    def _login(self):
        if not self.device.username or not self.device.password:
            LOG.error("Need to specify valid F5.devices configuration: "
                      "http(s)://<username>:<password>@hostname,...")
            sys.exit(1)

        self.mgmt = ManagementRoot(self.device.hostname,
                                   self.device.username,
                                   self.device.password,
                                   token=True,
                                   verify=self.conf.F5.https_verify)

        interface = self.conf.F5_VCMP.interface
        trunk = self.mgmt.tm.net.trunks.trunk.load(name=interface)
        self.mac = trunk.macAddress

    def get_mac(self):
        return self.mac

    REQUEST_TIME_SYNC_VLANS = Summary('sync_vlan_seconds',
                                      'Time spent processing vlans')

    @REQUEST_TIME_SYNC_VLANS.time()
    def _sync_vlans(self, vlans):
        new_vlans = {
            constants.PREFIX_VLAN + name: val
            for name, val in vlans.items()
        }
        v = self.mgmt.tm.net.vlans
        for old_vlan in v.get_collection():
            # Not managed by agent
            if not old_vlan.name.startswith(constants.PREFIX_VLAN):
                pass

            # Update
            elif old_vlan.name in new_vlans:
                vlan = new_vlans.pop(old_vlan.name)
                if old_vlan.tag != vlan['tag'] or old_vlan.mtu != vlan['mtu']:
                    old_vlan.tag = vlan['tag']
                    old_vlan.mtu = vlan['mtu']
                    old_vlan.update()
                    self.vlan_update.inc()

            # orphaned
            else:
                try:
                    old_vlan.delete()
                    self.vlan_delete.inc()
                except iControlUnexpectedHTTPError:
                    pass

        # New ones
        for name, vlan in new_vlans.items():
            v.vlan.create(name=name,
                          partition='Common',
                          tag=vlan['tag'],
                          mtu=vlan['mtu'])
            self.vlan_create.inc()

    REQUEST_TIME_SYNC_SELFIPS = Summary('sync_selfip_seconds',
                                        'Time spent processing selfips')

    @REQUEST_TIME_SYNC_SELFIPS.time()
    def _sync_selfips(self, selfips):
        sips = self.mgmt.tm.net.selfips.get_collection()
        self.devices = [
            sip.name[len(constants.PREFIX_SELFIP):] for sip in sips
            if sip.name.startswith(constants.PREFIX_SELFIP)
        ]
        for old_sip in sips:
            # Not managed by agent
            if not old_sip.name.startswith(constants.PREFIX_SELFIP):
                continue

            # Update
            elif old_sip.name in selfips:
                selfip = selfips.pop(old_sip.name)
                if old_sip.vlan != '/Common/{}'.format(
                        selfip['vlan']
                ) or old_sip.address != selfip['ip_address']:
                    old_sip.vlan = '/Common/{}'.format(constants.PREFIX_VLAN +
                                                       selfip['vlan'])
                    old_sip.address = selfip['ip_address']
                    old_sip.update()
                    self.selfip_update.inc()

            # orphaned
            else:
                old_sip.delete()
                self.selfip_delete.inc()

        # New ones
        for name, selfip in selfips.items():
            if self.mac != selfip['mac']:
                continue

            self.mgmt.tm.net.selfips.selfip.create(
                name=constants.PREFIX_SELFIP + name,
                partition='Common',
                vlan=constants.PREFIX_VLAN + selfip['vlan'],
                address=selfip['ip_address'])
            self.selfip_create.inc()

    SYNC_ALL_EXCEPTIONS = Counter('sync_exceptions',
                                  'Exceptions during sync_all')

    @retry(retry=retry_if_exception_type((Timeout, ConnectionError)),
           wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                  RETRY_MAX),
           stop=stop_after_attempt(RETRY_ATTEMPTS))
    @SYNC_ALL_EXCEPTIONS.count_exceptions()
    def sync_all(self, vlans, selfips):
        try:
            self._sync_vlans(vlans)
        except iControlUnexpectedHTTPError as e:
            LOG.exception(e)

        try:
            self._sync_selfips(selfips)
        except iControlUnexpectedHTTPError as e:
            LOG.exception(e)

    def plug_interface(self, network_segment, device):
        name = constants.PREFIX_SELFIP + device
        if self.mgmt.tm.net.selfips.selfip.exists(name=name):
            return True

        # Wait until the sync loop has processed the port.
        return False

    GET_DEVICES_EXCEPTIONS = Counter('get_devices_exceptions',
                                     'Exceptions during get_devices')

    @retry(retry=retry_if_exception_type((Timeout, ConnectionError)),
           wait=wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                  RETRY_MAX),
           stop=stop_after_attempt(RETRY_ATTEMPTS))
    @GET_DEVICES_EXCEPTIONS.count_exceptions()
    def get_devices(self):
        return self.devices
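
The F5 backend above stacks a tenacity @retry on top of a prometheus_client
Counter.count_exceptions() decorator. Because @retry is the outer decorator,
every failing attempt still passes through count_exceptions() (which
re-raises), so the counter counts individual failed attempts rather than only
fully exhausted retries. A reduced sketch of the same stacking, with
hypothetical names:

import tenacity
from prometheus_client import Counter

SYNC_EXCEPTIONS = Counter('sync_exceptions_sketch',
                          'Exceptions raised by sync attempts')


class TransientError(Exception):
    """Stand-in for requests' Timeout / ConnectionError."""


_state = {"attempts": 0}


@tenacity.retry(retry=tenacity.retry_if_exception_type(TransientError),
                wait=tenacity.wait_fixed(0.1),
                stop=tenacity.stop_after_attempt(5))
@SYNC_EXCEPTIONS.count_exceptions()
def sync_all():
    # Fail twice, then succeed; each failure increments SYNC_EXCEPTIONS.
    _state["attempts"] += 1
    if _state["attempts"] < 3:
        raise TransientError("backend not reachable yet")
    return "synced"


print(sync_all())  # 'synced' on the third attempt, counter incremented twice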
Exemple #20
0
class ControllerWorker(object):
    def __init__(self):

        self._amphora_repo = repo.AmphoraRepository()
        self._amphora_health_repo = repo.AmphoraHealthRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = repo.PoolRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()
        self._az_repo = repo.AvailabilityZoneRepository()

        if CONF.task_flow.jobboard_enabled:
            persistence = tsk_driver.MysqlPersistenceDriver()

            self.jobboard_driver = stevedore_driver.DriverManager(
                namespace='octavia.worker.jobboard_driver',
                name=CONF.task_flow.jobboard_backend_driver,
                invoke_args=(persistence, ),
                invoke_on_load=True).driver
        else:
            self.tf_engine = base_taskflow.BaseTaskFlowEngine()

    @tenacity.retry(
        retry=(tenacity.retry_if_result(_is_provisioning_status_pending_update)
               | tenacity.retry_if_exception_type()),
        wait=tenacity.wait_incrementing(
            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
            CONF.haproxy_amphora.api_db_commit_retry_backoff,
            CONF.haproxy_amphora.api_db_commit_retry_max),
        stop=tenacity.stop_after_attempt(
            CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def _get_db_obj_until_pending_update(self, repo, id):

        return repo.get(db_apis.get_session(), id=id)
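        # Note on the decorator above: tenacity.retry_if_exception_type()
        # called with no arguments retries on any Exception, and "|" combines
        # it with retry_if_result(...) into a retry_any condition, so the
        # lookup is repeated while the returned row still matches
        # _is_provisioning_status_pending_update or while the DB read raises.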

    @property
    def services_controller(self):
        return base_taskflow.TaskFlowServiceController(self.jobboard_driver)

    def run_flow(self, func, *args, **kwargs):
        if CONF.task_flow.jobboard_enabled:
            self.services_controller.run_poster(func, *args, **kwargs)
        else:
            tf = self.tf_engine.taskflow_load(func(*args), **kwargs)
            with tf_logging.DynamicLoggingListener(tf, log=LOG):
                tf.run()

    def create_amphora(self, availability_zone=None):
        """Creates an Amphora.

        This is used to create spare amphora.

        :returns: uuid
        """
        try:
            store = {
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_SPARES_POOL_PRIORITY,
                constants.FLAVOR: None,
                constants.SERVER_GROUP_ID: None,
                constants.AVAILABILITY_ZONE: None
            }
            if availability_zone:
                store[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
            self.run_flow(flow_utils.get_create_amphora_flow,
                          store=store,
                          wait=True)
        except Exception as e:
            LOG.error('Failed to create an amphora due to: %s', str(e))

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        try:
            amphora = self._amphora_repo.get(db_apis.get_session(),
                                             id=amphora_id)
            store = {constants.AMPHORA: amphora.to_dict()}
            self.run_flow(flow_utils.get_delete_amphora_flow, store=store)
        except Exception as e:
            LOG.error('Failed to delete amphora %s due to: %s', amphora_id,
                      str(e))
            return
        LOG.info('Finished deleting amphora %s.', amphora_id)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        if not db_health_monitor:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'healthmonitor',
                health_monitor[constants.HEALTHMONITOR_ID])
            raise db_exceptions.NoResultFound

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_health_monitor_flow, store=store)

    def delete_health_monitor(self, health_monitor):
        """Deletes a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.PROJECT_ID: load_balancer.project_id
        }
        self.run_flow(flow_utils.get_delete_health_monitor_flow, store=store)

    def update_health_monitor(self, original_health_monitor,
                              health_monitor_updates):
        """Updates a health monitor.

        :param original_health_monitor: Provider health monitor dict
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        try:
            db_health_monitor = self._get_db_obj_until_pending_update(
                self._health_mon_repo,
                original_health_monitor[constants.HEALTHMONITOR_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_health_monitor = e.last_attempt.result()

        pool = db_health_monitor.pool

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.HEALTH_MON: original_health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.UPDATE_DICT: health_monitor_updates
        }
        self.run_flow(flow_utils.get_update_health_monitor_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_listener(self, listener):
        """Creates a listener.

        :param listener: A listener provider dictionary.
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=listener[constants.LISTENER_ID])
        if not db_listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener[constants.LISTENER_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_listener.load_balancer
        listeners = load_balancer.listeners
        dict_listeners = []
        for li in listeners:
            dict_listeners.append(
                provider_utils.db_listener_to_provider_listener(li).to_dict())
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.LISTENERS: dict_listeners,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id
        }

        self.run_flow(flow_utils.get_create_listener_flow, store=store)

    def delete_listener(self, listener):
        """Deletes a listener.

        :param listener: A listener provider dictionary to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        store = {
            constants.LISTENER: listener,
            constants.LOADBALANCER_ID: listener[constants.LOADBALANCER_ID],
            constants.PROJECT_ID: listener[constants.PROJECT_ID]
        }
        self.run_flow(flow_utils.get_delete_listener_flow, store=store)

    def update_listener(self, listener, listener_updates):
        """Updates a listener.

        :param listener: A listener provider dictionary to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        db_lb = self._lb_repo.get(db_apis.get_session(),
                                  id=listener[constants.LOADBALANCER_ID])
        store = {
            constants.LISTENER: listener,
            constants.UPDATE_DICT: listener_updates,
            constants.LOADBALANCER_ID: db_lb.id,
            constants.LISTENERS: [listener]
        }
        self.run_flow(flow_utils.get_update_listener_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_load_balancer(self,
                             loadbalancer,
                             flavor=None,
                             availability_zone=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available, it will attempt to build one specifically
        for this load balancer.

        :param loadbalancer: The dict of load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=loadbalancer[constants.LOADBALANCER_ID])
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer',
                loadbalancer[constants.LOADBALANCER_ID])
            raise db_exceptions.NoResultFound

        store = {
            lib_consts.LOADBALANCER_ID:
            loadbalancer[lib_consts.LOADBALANCER_ID],
            constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
            lib_consts.FLAVOR: flavor,
            lib_consts.AVAILABILITY_ZONE: availability_zone
        }

        topology = lb.topology
        if (not CONF.nova.enable_anti_affinity
                or topology == constants.TOPOLOGY_SINGLE):
            store[constants.SERVER_GROUP_ID] = None

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                lb.listeners))

        store[constants.UPDATE_DICT] = {constants.TOPOLOGY: topology}
        self.run_flow(flow_utils.get_create_load_balancer_flow,
                      topology,
                      listeners=listeners_dicts,
                      store=store)

    def delete_load_balancer(self, load_balancer, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer: Dict of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        loadbalancer_id = load_balancer[constants.LOADBALANCER_ID]
        db_lb = self._lb_repo.get(db_apis.get_session(), id=loadbalancer_id)
        store = {
            constants.LOADBALANCER: load_balancer,
            constants.LOADBALANCER_ID: loadbalancer_id,
            constants.SERVER_GROUP_ID: db_lb.server_group_id,
            constants.PROJECT_ID: db_lb.project_id
        }
        if cascade:
            listeners = flow_utils.get_listeners_on_lb(db_lb)
            pools = flow_utils.get_pools_on_lb(db_lb)

            self.run_flow(flow_utils.get_cascade_delete_load_balancer_flow,
                          load_balancer,
                          listeners,
                          pools,
                          store=store)
        else:
            self.run_flow(flow_utils.get_delete_load_balancer_flow,
                          load_balancer,
                          store=store)

    def update_load_balancer(self, original_load_balancer,
                             load_balancer_updates):
        """Updates a load balancer.

        :param original_load_balancer: Dict of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        store = {
            constants.LOADBALANCER:
            original_load_balancer,
            constants.LOADBALANCER_ID:
            original_load_balancer[constants.LOADBALANCER_ID],
            constants.UPDATE_DICT:
            load_balancer_updates
        }

        self.run_flow(flow_utils.get_update_load_balancer_flow, store=store)

    def create_member(self, member):
        """Creates a pool member.

        :param member: A member provider dictionary to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_create_member_flow, store=store)

    def delete_member(self, member):
        """Deletes a pool member.

        :param member: A member provider dictionary to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_delete_member_flow, store=store)

    def batch_update_members(self, old_members, new_members, updated_members):
        updated_members = [(provider_utils.db_member_to_provider_member(
            self._member_repo.get(db_apis.get_session(),
                                  id=m.get(constants.ID))).to_dict(), m)
                           for m in updated_members]
        provider_old_members = [
            provider_utils.db_member_to_provider_member(
                self._member_repo.get(db_apis.get_session(),
                                      id=m.get(constants.ID))).to_dict()
            for m in old_members
        ]
        if old_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=old_members[0][constants.POOL_ID])
        elif new_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=new_members[0][constants.POOL_ID])
        else:
            pool = self._pool_repo.get(
                db_apis.get_session(),
                id=updated_members[0][0][constants.POOL_ID])
        load_balancer = pool.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_batch_update_members_flow,
                      provider_old_members,
                      new_members,
                      updated_members,
                      store=store)

    def update_member(self, member, member_updates):
        """Updates a pool member.

        :param member: A member provider dictionary to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        # TODO(ataraday): revisit this when other flows use dicts
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.UPDATE_DICT: member_updates
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_update_member_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_pool(self, pool):
        """Creates a node pool.

        :param pool: Provider pool dict to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """

        # TODO(ataraday) It seems we need to get db pool here anyway to get
        # proper listeners
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])
        if not db_pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool[constants.POOL_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_pool_flow, store=store)

    def delete_pool(self, pool):
        """Deletes a node pool.

        :param pool: Provider pool dict to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        load_balancer = db_pool.load_balancer

        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.PROJECT_ID: db_pool.project_id
        }
        self.run_flow(flow_utils.get_delete_pool_flow, store=store)

    def update_pool(self, origin_pool, pool_updates):
        """Updates a node pool.

        :param origin_pool: Provider pool dict to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        try:
            db_pool = self._get_db_obj_until_pending_update(
                self._pool_repo, origin_pool[constants.POOL_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Pool did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_pool = e.last_attempt.result()

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {
            constants.POOL_ID: db_pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.UPDATE_DICT: pool_updates
        }
        self.run_flow(flow_utils.get_update_pool_flow, store=store)

    def create_l7policy(self, l7policy):
        """Creates an L7 Policy.

        :param l7policy: Provider dict of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id
        }
        self.run_flow(flow_utils.get_create_l7policy_flow, store=store)

    def delete_l7policy(self, l7policy):
        """Deletes an L7 policy.

        :param l7policy: Provider dict of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])
        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id
        }
        self.run_flow(flow_utils.get_delete_l7policy_flow, store=store)

    def update_l7policy(self, original_l7policy, l7policy_updates):
        """Updates an L7 policy.

        :param original_l7policy: Provider dict of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=original_l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: original_l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id,
            constants.UPDATE_DICT: l7policy_updates
        }
        self.run_flow(flow_utils.get_update_l7policy_flow, store=store)

    def create_l7rule(self, l7rule):
        """Creates an L7 Rule.

        :param l7rule: Provider dict of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])

        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        store = {
            constants.L7RULE: l7rule,
            constants.L7POLICY: l7policy_dict.to_dict(),
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id
        }
        self.run_flow(flow_utils.get_create_l7rule_flow, store=store)

    def delete_l7rule(self, l7rule):
        """Deletes an L7 rule.

        :param l7rule: Provider dict of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])
        l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy)
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))

        store = {
            constants.L7RULE: l7rule,
            constants.L7POLICY: l7policy.to_dict(),
            constants.LISTENERS: listeners_dicts,
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LOADBALANCER_ID: load_balancer.id
        }
        self.run_flow(flow_utils.get_delete_l7rule_flow, store=store)

    def update_l7rule(self, original_l7rule, l7rule_updates):
        """Updates an L7 rule.

        :param original_l7rule: Provider dict of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(
            db_apis.get_session(), id=original_l7rule[constants.L7POLICY_ID])
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        store = {
            constants.L7RULE: original_l7rule,
            constants.L7POLICY: l7policy_dict.to_dict(),
            constants.LISTENERS: listeners_dicts,
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.UPDATE_DICT: l7rule_updates
        }
        self.run_flow(flow_utils.get_update_l7rule_flow, store=store)

    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The referenced amphora was
                                                    not found
        """
        amphora = None
        try:
            amphora = self._amphora_repo.get(db_apis.get_session(),
                                             id=amphora_id)
            if amphora is None:
                LOG.error(
                    'Amphora failover for amphora %s failed because '
                    'there is no record of this amphora in the '
                    'database. Check that the [house_keeping] '
                    'amphora_expiry_age configuration setting is not '
                    'too short. Skipping failover.', amphora_id)
                raise exceptions.NotFound(resource=constants.AMPHORA,
                                          id=amphora_id)

            if amphora.status == constants.DELETED:
                LOG.warning(
                    'Amphora %s is marked DELETED in the database but '
                    'was submitted for failover. Deleting it from the '
                    'amphora health table to exclude it from health '
                    'checks and skipping the failover.', amphora.id)
                self._amphora_health_repo.delete(db_apis.get_session(),
                                                 amphora_id=amphora.id)
                return

            loadbalancer = None
            if amphora.load_balancer_id:
                loadbalancer = self._lb_repo.get(db_apis.get_session(),
                                                 id=amphora.load_balancer_id)
            lb_amp_count = None
            if loadbalancer:
                if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
                    lb_amp_count = 2
                elif loadbalancer.topology == constants.TOPOLOGY_SINGLE:
                    lb_amp_count = 1

            az_metadata = {}
            flavor_dict = {}
            lb_id = None
            vip_dict = {}
            server_group_id = None
            if loadbalancer:
                lb_id = loadbalancer.id
                # Even if the LB doesn't have a flavor, create one and
                # pass through the topology.
                if loadbalancer.flavor_id:
                    flavor_dict = self._flavor_repo.get_flavor_metadata_dict(
                        db_apis.get_session(), loadbalancer.flavor_id)
                    flavor_dict[constants.LOADBALANCER_TOPOLOGY] = (
                        loadbalancer.topology)
                else:
                    flavor_dict = {
                        constants.LOADBALANCER_TOPOLOGY: loadbalancer.topology
                    }
                if loadbalancer.availability_zone:
                    az_metadata = (
                        self._az_repo.get_availability_zone_metadata_dict(
                            db_apis.get_session(),
                            loadbalancer.availability_zone))
                vip_dict = loadbalancer.vip.to_dict()
                server_group_id = loadbalancer.server_group_id
            provider_lb_dict = (
                provider_utils.db_loadbalancer_to_provider_loadbalancer(
                    loadbalancer).to_dict() if loadbalancer else loadbalancer)

            stored_params = {
                constants.AVAILABILITY_ZONE: az_metadata,
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
                constants.FLAVOR: flavor_dict,
                constants.LOADBALANCER: provider_lb_dict,
                constants.SERVER_GROUP_ID: server_group_id,
                constants.LOADBALANCER_ID: lb_id,
                constants.VIP: vip_dict
            }

            self.run_flow(flow_utils.get_failover_amphora_flow,
                          amphora.to_dict(),
                          lb_amp_count,
                          store=stored_params,
                          wait=True)
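            # wait=True asks run_flow to block until the failover flow has
            # finished, so the success message below reflects a completed
            # failover.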

            LOG.info(
                "Successfully completed the failover for an amphora: %s", {
                    "id": amphora_id,
                    "load_balancer_id": lb_id,
                    "lb_network_ip": amphora.lb_network_ip,
                    "compute_id": amphora.compute_id,
                    "role": amphora.role
                })

        except Exception as e:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("Amphora %s failover exception: %s", amphora_id,
                              str(e))
                self._amphora_repo.update(db_apis.get_session(),
                                          amphora_id,
                                          status=constants.ERROR)
                if amphora and amphora.load_balancer_id:
                    self._lb_repo.update(db_apis.get_session(),
                                         amphora.load_balancer_id,
                                         provisioning_status=constants.ERROR)

    @staticmethod
    def _get_amphorae_for_failover(load_balancer):
        """Returns an ordered list of amphora to failover.

        :param load_balancer: The load balancer being failed over.
        :returns: An ordered list of amphora to failover,
                  first amp to failover is last in the list
        :raises octavia.common.exceptions.InvalidTopology: LB has an unknown
                                                           topology.
        """
        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
            # In SINGLE topology, amp failover order does not matter
            return [
                a.to_dict() for a in load_balancer.amphorae
                if a.status != constants.DELETED
            ]

        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # In Active/Standby we should prefer the standby amp
            # for failover first in case the Active is still able to pass
            # traffic.
            # Note: The active amp can switch at any time and in less than a
            #       second, so this is "best effort".
            amphora_driver = utils.get_amphora_driver()
            timeout_dict = {
                constants.CONN_MAX_RETRIES:
                CONF.haproxy_amphora.failover_connection_max_retries,
                constants.CONN_RETRY_INTERVAL:
                CONF.haproxy_amphora.failover_connection_retry_interval
            }
            amps = []
            selected_amp = None
            for amp in load_balancer.amphorae:
                if amp.status == constants.DELETED:
                    continue
                if selected_amp is None:
                    try:
                        if amphora_driver.get_interface_from_ip(
                                amp, load_balancer.vip.ip_address,
                                timeout_dict):
                            # This is a potential ACTIVE, add it to the list
                            amps.append(amp.to_dict())
                        else:
                            # This one doesn't have the VIP IP, so start
                            # failovers here.
                            selected_amp = amp
                            LOG.debug(
                                "Selected amphora %s as the initial "
                                "failover amphora.", amp.id)
                    except Exception:
                        # This amphora is broken, so start failovers here.
                        selected_amp = amp
                else:
                    # We have already found a STANDBY, so add the rest to the
                    # list without querying them.
                    amps.append(amp.to_dict())
            # Put the selected amphora at the end of the list so it is
            # first to failover.
            if selected_amp:
                amps.append(selected_amp.to_dict())
            return amps

        LOG.error(
            'Unknown load balancer topology found: %s, aborting '
            'failover.', load_balancer.topology)
        raise exceptions.InvalidTopology(topology=load_balancer.topology)

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The load balancer was not
                                                    found.
        """
        try:
            lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
            if lb is None:
                raise exceptions.NotFound(resource=constants.LOADBALANCER,
                                          id=load_balancer_id)

            # Get the ordered list of amphorae to failover for this LB.
            amps = self._get_amphorae_for_failover(lb)

            if lb.topology == constants.TOPOLOGY_SINGLE:
                if len(amps) != 1:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'one should exist. Repairing.', len(amps),
                        load_balancer_id)
            elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:

                if len(amps) != 2:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'two should exist. Repairing.', len(amps),
                        load_balancer_id)
            else:
                LOG.error(
                    'Unknown load balancer topology found: %s, aborting '
                    'failover!', lb.topology)
                raise exceptions.InvalidTopology(topology=lb.topology)

            # We must provide a topology in the flavor definition
            # here for the amphora to be created with the correct
            # configuration.
            if lb.flavor_id:
                flavor = self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id)
                flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
            else:
                flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}

            provider_lb_dict = (
                provider_utils.db_loadbalancer_to_provider_loadbalancer(
                    lb).to_dict() if lb else lb)

            provider_lb_dict[constants.FLAVOR] = flavor

            stored_params = {
                constants.LOADBALANCER: provider_lb_dict,
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
                constants.SERVER_GROUP_ID: lb.server_group_id,
                constants.LOADBALANCER_ID: lb.id,
                constants.FLAVOR: flavor
            }

            if lb.availability_zone:
                stored_params[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), lb.availability_zone))
            else:
                stored_params[constants.AVAILABILITY_ZONE] = {}

            self.run_flow(flow_utils.get_failover_LB_flow,
                          amps,
                          provider_lb_dict,
                          store=stored_params,
                          wait=True)

            LOG.info('Failover of load balancer %s completed successfully.',
                     lb.id)

        except Exception as e:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("LB %(lbid)s failover exception: %(exc)s", {
                    'lbid': load_balancer_id,
                    'exc': str(e)
                })
                self._lb_repo.update(db_apis.get_session(),
                                     load_balancer_id,
                                     provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s",
                 amphora_id)

        store = {
            constants.AMPHORA: amp.to_dict(),
            constants.AMPHORA_ID: amphora_id
        }

        self.run_flow(flow_utils.cert_rotate_amphora_flow, store=store)
        LOG.info("Finished amphora cert rotation, amphora's id was: %s",
                 amphora_id)

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
              update the running configuration for mutable configuration
              items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info(
            "Start amphora agent configuration update, amphora's id "
            "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        store = {constants.AMPHORA: amp.to_dict(), constants.FLAVOR: flavor}

        self.run_flow(flow_utils.update_amphora_config_flow, store=store)
        LOG.info(
            "Finished amphora agent configuration update, amphora's id "
            "was: %s", amphora_id)
class LoadBalancerBaseTest(test.BaseTestCase):
    """Base class for load balancer tests."""

    # Set up the cls.os_admin and cls.os_roles_lb_member credentials.
    credentials = ['admin', ['lb_member', CONF.load_balancer.member_role]]

    client_manager = clients.ManagerV2
    webserver1_response = 1
    webserver2_response = 5
    used_ips = []

    @classmethod
    def skip_checks(cls):
        """Check if we should skip all of the children tests."""
        super(LoadBalancerBaseTest, cls).skip_checks()

        service_list = {
            'load_balancer': CONF.service_available.load_balancer,
        }

        live_service_list = {
            'compute': CONF.service_available.nova,
            'image': CONF.service_available.glance,
            'neutron': CONF.service_available.neutron
        }

        if not CONF.load_balancer.test_with_noop:
            service_list.update(live_service_list)

        for service, available in service_list.items():
            if not available:
                skip_msg = ("{0} skipped as {1} service is not "
                            "available.".format(cls.__name__, service))
                raise cls.skipException(skip_msg)

        # We must be able to reach our VIP and instances
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)

    @classmethod
    def setup_credentials(cls):
        """Setup test credentials and network resources."""
        # Do not auto create network resources
        cls.set_network_resources()
        super(LoadBalancerBaseTest, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        """Setup client aliases."""
        super(LoadBalancerBaseTest, cls).setup_clients()
        cls.lb_mem_float_ip_client = cls.os_roles_lb_member.floating_ips_client
        cls.lb_mem_keypairs_client = cls.os_roles_lb_member.keypairs_client
        cls.lb_mem_net_client = cls.os_roles_lb_member.networks_client
        cls.lb_mem_ports_client = cls.os_roles_lb_member.ports_client
        cls.lb_mem_routers_client = cls.os_roles_lb_member.routers_client
        cls.lb_mem_SG_client = cls.os_roles_lb_member.security_groups_client
        cls.lb_mem_SGr_client = (
            cls.os_roles_lb_member.security_group_rules_client)
        cls.lb_mem_servers_client = cls.os_roles_lb_member.servers_client
        cls.lb_mem_subnet_client = cls.os_roles_lb_member.subnets_client
        cls.mem_lb_client = cls.os_roles_lb_member.loadbalancer_client
        cls.mem_listener_client = cls.os_roles_lb_member.listener_client
        cls.mem_pool_client = cls.os_roles_lb_member.pool_client
        cls.mem_member_client = cls.os_roles_lb_member.member_client
        cls.mem_healthmonitor_client = (
            cls.os_roles_lb_member.healthmonitor_client)
        cls.mem_l7policy_client = cls.os_roles_lb_member.l7policy_client
        cls.mem_l7rule_client = cls.os_roles_lb_member.l7rule_client
        cls.lb_admin_amphora_client = cls.os_admin.amphora_client
        cls.lb_admin_flavor_profile_client = (
            cls.os_admin.flavor_profile_client)
        cls.lb_admin_flavor_client = cls.os_admin.flavor_client
        cls.mem_flavor_client = cls.os_roles_lb_member.flavor_client
        cls.mem_provider_client = cls.os_roles_lb_member.provider_client
        cls.os_admin_servers_client = cls.os_admin.servers_client
        cls.lb_admin_capabilities_client = (
            cls.os_admin.flavor_capabilities_client)

    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests."""
        super(LoadBalancerBaseTest, cls).resource_setup()

        conf_lb = CONF.load_balancer

        cls.api_version = cls.mem_lb_client.get_max_api_version()

        if conf_lb.test_subnet_override and not conf_lb.test_network_override:
            raise exceptions.InvalidConfiguration(
                "Configuration value test_network_override must be "
                "specified if test_subnet_override is used.")

        show_subnet = cls.lb_mem_subnet_client.show_subnet
        if CONF.load_balancer.test_with_noop:
            cls.lb_member_vip_net = {'id': uuidutils.generate_uuid()}
            cls.lb_member_vip_subnet = {'id': uuidutils.generate_uuid()}
            cls.lb_member_1_net = {'id': uuidutils.generate_uuid()}
            cls.lb_member_1_subnet = {'id': uuidutils.generate_uuid()}
            cls.lb_member_2_net = {'id': uuidutils.generate_uuid()}
            cls.lb_member_2_subnet = {'id': uuidutils.generate_uuid()}
            if CONF.load_balancer.test_with_ipv6:
                cls.lb_member_vip_ipv6_net = {'id': uuidutils.generate_uuid()}
                cls.lb_member_vip_ipv6_subnet = {
                    'id': uuidutils.generate_uuid()
                }
                cls.lb_member_1_ipv6_subnet = {'id': uuidutils.generate_uuid()}
                cls.lb_member_2_ipv6_subnet = {'id': uuidutils.generate_uuid()}
                cls.lb_member_vip_ipv6_subnet_stateful = True
            return
        elif CONF.load_balancer.test_network_override:
            if conf_lb.test_subnet_override:
                override_subnet = show_subnet(conf_lb.test_subnet_override)
            else:
                override_subnet = None

            show_net = cls.lb_mem_net_client.show_network
            override_network = show_net(conf_lb.test_network_override)
            override_network = override_network.get('network')

            cls.lb_member_vip_net = override_network
            cls.lb_member_vip_subnet = override_subnet
            cls.lb_member_1_net = override_network
            cls.lb_member_1_subnet = override_subnet
            cls.lb_member_2_net = override_network
            cls.lb_member_2_subnet = override_subnet

            if (CONF.load_balancer.test_with_ipv6
                    and conf_lb.test_IPv6_subnet_override):
                override_ipv6_subnet = show_subnet(
                    conf_lb.test_IPv6_subnet_override)
                cls.lb_member_vip_ipv6_subnet = override_ipv6_subnet
                cls.lb_member_1_ipv6_subnet = override_ipv6_subnet
                cls.lb_member_2_ipv6_subnet = override_ipv6_subnet
                cls.lb_member_vip_ipv6_subnet_stateful = False
                if (override_ipv6_subnet[0]['ipv6_address_mode'] ==
                        'dhcpv6-stateful'):
                    cls.lb_member_vip_ipv6_subnet_stateful = True
            else:
                cls.lb_member_vip_ipv6_subnet = None
                cls.lb_member_1_ipv6_subnet = None
                cls.lb_member_2_ipv6_subnet = None
        else:
            cls._create_networks()

        LOG.debug('Octavia Setup: lb_member_vip_net = {}'.format(
            cls.lb_member_vip_net[const.ID]))
        if cls.lb_member_vip_subnet:
            LOG.debug('Octavia Setup: lb_member_vip_subnet = {}'.format(
                cls.lb_member_vip_subnet[const.ID]))
        LOG.debug('Octavia Setup: lb_member_1_net = {}'.format(
            cls.lb_member_1_net[const.ID]))
        if cls.lb_member_1_subnet:
            LOG.debug('Octavia Setup: lb_member_1_subnet = {}'.format(
                cls.lb_member_1_subnet[const.ID]))
        LOG.debug('Octavia Setup: lb_member_2_net = {}'.format(
            cls.lb_member_2_net[const.ID]))
        if cls.lb_member_2_subnet:
            LOG.debug('Octavia Setup: lb_member_2_subnet = {}'.format(
                cls.lb_member_2_subnet[const.ID]))
        if CONF.load_balancer.test_with_ipv6:
            if cls.lb_member_vip_ipv6_subnet:
                LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = '
                          '{}'.format(cls.lb_member_vip_ipv6_subnet[const.ID]))
            if cls.lb_member_1_ipv6_subnet:
                LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format(
                    cls.lb_member_1_ipv6_subnet[const.ID]))
            if cls.lb_member_2_ipv6_subnet:
                LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format(
                    cls.lb_member_2_ipv6_subnet[const.ID]))

    @classmethod
    # Neutron can be slow to clean up ports from the subnets/networks.
    # Retry this delete a few times if we get a "Conflict" error to give
    # neutron time to fully cleanup the ports.
    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(exceptions.Conflict),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _logging_delete_network(cls, net_id):
        try:
            cls.lb_mem_net_client.delete_network(net_id)
        except Exception:
            LOG.error(
                'Unable to delete network {}. Active ports:'.format(net_id))
            LOG.error(cls.lb_mem_ports_client.list_ports())
            raise

    @classmethod
    # Neutron can be slow to clean up ports from the subnets/networks.
    # Retry this delete a few times if we get a "Conflict" error to give
    # neutron time to fully cleanup the ports.
    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(exceptions.Conflict),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _logging_delete_subnet(cls, subnet_id):
        try:
            cls.lb_mem_subnet_client.delete_subnet(subnet_id)
        except Exception:
            LOG.error(
                'Unable to delete subnet {}. Active ports:'.format(subnet_id))
            LOG.error(cls.lb_mem_ports_client.list_ports())
            raise
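
    # These delete helpers are registered via addClassResourceCleanup in
    # _create_networks below, paired with waiters.wait_for_not_found, so
    # teardown retries the delete on Conflict and then waits for the
    # resource to actually disappear.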

    @classmethod
    def _create_networks(cls):
        """Creates networks, subnets, and routers used in tests.

        The following are expected to be defined and available to the tests:
            cls.lb_member_vip_net
            cls.lb_member_vip_subnet
            cls.lb_member_vip_ipv6_subnet (optional)
            cls.lb_member_1_net
            cls.lb_member_1_subnet
            cls.lb_member_1_ipv6_subnet (optional)
            cls.lb_member_2_net
            cls.lb_member_2_subnet
            cls.lb_member_2_ipv6_subnet (optional)
        """

        # Create tenant VIP network
        network_kwargs = {
            'name': data_utils.rand_name("lb_member_vip_network")
        }
        if CONF.network_feature_enabled.port_security:
            # Note: Allowed Address Pairs requires port security
            network_kwargs['port_security_enabled'] = True
        result = cls.lb_mem_net_client.create_network(**network_kwargs)
        cls.lb_member_vip_net = result['network']
        LOG.info('lb_member_vip_net: {}'.format(cls.lb_member_vip_net))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_network,
                                    cls.lb_mem_net_client.show_network,
                                    cls.lb_member_vip_net['id'])

        # Create tenant VIP subnet
        subnet_kwargs = {
            'name': data_utils.rand_name("lb_member_vip_subnet"),
            'network_id': cls.lb_member_vip_net['id'],
            'cidr': CONF.load_balancer.vip_subnet_cidr,
            'ip_version': 4
        }
        result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
        cls.lb_member_vip_subnet = result['subnet']
        LOG.info('lb_member_vip_subnet: {}'.format(cls.lb_member_vip_subnet))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_subnet,
                                    cls.lb_mem_subnet_client.show_subnet,
                                    cls.lb_member_vip_subnet['id'])

        # Create tenant VIP IPv6 subnet
        if CONF.load_balancer.test_with_ipv6:
            # See if ipv6-private-subnet exists and use it if so.
            priv_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
                name='ipv6-private-subnet')['subnets']

            cls.lb_member_vip_ipv6_subnet_stateful = False
            if len(priv_ipv6_subnet) == 1:
                if (priv_ipv6_subnet[0]['ipv6_address_mode'] ==
                        'dhcpv6-stateful'):
                    cls.lb_member_vip_ipv6_subnet_stateful = True
                cls.lb_member_vip_ipv6_subnet = priv_ipv6_subnet[0]
                cls.lb_member_vip_ipv6_net = {
                    'id': priv_ipv6_subnet[0]['network_id']
                }
            else:
                subnet_kwargs = {
                    'name': data_utils.rand_name("lb_member_vip_ipv6_subnet"),
                    'network_id': cls.lb_member_vip_net['id'],
                    'cidr': CONF.load_balancer.vip_ipv6_subnet_cidr,
                    'ip_version': 6
                }
                result = cls.lb_mem_subnet_client.create_subnet(
                    **subnet_kwargs)
                cls.lb_member_vip_ipv6_net = cls.lb_member_vip_net
                cls.lb_member_vip_ipv6_subnet = result['subnet']
                cls.addClassResourceCleanup(
                    waiters.wait_for_not_found, cls._logging_delete_subnet,
                    cls.lb_mem_subnet_client.show_subnet,
                    cls.lb_member_vip_ipv6_subnet['id'])

            LOG.info('lb_member_vip_ipv6_subnet: {}'.format(
                cls.lb_member_vip_ipv6_subnet))

        # Create tenant member 1 network
        network_kwargs = {'name': data_utils.rand_name("lb_member_1_network")}
        if CONF.network_feature_enabled.port_security:
            if CONF.load_balancer.enable_security_groups:
                network_kwargs['port_security_enabled'] = True
            else:
                network_kwargs['port_security_enabled'] = False
        result = cls.lb_mem_net_client.create_network(**network_kwargs)
        cls.lb_member_1_net = result['network']
        LOG.info('lb_member_1_net: {}'.format(cls.lb_member_1_net))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_network,
                                    cls.lb_mem_net_client.show_network,
                                    cls.lb_member_1_net['id'])

        # Create tenant member 1 subnet
        subnet_kwargs = {
            'name': data_utils.rand_name("lb_member_1_subnet"),
            'network_id': cls.lb_member_1_net['id'],
            'cidr': CONF.load_balancer.member_1_ipv4_subnet_cidr,
            'ip_version': 4
        }
        result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
        cls.lb_member_1_subnet = result['subnet']
        LOG.info('lb_member_1_subnet: {}'.format(cls.lb_member_1_subnet))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_subnet,
                                    cls.lb_mem_subnet_client.show_subnet,
                                    cls.lb_member_1_subnet['id'])

        # Create tenant member 1 ipv6 subnet
        if CONF.load_balancer.test_with_ipv6:
            subnet_kwargs = {
                'name': data_utils.rand_name("lb_member_1_ipv6_subnet"),
                'network_id': cls.lb_member_1_net['id'],
                'cidr': CONF.load_balancer.member_1_ipv6_subnet_cidr,
                'ip_version': 6
            }
            result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
            cls.lb_member_1_subnet_prefix = (
                CONF.load_balancer.member_1_ipv6_subnet_cidr.rpartition('/')[2]
            )
            assert (cls.lb_member_1_subnet_prefix.isdigit())
            cls.lb_member_1_ipv6_subnet = result['subnet']
            LOG.info('lb_member_1_ipv6_subnet: {}'.format(
                cls.lb_member_1_ipv6_subnet))
            cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                        cls._logging_delete_subnet,
                                        cls.lb_mem_subnet_client.show_subnet,
                                        cls.lb_member_1_ipv6_subnet['id'])

        # Create tenant member 2 network
        network_kwargs = {'name': data_utils.rand_name("lb_member_2_network")}
        if CONF.network_feature_enabled.port_security:
            if CONF.load_balancer.enable_security_groups:
                network_kwargs['port_security_enabled'] = True
            else:
                network_kwargs['port_security_enabled'] = False
        result = cls.lb_mem_net_client.create_network(**network_kwargs)
        cls.lb_member_2_net = result['network']
        LOG.info('lb_member_2_net: {}'.format(cls.lb_member_2_net))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_network,
                                    cls.lb_mem_net_client.show_network,
                                    cls.lb_member_2_net['id'])

        # Create tenant member 2 subnet
        subnet_kwargs = {
            'name': data_utils.rand_name("lb_member_2_subnet"),
            'network_id': cls.lb_member_2_net['id'],
            'cidr': CONF.load_balancer.member_2_ipv4_subnet_cidr,
            'ip_version': 4
        }
        result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
        cls.lb_member_2_subnet = result['subnet']
        LOG.info('lb_member_2_subnet: {}'.format(cls.lb_member_2_subnet))
        cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                    cls._logging_delete_subnet,
                                    cls.lb_mem_subnet_client.show_subnet,
                                    cls.lb_member_2_subnet['id'])

        # Create tenant member 2 ipv6 subnet
        if CONF.load_balancer.test_with_ipv6:
            subnet_kwargs = {
                'name': data_utils.rand_name("lb_member_2_ipv6_subnet"),
                'network_id': cls.lb_member_2_net['id'],
                'cidr': CONF.load_balancer.member_2_ipv6_subnet_cidr,
                'ip_version': 6
            }
            result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
            cls.lb_member_2_subnet_prefix = (
                CONF.load_balancer.member_2_ipv6_subnet_cidr.rpartition('/')[2]
            )
            assert (cls.lb_member_2_subnet_prefix.isdigit())
            cls.lb_member_2_ipv6_subnet = result['subnet']
            LOG.info('lb_member_2_ipv6_subnet: {}'.format(
                cls.lb_member_2_ipv6_subnet))
            cls.addClassResourceCleanup(waiters.wait_for_not_found,
                                        cls._logging_delete_subnet,
                                        cls.lb_mem_subnet_client.show_subnet,
                                        cls.lb_member_2_ipv6_subnet['id'])

    @classmethod
    def _setup_lb_network_kwargs(cls,
                                 lb_kwargs,
                                 ip_version=None,
                                 use_fixed_ip=False):
        if not ip_version:
            ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
        if cls.lb_member_vip_subnet or cls.lb_member_vip_ipv6_subnet:
            ip_index = data_utils.rand_int_id(start=10, end=100)
            while ip_index in cls.used_ips:
                ip_index = data_utils.rand_int_id(start=10, end=100)
            cls.used_ips.append(ip_index)
            if ip_version == 4:
                subnet_id = cls.lb_member_vip_subnet[const.ID]
                if CONF.load_balancer.test_with_noop:
                    lb_vip_address = '198.18.33.33'
                else:
                    subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
                    network = ipaddress.IPv4Network(subnet['subnet']['cidr'])
                    lb_vip_address = str(network[ip_index])
            else:
                subnet_id = cls.lb_member_vip_ipv6_subnet[const.ID]
                if CONF.load_balancer.test_with_noop:
                    lb_vip_address = '2001:db8:33:33:33:33:33:33'
                else:
                    subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
                    network = ipaddress.IPv6Network(subnet['subnet']['cidr'])
                    lb_vip_address = str(network[ip_index])
                    # If the subnet is IPv6 slaac or dhcpv6-stateless
                    # neutron does not allow a fixed IP
                    if not cls.lb_member_vip_ipv6_subnet_stateful:
                        use_fixed_ip = False
            lb_kwargs[const.VIP_SUBNET_ID] = subnet_id
            if use_fixed_ip:
                lb_kwargs[const.VIP_ADDRESS] = lb_vip_address
            if CONF.load_balancer.test_with_noop:
                lb_kwargs[const.VIP_NETWORK_ID] = (
                    cls.lb_member_vip_net[const.ID])
        else:
            lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
            lb_kwargs[const.VIP_SUBNET_ID] = None
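

# A short standalone sketch of the VIP address selection used in
# _setup_lb_network_kwargs above: indexing an ipaddress network object
# returns the Nth address in the CIDR, giving a deterministic VIP candidate
# for the test. The CIDR and index below are example values only.
import ipaddress

_example_cidr = '10.1.1.0/24'
_ip_index = 42
_vip_candidate = str(ipaddress.IPv4Network(_example_cidr)[_ip_index])
assert _vip_candidate == '10.1.1.42'

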
class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
    def __init__(self):

        self._amphora_flows = amphora_flows.AmphoraFlows()
        self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows()
        self._lb_flows = load_balancer_flows.LoadBalancerFlows()
        self._listener_flows = listener_flows.ListenerFlows()
        self._member_flows = member_flows.MemberFlows()
        self._pool_flows = pool_flows.PoolFlows()
        self._l7policy_flows = l7policy_flows.L7PolicyFlows()
        self._l7rule_flows = l7rule_flows.L7RuleFlows()

        self._amphora_repo = repo.AmphoraRepository()
        self._amphora_health_repo = repo.AmphoraHealthRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = repo.PoolRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()
        self._az_repo = repo.AvailabilityZoneRepository()

        super(ControllerWorker, self).__init__()

    @tenacity.retry(
        retry=(tenacity.retry_if_result(_is_provisioning_status_pending_update)
               | tenacity.retry_if_exception_type()),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _get_db_obj_until_pending_update(self, repo, id):

        return repo.get(db_apis.get_session(), id=id)
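
    # The retry above re-reads the object until its provisioning status
    # reaches PENDING_UPDATE (and also retries on any exception), backing
    # off incrementally between attempts. Callers catch tenacity.RetryError
    # and fall back to e.last_attempt.result() if the status never changes.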

    def create_amphora(self, availability_zone=None):
        """Creates an Amphora.

        This is used to create a spare amphora.

        :param availability_zone: Availability zone metadata dict, if any
        :returns: amphora_id
        """
        try:
            store = {
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_SPARES_POOL_PRIORITY,
                constants.FLAVOR: None,
                constants.AVAILABILITY_ZONE: None
            }
            if availability_zone:
                store[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
            create_amp_tf = self._taskflow_load(
                self._amphora_flows.get_create_amphora_flow(), store=store)
            with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
                create_amp_tf.run()

            return create_amp_tf.storage.fetch('amphora')
        except Exception as e:
            LOG.error('Failed to create an amphora due to: {}'.format(str(e)))

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        amphora = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        delete_amp_tf = self._taskflow_load(
            self._amphora_flows.get_delete_amphora_flow(),
            store={constants.AMPHORA: amphora.to_dict()})
        with tf_logging.DynamicLoggingListener(delete_amp_tf, log=LOG):
            delete_amp_tf.run()

    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb
            })
        with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
            create_hm_tf.run()

    def delete_health_monitor(self, health_monitor):
        """Deletes a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.PROJECT_ID: load_balancer.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()

    def update_health_monitor(self, original_health_monitor,
                              health_monitor_updates):
        """Updates a health monitor.

        :param original_health_monitor: Provider health monitor dict
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        try:
            db_health_monitor = self._get_db_obj_until_pending_update(
                self._health_mon_repo,
                original_health_monitor[constants.HEALTHMONITOR_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_health_monitor = e.last_attempt.result()

        pool = db_health_monitor.pool

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        update_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_update_health_monitor_flow(),
            store={
                constants.HEALTH_MON: original_health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.UPDATE_DICT: health_monitor_updates
            })
        with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG):
            update_hm_tf.run()

    def create_listener(self, listener):
        """Creates a listener.

        :param listener: A listener provider dictionary.
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=listener[constants.LISTENER_ID])
        if not db_listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener[constants.LISTENER_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_listener.load_balancer
        listeners = load_balancer.listeners
        dict_listeners = []
        for l in listeners:
            dict_listeners.append(
                provider_utils.db_listener_to_provider_listener(l).to_dict())
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        create_listener_tf = self._taskflow_load(
            self._listener_flows.get_create_listener_flow(),
            store={
                constants.LISTENERS: dict_listeners,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG):
            create_listener_tf.run()

    def delete_listener(self, listener):
        """Deletes a listener.

        :param listener: A listener provider dictionary to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        # TODO(johnsom) Remove once the provider data model includes
        #               the project ID
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=listener[constants.LOADBALANCER_ID])
        delete_listener_tf = self._taskflow_load(
            self._listener_flows.get_delete_listener_flow(),
            store={
                constants.LISTENER: listener,
                constants.LOADBALANCER_ID: listener[constants.LOADBALANCER_ID],
                constants.PROJECT_ID: lb.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_listener_tf, log=LOG):
            delete_listener_tf.run()

    def update_listener(self, listener, listener_updates):
        """Updates a listener.

        :param listener: A listener provider dictionary to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        db_lb = self._lb_repo.get(db_apis.get_session(),
                                  id=listener[constants.LOADBALANCER_ID])
        update_listener_tf = self._taskflow_load(
            self._listener_flows.get_update_listener_flow(),
            store={
                constants.LISTENER: listener,
                constants.UPDATE_DICT: listener_updates,
                constants.LOADBALANCER_ID: db_lb.id,
                constants.LISTENERS: [listener]
            })
        with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
            update_listener_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self,
                             loadbalancer,
                             flavor=None,
                             availability_zone=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param loadbalancer: The dict of the load balancer to create
        :param flavor: Flavor metadata dict for the load balancer, if any
        :param availability_zone: Availability zone metadata dict, if any
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=loadbalancer[constants.LOADBALANCER_ID])
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer',
                loadbalancer[constants.LOADBALANCER_ID])
            raise db_exceptions.NoResultFound

        # TODO(johnsom) convert this to octavia_lib constant flavor
        # once octavia is transitioned to use octavia_lib
        store = {
            constants.LOADBALANCER_ID: loadbalancer[constants.LOADBALANCER_ID],
            constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
            constants.FLAVOR: flavor,
            constants.AVAILABILITY_ZONE: availability_zone
        }

        topology = lb.topology
        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                lb.listeners))

        store[constants.UPDATE_DICT] = {constants.TOPOLOGY: topology}

        create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
            topology=topology, listeners=listeners_dicts)

        create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
        with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
            create_lb_tf.run()

    def delete_load_balancer(self, load_balancer, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer: Dict of the load balancer to delete
        :param cascade: If True, delete the child objects (pools, listeners)
            of the load balancer as well
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        db_lb = self._lb_repo.get(db_apis.get_session(),
                                  id=load_balancer[constants.LOADBALANCER_ID])
        store = {}

        if cascade:
            flow = self._lb_flows.get_cascade_delete_load_balancer_flow(
                load_balancer)
            store.update(self._lb_flows.get_delete_pools_store(db_lb))
            store.update(self._lb_flows.get_delete_listeners_store(db_lb))
        else:
            flow = self._lb_flows.get_delete_load_balancer_flow(load_balancer)
        store.update({
            constants.LOADBALANCER: load_balancer,
            constants.SERVER_GROUP_ID: db_lb.server_group_id,
            constants.PROJECT_ID: db_lb.project_id
        })

        delete_lb_tf = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(delete_lb_tf, log=LOG):
            delete_lb_tf.run()

    def update_load_balancer(self, original_load_balancer,
                             load_balancer_updates):
        """Updates a load balancer.

        :param original_load_balancer: Dict of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
            attributes
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        update_lb_tf = self._taskflow_load(
            self._lb_flows.get_update_load_balancer_flow(),
            store={
                constants.LOADBALANCER:
                original_load_balancer,
                constants.LOADBALANCER_ID:
                original_load_balancer[constants.LOADBALANCER_ID],
                constants.UPDATE_DICT:
                load_balancer_updates
            })

        with tf_logging.DynamicLoggingListener(update_lb_tf, log=LOG):
            update_lb_tf.run()

    def create_member(self, member):
        """Creates a pool member.

        :param member: A member provider dictionary to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        create_member_tf = self._taskflow_load(
            self._member_flows.get_create_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.POOL_ID: pool.id
            })
        with tf_logging.DynamicLoggingListener(create_member_tf, log=LOG):
            create_member_tf.run()

    def delete_member(self, member):
        """Deletes a pool member.

        :param member: A member provider dictionary to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        delete_member_tf = self._taskflow_load(
            self._member_flows.get_delete_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.POOL_ID: pool.id,
                constants.PROJECT_ID: load_balancer.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_member_tf, log=LOG):
            delete_member_tf.run()

    def batch_update_members(self, old_members, new_members, updated_members):
        """Creates, updates and deletes a batch of pool members.

        :param old_members: List of member provider dicts to delete
        :param new_members: List of member provider dicts to create
        :param updated_members: List of member provider dicts to update
        :returns: None
        """
        updated_members = [(provider_utils.db_member_to_provider_member(
            self._member_repo.get(db_apis.get_session(),
                                  id=m.get(constants.ID))).to_dict(), m)
                           for m in updated_members]
        provider_old_members = [
            provider_utils.db_member_to_provider_member(
                self._member_repo.get(db_apis.get_session(),
                                      id=m.get(constants.ID))).to_dict()
            for m in old_members
        ]
        if old_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=old_members[0][constants.POOL_ID])
        elif new_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=new_members[0][constants.POOL_ID])
        else:
            pool = self._pool_repo.get(
                db_apis.get_session(),
                id=updated_members[0][0][constants.POOL_ID])
        load_balancer = pool.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        batch_update_members_tf = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                provider_old_members, new_members, updated_members),
            store={
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.POOL_ID: pool.id,
                constants.PROJECT_ID: load_balancer.project_id
            })
        with tf_logging.DynamicLoggingListener(batch_update_members_tf,
                                               log=LOG):
            batch_update_members_tf.run()

    def update_member(self, member, member_updates):
        """Updates a pool member.

        :param member: A member provider dictionary to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        # TODO(ataraday) when other flows will use dicts - revisit this
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        update_member_tf = self._taskflow_load(
            self._member_flows.get_update_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.POOL_ID: pool.id,
                constants.UPDATE_DICT: member_updates
            })
        with tf_logging.DynamicLoggingListener(update_member_tf, log=LOG):
            update_member_tf.run()

    def create_pool(self, pool):
        """Creates a node pool.

        :param pool: Provider pool dict to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """

        # TODO(ataraday) It seems we need to get db pool here anyway to get
        # proper listeners
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])
        if not db_pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool[constants.POOL_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        create_pool_tf = self._taskflow_load(
            self._pool_flows.get_create_pool_flow(),
            store={
                constants.POOL_ID: pool[constants.POOL_ID],
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb
            })
        with tf_logging.DynamicLoggingListener(create_pool_tf, log=LOG):
            create_pool_tf.run()

    def delete_pool(self, pool):
        """Deletes a node pool.

        :param pool: Provider pool dict to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        load_balancer = db_pool.load_balancer

        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(),
            store={
                constants.POOL_ID: pool[constants.POOL_ID],
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.PROJECT_ID: db_pool.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_pool_tf, log=LOG):
            delete_pool_tf.run()

    def update_pool(self, origin_pool, pool_updates):
        """Updates a node pool.

        :param origin_pool: Provider pool dict to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        try:
            db_pool = self._get_db_obj_until_pending_update(
                self._pool_repo, origin_pool[constants.POOL_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Pool did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_pool = e.last_attempt.result()

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        update_pool_tf = self._taskflow_load(
            self._pool_flows.get_update_pool_flow(),
            store={
                constants.POOL_ID: db_pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.UPDATE_DICT: pool_updates
            })
        with tf_logging.DynamicLoggingListener(update_pool_tf, log=LOG):
            update_pool_tf.run()

    def create_l7policy(self, l7policy):
        """Creates an L7 Policy.

        :param l7policy: Provider dict of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        create_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_create_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: db_listener.load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(create_l7policy_tf, log=LOG):
            create_l7policy_tf.run()

    def delete_l7policy(self, l7policy):
        """Deletes an L7 policy.

        :param l7policy: Provider dict of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])
        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        delete_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_delete_l7policy_flow(),
            store={
                constants.L7POLICY: l7policy,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: db_listener.load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(delete_l7policy_tf, log=LOG):
            delete_l7policy_tf.run()

    def update_l7policy(self, original_l7policy, l7policy_updates):
        """Updates an L7 policy.

        :param original_l7policy: Provider dict of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=original_l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        update_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store={
                constants.L7POLICY: original_l7policy,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: db_listener.load_balancer.id,
                constants.UPDATE_DICT: l7policy_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7policy_tf, log=LOG):
            update_l7policy_tf.run()

    def create_l7rule(self, l7rule):
        """Creates an L7 Rule.

        :param l7rule: Provider dict of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])

        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        create_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_create_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy_dict.to_dict(),
                constants.LISTENERS: listeners_dicts,
                constants.L7POLICY_ID: db_l7policy.id,
                constants.LOADBALANCER_ID: load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(create_l7rule_tf, log=LOG):
            create_l7rule_tf.run()

    def delete_l7rule(self, l7rule):
        """Deletes an L7 rule.

        :param l7rule: Provider dict of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])
        l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy)
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))

        delete_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_delete_l7rule_flow(),
            store={
                constants.L7RULE: l7rule,
                constants.L7POLICY: l7policy.to_dict(),
                constants.LISTENERS: listeners_dicts,
                constants.L7POLICY_ID: db_l7policy.id,
                constants.LOADBALANCER_ID: load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(delete_l7rule_tf, log=LOG):
            delete_l7rule_tf.run()

    def update_l7rule(self, original_l7rule, l7rule_updates):
        """Updates an L7 rule.

        :param original_l7rule: Provider dict of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(
            db_apis.get_session(), id=original_l7rule[constants.L7POLICY_ID])
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        update_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_update_l7rule_flow(),
            store={
                constants.L7RULE: original_l7rule,
                constants.L7POLICY: l7policy_dict.to_dict(),
                constants.LISTENERS: listeners_dicts,
                constants.L7POLICY_ID: db_l7policy.id,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.UPDATE_DICT: l7rule_updates
            })
        with tf_logging.DynamicLoggingListener(update_l7rule_tf, log=LOG):
            update_l7rule_tf.run()

    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """
        stored_params = {
            constants.FAILED_AMPHORA: amp.to_dict(),
            constants.LOADBALANCER_ID: amp.load_balancer_id,
            constants.BUILD_TYPE_PRIORITY: priority,
        }

        if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):
            amp_role = 'master_or_backup'
        elif amp.role == constants.ROLE_STANDALONE:
            amp_role = 'standalone'
        elif amp.role is None:
            amp_role = 'spare'
        else:
            amp_role = 'undefined'

        LOG.info(
            "Perform failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })

        if amp.status == constants.DELETED:
            LOG.warning(
                'Amphora %s is marked DELETED in the database but '
                'was submitted for failover. Deleting it from the '
                'amphora health table to exclude it from health '
                'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        if (CONF.house_keeping.spare_amphora_pool_size
                == 0) and (CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amp.id)
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            lb).to_dict() if lb else lb
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        if lb and lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}
        if lb and lb.availability_zone:
            stored_params[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), lb.availability_zone))
        else:
            stored_params[constants.AVAILABILITY_ZONE] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(role=amp.role,
                                                  load_balancer=provider_lb),
            store=stored_params)

        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

        LOG.info(
            "Successfully completed the failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })

    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """
        try:
            amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
            if not amp:
                LOG.warning(
                    "Could not fetch Amphora %s from DB, ignoring "
                    "failover request.", amphora_id)
                return
            self._perform_amphora_failover(
                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
            if amp.load_balancer_id:
                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                         amp.load_balancer_id)
                self._lb_repo.update(db_apis.get_session(),
                                     amp.load_balancer_id,
                                     provisioning_status=constants.ACTIVE)
        except Exception as e:
            try:
                self._lb_repo.update(db_apis.get_session(),
                                     amp.load_balancer_id,
                                     provisioning_status=constants.ERROR)
            except Exception:
                LOG.error("Unable to revert LB status to ERROR.")
            with excutils.save_and_reraise_exception():
                LOG.error("Amphora %(id)s failover exception: %(exc)s", {
                    'id': amphora_id,
                    'exc': e
                })

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        # Note: This expects that the load balancer is already in
        #       provisioning_status=PENDING_UPDATE state
        try:
            lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)

            # Exclude amphora already deleted
            amps = [a for a in lb.amphorae if a.status != constants.DELETED]
            for amp in amps:
                # failover amphora in backup role
                # Note: this amp may not currently be the backup
                # TODO(johnsom) Change this to query the amp state
                #               once the amp API supports it.
                if amp.role == constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            for amp in amps:
                # failover everything else
                if amp.role != constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            self._lb_repo.update(db_apis.get_session(),
                                 load_balancer_id,
                                 provisioning_status=constants.ACTIVE)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("LB %(lbid)s failover exception: %(exc)s", {
                    'lbid': load_balancer_id,
                    'exc': e
                })
                self._lb_repo.update(db_apis.get_session(),
                                     load_balancer_id,
                                     provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id)

        certrotation_amphora_tf = self._taskflow_load(
            self._amphora_flows.cert_rotate_amphora_flow(),
            store={
                constants.AMPHORA: amp.to_dict(),
                constants.AMPHORA_ID: amphora_id
            })

        with tf_logging.DynamicLoggingListener(certrotation_amphora_tf,
                                               log=LOG):
            certrotation_amphora_tf.run()

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
              update the running configuration for mutable configuration
              items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info(
            "Start amphora agent configuration update, amphora's id "
            "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        update_amphora_tf = self._taskflow_load(
            self._amphora_flows.update_amphora_config_flow(),
            store={
                constants.AMPHORA: amp.to_dict(),
                constants.FLAVOR: flavor
            })

        with tf_logging.DynamicLoggingListener(update_amphora_tf, log=LOG):
            update_amphora_tf.run()
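
The create_* methods above retry on db_exceptions.NoResultFound because the API may still be committing the new row when the worker picks up the message. Below is a minimal standalone sketch of that tenacity pattern; the RETRY_* values and the fake repository are illustrative assumptions, not Octavia's actual constants.

import tenacity


class NoResultFound(Exception):
    """Stand-in for the oslo.db NoResultFound used in the example above."""


# Illustrative values only; Octavia defines its own RETRY_* constants.
RETRY_INITIAL_DELAY = 1
RETRY_BACKOFF = 1
RETRY_MAX = 5
RETRY_ATTEMPTS = 5

_fake_db = {}


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(NoResultFound),
    wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                    RETRY_MAX),
    stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def get_load_balancer(lb_id):
    # Raising the retryable exception makes tenacity wait
    # RETRY_INITIAL_DELAY seconds, add RETRY_BACKOFF per attempt (capped
    # at RETRY_MAX) and give up after RETRY_ATTEMPTS attempts.
    lb = _fake_db.get(lb_id)
    if not lb:
        raise NoResultFound()
    return lb

The example below implements the same ControllerWorker interface against an F5 BIG-IP backend (AS3 tenant updates) instead of building TaskFlow flows per object.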
class ControllerWorker(object):
    """Worker class to update load balancers."""
    def __init__(self):
        self._loadbalancer_repo = f5_repos.LoadBalancerRepository()
        self._esd = esd_repo.EsdRepository()
        self._amphora_repo = repo.AmphoraRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = f5_repos.PoolRepository()
        self._l7policy_repo = f5_repos.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()
        self._vip_repo = repo.VipRepository()
        self.bigip = BigipAS3RestClient(
            bigip_urls=CONF.f5_agent.bigip_urls,
            enable_verify=CONF.f5_agent.bigip_verify,
            enable_token=CONF.f5_agent.bigip_token,
            esd=self._esd)

        self.network_driver = driver_utils.get_network_driver()
        self.cert_manager = cert_manager.CertManagerWrapper()
        self.status = status.StatusManager(self.bigip)
        worker = periodics.PeriodicWorker([(self.pending_sync, None, None)])
        t = threading.Thread(target=worker.start)
        t.daemon = True
        t.start()

        if cfg.CONF.f5_agent.prometheus:
            prometheus_port = CONF.f5_agent.prometheus_port
            LOG.info('Starting Prometheus HTTP server on port {}'.format(
                prometheus_port))
            prometheus.start_http_server(prometheus_port)

        super(ControllerWorker, self).__init__()

    @periodics.periodic(120, run_immediately=True)
    def pending_sync(self):
        """
        Reconciliation loop that
        - schedules unscheduled load balancers to this worker
        - deletes load balancers that are PENDING_DELETE
        """

        # schedule unscheduled load balancers to this worker
        self.sync_loadbalancers()

        # delete load balancers that are PENDING_DELETE
        lbs_to_delete = self._loadbalancer_repo.get_all_from_host(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_DELETE)
        for lb in lbs_to_delete:
            LOG.info("Found pending deletion of lb %s", lb.id)
            self.delete_load_balancer(lb.id)
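
pending_sync above is registered as a periodic task in __init__ and runs every 120 seconds. A minimal standalone sketch of that scheduling pattern, assuming the periodics module comes from the futurist library (the task body here is only a placeholder):

import threading

from futurist import periodics


@periodics.periodic(120, run_immediately=True)
def pending_sync():
    print('reconciling pending load balancers')


# PeriodicWorker takes (callable, args, kwargs) tuples and runs each
# callable on its own schedule; start() blocks, so run it in a thread.
worker = periodics.PeriodicWorker([(pending_sync, None, None)])
t = threading.Thread(target=worker.start)
t.daemon = True
t.start()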

    @lockutils.synchronized('tenant_refresh')
    def sync_loadbalancers(self):
        """Sync loadbalancers that are in a PENDING state"""
        lbs = []
        pending_create_lbs = self._loadbalancer_repo.get_all(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_CREATE,
            show_deleted=False)[0]
        for lb in pending_create_lbs:
            # bind to loadbalancer if scheduled to this host
            if CONF.host == self.network_driver.get_scheduled_host(
                    lb.vip.port_id):
                self.ensure_host_set(lb)
                self.ensure_amphora_exists(lb.id)
                lbs.append(lb)

        lbs.extend(
            self._loadbalancer_repo.get_all_from_host(
                db_apis.get_session(),
                provisioning_status=lib_consts.PENDING_UPDATE))

        pools = self._pool_repo.get_pending_from_host(db_apis.get_session())
        lbs.extend([pool.load_balancer for pool in pools])

        l7policies = self._l7policy_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend(
            [l7policy.listener.load_balancer for l7policy in l7policies])

        pending_networks = collections.defaultdict(list)
        for lb in lbs:
            if lb not in pending_networks[lb.vip.network_id]:
                pending_networks[lb.vip.network_id].append(lb)

        for network_id, loadbalancers in pending_networks.items():
            LOG.info("Found pending tenant network %s, syncing...", network_id)
            try:
                if self._refresh(network_id).ok:
                    self.status.update_status(loadbalancers)
            except exceptions.RetryException as e:
                LOG.warning("Device is busy, retrying with next sync: %s", e)
            except o_exceptions.CertificateRetrievalException as e:
                LOG.warning("Could not retrieve certificate for tenant %s: %s",
                            network_id, e)
            except exceptions.AS3Exception as e:
                LOG.error("AS3 exception while syncing tenant %s: %s",
                          network_id, e)
                for lb in loadbalancers:
                    self.status.set_error(lb)

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _get_all_loadbalancer(self, network_id):
        LOG.debug("Get load balancers from DB for network id: %s ", network_id)
        vips = self._vip_repo.get_all(db_apis.get_session(),
                                      network_id=network_id)
        loadbalancers = []
        for vip in vips[0]:
            loadbalancers.append(
                self._loadbalancer_repo.get(db_apis.get_session(),
                                            show_deleted=False,
                                            id=vip.load_balancer_id,
                                            server_group_id=CONF.host))
        return [lb for lb in loadbalancers if lb]

    def _refresh(self, network_id):
        loadbalancers = self._get_all_loadbalancer(network_id)
        segmentation_id = self.network_driver.get_segmentation_id(network_id)
        return tenant_update(self.bigip, self.cert_manager, network_id,
                             loadbalancers, segmentation_id)

    """
    Loadbalancer
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_load_balancer(self, load_balancer_id, flavor=None):
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        # We retry fetching the load balancer because the API may still be
        # busy inserting the LB into the database.
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(lb.id)
        self.ensure_host_set(lb)
        if self._refresh(lb.vip.network_id).ok:
            self.status.set_active(lb)
        else:
            self.status.set_error(lb)

    @lockutils.synchronized('tenant_refresh')
    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if self._refresh(lb.vip.network_id).ok:
            self.status.set_active(lb)
        else:
            self.status.set_error(lb)

    @lockutils.synchronized('tenant_refresh')
    def delete_load_balancer(self, load_balancer_id, cascade=False):
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        existing_lbs = [
            loadbalancer
            for loadbalancer in self._get_all_loadbalancer(lb.vip.network_id)
            if loadbalancer.id != lb.id
        ]

        if not existing_lbs:
            # Delete whole tenant
            ret = tenant_delete(self.bigip, lb.vip.network_id)
        else:
            # Don't delete whole tenant
            segmentation_id = self.network_driver.get_segmentation_id(
                lb.vip.network_id)
            ret = tenant_update(self.bigip, self.cert_manager,
                                lb.vip.network_id, existing_lbs,
                                segmentation_id)

        if ret.ok:
            self.status.set_deleted(lb)

    """
    Listener
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_listener(self, listener_id):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        if self._refresh(listener.load_balancer.vip.network_id).ok:
            self.status.set_active(listener)
        else:
            self.status.set_error(listener)

    @lockutils.synchronized('tenant_refresh')
    def update_listener(self, listener_id, listener_updates):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if self._refresh(listener.load_balancer.vip.network_id).ok:
            self.status.set_active(listener)
        else:
            self.status.set_error(listener)

    @lockutils.synchronized('tenant_refresh')
    def delete_listener(self, listener_id):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)

        if self._refresh(listener.load_balancer.vip.network_id).ok:
            self.status.set_deleted(listener)

    """
    Pool
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        if self._refresh(pool.load_balancer.vip.network_id).ok:
            self.status.set_active(pool)
        else:
            self.status.set_error(pool)

    @lockutils.synchronized('tenant_refresh')
    def update_pool(self, pool_id, pool_updates):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if self._refresh(pool.load_balancer.vip.network_id).ok:
            self.status.set_active(pool)
        else:
            self.status.set_error(pool)

    @lockutils.synchronized('tenant_refresh')
    def delete_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if self._refresh(pool.load_balancer.vip.network_id).ok:
            self.status.set_deleted(pool)

    """
    Member
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(member.pool.load_balancer.id)

        if not member.backup:
            try:
                if member_create(self.bigip, member).ok:
                    self.status.set_active(member)
                    return
            except exceptions.AS3Exception:
                pass
        elif self._refresh(member.pool.load_balancer.vip.network_id).ok:
            self.status.set_active(member)
        else:
            self.status.set_error(member)

    @lockutils.synchronized('tenant_refresh')
    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in old_member_ids
        ]
        new_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in new_member_ids
        ]
        updated_members = [(self._member_repo.get(db_apis.get_session(),
                                                  id=m.get('id')), m)
                           for m in updated_members]
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        else:
            pool = updated_members[0][0].pool
        load_balancer = pool.load_balancer
        network_id = load_balancer.vip.network_id
        if self._refresh(network_id).ok:
            self.status.update_status([load_balancer])

    @lockutils.synchronized('tenant_refresh')
    def update_member(self, member_id, member_updates):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if self._refresh(member.pool.load_balancer.vip.network_id).ok:
            self.status.set_active(member)
        else:
            self.status.set_error(member)

    @lockutils.synchronized('tenant_refresh')
    def delete_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if self._refresh(member.pool.load_balancer.vip.network_id).ok:
            self.status.set_deleted(member)

    """
    Member
    """

    @lockutils.synchronized('tenant_refresh')
    def create_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_active(health_mon)
        else:
            self.status.set_error(health_mon)

    @lockutils.synchronized('tenant_refresh')
    def update_health_monitor(self, health_monitor_id, health_monitor_updates):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_active(health_mon)
        else:
            self.status.set_error(health_mon)

    @lockutils.synchronized('tenant_refresh')
    def delete_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        pool = health_mon.pool
        load_balancer = pool.load_balancer
        if self._refresh(load_balancer.vip.network_id).ok:
            self.status.set_deleted(health_mon)

    """
    l7policy
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_l7policy(self, l7policy_id):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        if self._refresh(l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7policy)
        else:
            self.status.set_error(l7policy)

    @lockutils.synchronized('tenant_refresh')
    def update_l7policy(self, l7policy_id, l7policy_updates):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if self._refresh(l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7policy)
        else:
            self.status.set_error(l7policy)

    @lockutils.synchronized('tenant_refresh')
    def delete_l7policy(self, l7policy_id):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if self._refresh(l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_deleted(l7policy)

    """
    l7rule
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    @lockutils.synchronized('tenant_refresh')
    def create_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        if self._refresh(
                l7rule.l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7rule)
        else:
            self.status.set_error(l7rule)

    @lockutils.synchronized('tenant_refresh')
    def update_l7rule(self, l7rule_id, l7rule_updates):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if self._refresh(
                l7rule.l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_active(l7rule)
        else:
            self.status.set_error(l7rule)

    @lockutils.synchronized('tenant_refresh')
    def delete_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if self._refresh(
                l7rule.l7policy.listener.load_balancer.vip.network_id).ok:
            self.status.set_deleted(l7rule)

    """
    Amphora
    """

    def ensure_amphora_exists(self, load_balancer_id):
        """
        The Octavia health manager makes some assumptions about the existence
        of amphorae, so even the F5 provider driver has to maintain amphora DB
        entries.

        This function creates an amphora entry in the database if it doesn't
        already exist.
        """
        device_amp = self._amphora_repo.get(db_apis.get_session(),
                                            load_balancer_id=load_balancer_id)

        # create amphora mapping if missing
        if not device_amp:
            self._amphora_repo.create(db_apis.get_session(),
                                      id=load_balancer_id,
                                      load_balancer_id=load_balancer_id,
                                      compute_flavor=CONF.host,
                                      status=lib_consts.ACTIVE)
            return

        # update host if not updated yet
        if device_amp.compute_flavor != CONF.host:
            self._amphora_repo.update(db_apis.get_session(),
                                      id=device_amp.id,
                                      compute_flavor=CONF.host)

    def create_amphora(self):
        pass

    def delete_amphora(self, amphora_id):
        self._amphora_repo.delete(db_apis.get_session(), id=amphora_id)

    def failover_amphora(self, amphora_id):
        pass

    def failover_loadbalancer(self, load_balancer_id):
        pass

    def amphora_cert_rotation(self, amphora_id):
        pass

    def update_amphora_agent_config(self, amphora_id):
        pass

    def ensure_host_set(self, loadbalancer):
        """Assigns the current host to loadbalancer by writing
        it into server_group_id column of loadbalancer table."""
        if CONF.host[:36] != loadbalancer.server_group_id:
            self._loadbalancer_repo.update(db_apis.get_session(),
                                           id=loadbalancer.id,
                                           server_group_id=CONF.host[:36])
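
Every mutating method in the example above is wrapped in @lockutils.synchronized('tenant_refresh'), so only one tenant refresh runs at a time within the worker process. A minimal standalone sketch of that oslo.concurrency pattern (the function names are illustrative only):

import threading
import time

from oslo_concurrency import lockutils


# Both functions share the 'tenant_refresh' lock, so they never run
# concurrently, even when invoked from different threads.
@lockutils.synchronized('tenant_refresh')
def refresh_tenant(network_id):
    print('refreshing %s' % network_id)
    time.sleep(1)


@lockutils.synchronized('tenant_refresh')
def delete_tenant(network_id):
    print('deleting %s' % network_id)


threads = [threading.Thread(target=refresh_tenant, args=('net-1',)),
           threading.Thread(target=delete_tenant, args=('net-1',))]
for t in threads:
    t.start()
for t in threads:
    t.join()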
Exemple #24
0
class ControllerWorker(object):
    """Worker class to update load balancers."""

    _metric_as3worker_queue = prometheus.metrics.Gauge(
        'octavia_as3_worker_queue', 'Number of items in AS3 worker queue',
        ['octavia_host'])

    def __init__(self):
        self._repositories = repo.Repositories()
        self._loadbalancer_repo = f5_repos.LoadBalancerRepository()
        self._amphora_repo = repo.AmphoraRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._listener_repo = f5_repos.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = f5_repos.PoolRepository()
        self._l7policy_repo = f5_repos.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._vip_repo = repo.VipRepository()
        self._quota_repo = repo.QuotasRepository()

        self.status = status_manager.StatusManager()
        self.sync = sync_manager.SyncManager(self.status,
                                             self._loadbalancer_repo)
        self.network_driver = driver_utils.get_network_driver()
        self.queue = SetQueue()
        worker = periodics.PeriodicWorker([
            (self.pending_sync, None, None),
            (self.full_sync_reappearing_devices, None, None),
            (self.cleanup_orphaned_tenants, None, None)
        ])
        t = threading.Thread(target=worker.start)
        t.daemon = True
        t.start()

        LOG.info("Starting as3worker")
        as3worker = threading.Thread(target=self.as3worker)
        as3worker.daemon = True
        as3worker.start()

        if cfg.CONF.f5_agent.prometheus:
            prometheus_port = CONF.f5_agent.prometheus_port
            LOG.info('Starting Prometheus HTTP server on port {}'.format(
                prometheus_port))
            prometheus.start_http_server(prometheus_port)

        super(ControllerWorker, self).__init__()

    def as3worker(self):
        """ AS3 Worker thread, pops tenant to refresh from thread-safe set queue"""
        while True:
            try:
                self._metric_as3worker_queue.labels(
                    octavia_host=CONF.host).set(self.queue.qsize())
                network_id, device = self.queue.get()
                loadbalancers = self._get_all_loadbalancer(network_id)
                LOG.debug(
                    "AS3Worker after pop (queue_size=%d): Refresh tenant '%s' with loadbalancer %s",
                    self.queue.qsize(), network_id,
                    [lb.id for lb in loadbalancers])
                if all([
                        lb.provisioning_status == lib_consts.PENDING_DELETE
                        for lb in loadbalancers
                ]):
                    ret = self.sync.tenant_delete(network_id, device)
                else:
                    ret = self.sync.tenant_update(network_id, device)

                if not ret:
                    continue

                # update status of just-synced LBs
                self.status.update_status(loadbalancers)
                for lb in loadbalancers:
                    self._reset_in_use_quota(lb.project_id)

            except Empty:
                # Queue empty, pass
                pass
            except (exceptions.RetryException, tenacity.RetryError) as e:
                LOG.warning("Device is busy, retrying with next sync: %s", e)
                time.sleep(15)
            except Exception as e:
                LOG.exception(e)
                # restart
                pass
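
SetQueue itself is not shown in this example. A minimal sketch of one possible implementation, assuming it behaves like a queue.Queue that silently drops items already waiting in the queue, matching the "thread-safe set queue" described in the as3worker docstring above:

import collections
import queue


class SetQueue(queue.Queue):
    """FIFO queue that ignores puts of items that are already queued."""

    def _init(self, maxsize):
        # An OrderedDict keeps FIFO order while acting as a set of keys.
        self.queue = collections.OrderedDict()

    def _put(self, item):
        # Re-adding an existing key is a no-op, so duplicate
        # (network_id, device) tuples collapse into a single entry.
        self.queue[item] = None

    def _get(self):
        return self.queue.popitem(last=False)[0]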

    @periodics.periodic(86400, run_immediately=True)
    def cleanup_orphaned_tenants(self):
        LOG.info("Running (24h) tenant cleanup")
        session = db_apis.get_session(autocommit=False)

        for device in self.sync.devices():
            try:
                # Fetch all Tenants
                tenants = self.sync.get_tenants(device)

                # Get all loadbalancers of this host
                for tenant_name, applications in tenants.items():
                    # Convert tenant_name to network_id
                    network_id = tenant_name.replace(constants.PREFIX_NETWORK,
                                                     '').replace('_', '-')

                    # Fetch active loadbalancers for this network
                    octavia_lb_ids = [
                        lb.id
                        for lb in self._loadbalancer_repo.get_all_by_network(
                            session, network_id, show_deleted=False)
                    ]
                    if not octavia_lb_ids:
                        LOG.info("Found orphaned tenant '%s' for device '%s'",
                                 tenant_name, device)
                        self.queue.put((network_id, device))
            except HTTPError:
                # Ignore as3 errors
                pass

    @periodics.periodic(240, run_immediately=True)
    def full_sync_reappearing_devices(self):
        session = db_apis.get_session(autocommit=False)

        # Get all pending devices
        booting_devices = self._amphora_repo.get_all(
            session,
            status=constants.AMPHORA_BOOTING,
            compute_flavor=CONF.host,
            load_balancer_id=None)

        for device in booting_devices[0]:
            if CONF.f5_agent.migration and device.role != constants.ROLE_BACKUP:
                LOG.warning(
                    "[Migration Mode] Skipping full sync of active device %s",
                    device.cached_zone)
                continue

            LOG.info("Device reappeared: %s. Doing a full sync.",
                     device.cached_zone)

            # get all load balancers (of this host)
            lbs = self._loadbalancer_repo.get_all_from_host(session,
                                                            show_deleted=False)

            # deduplicate
            for network_id in set([lb.vip.network_id for lb in lbs]):
                self.queue.put((network_id, device.cached_zone))

            # Set device ready
            self._amphora_repo.update(session,
                                      device.id,
                                      status=constants.AMPHORA_READY)
            session.commit()

    @periodics.periodic(120, run_immediately=True)
    def pending_sync(self):
        """
        Reconciliation loop that
        - synchronizes load balancers that are in a PENDING state
        - deletes load balancers that are PENDING_DELETE
        - executes a full sync on F5 devices that were offline but are now back online
        """

        # delete load balancers that are PENDING_DELETE
        session = db_apis.get_session()
        lbs_to_delete = self._loadbalancer_repo.get_all_from_host(
            session, provisioning_status=lib_consts.PENDING_DELETE)
        for lb in lbs_to_delete:
            LOG.info("Found pending deletion of lb %s", lb.id)
            self.delete_load_balancer(lb.id)

        # Find pending load balancers not yet assigned to this host
        lbs = []
        pending_create_lbs = self._loadbalancer_repo.get_all(
            db_apis.get_session(),
            provisioning_status=lib_consts.PENDING_CREATE,
            show_deleted=False)[0]
        for lb in pending_create_lbs:
            # bind to loadbalancer if scheduled to this host
            if CONF.host == self.network_driver.get_scheduled_host(
                    lb.vip.port_id):
                self.ensure_host_set(lb)
                lbs.append(lb)

        # Find pending loadbalancer
        lbs.extend(
            self._loadbalancer_repo.get_all_from_host(
                db_apis.get_session(),
                provisioning_status=lib_consts.PENDING_UPDATE))

        # Make the Octavia health manager happy by creating DB amphora entries
        for lb in lbs:
            self.ensure_amphora_exists(lb.id)

        # Find pending listener
        listeners = self._listener_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend([listener.load_balancer for listener in listeners])

        # Find pending pools
        pools = self._pool_repo.get_pending_from_host(db_apis.get_session())
        lbs.extend([pool.load_balancer for pool in pools])

        # Find pending l7policies
        l7policies = self._l7policy_repo.get_pending_from_host(
            db_apis.get_session())
        lbs.extend(
            [l7policy.listener.load_balancer for l7policy in l7policies])

        # Deduplicate into networks
        # because each network is synced separately
        pending_networks = set([lb.vip.network_id for lb in lbs])
        for network_id in pending_networks:
            self.queue.put_nowait((network_id, None))
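
    # NOTE: the retry decorators below rely on module-level RETRY_* constants
    # that are not shown in this excerpt. tenacity.wait_incrementing(start,
    # increment, max) sleeps start + increment * (attempt - 1) seconds between
    # attempts (capped at max), and stop_after_attempt() bounds the number of
    # attempts -- presumably the source of the "up to 60 seconds" wording in
    # the warning messages further down.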

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _get_all_loadbalancer(self, network_id):
        LOG.debug("Get load balancers from DB for network id: %s ", network_id)
        return self._loadbalancer_repo.get_all_by_network(
            db_apis.get_session(), network_id=network_id, show_deleted=False)

    def _reset_in_use_quota(self, project_id):
        """ reset in_use quota to None, so it will be recalculated the next time
        :param project_id: project id
        """
        reset_dict = {
            'in_use_load_balancer': None,
            'in_use_listener': None,
            'in_use_pool': None,
            'in_use_health_monitor': None,
            'in_use_member': None,
        }

        lock_session = db_apis.get_session(autocommit=False)
        try:
            self._quota_repo.update(lock_session,
                                    project_id=project_id,
                                    quota=reset_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    'Failed to reset quota for project %(proj)s; the project '
                    'may have excess quota in use.', {'proj': project_id})
                lock_session.rollback()

    """
    Loadbalancer
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self, load_balancer_id, flavor=None):
        lb = self._loadbalancer_repo.get(db_apis.get_session(),
                                         id=load_balancer_id)
        # Retry fetching the load balancer, since the API may still be
        # busy inserting it into the database.
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(lb.id)
        self.ensure_host_set(lb)
        self.queue.put((lb.vip.network_id, None))

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        lb = self._loadbalancer_repo.get(db_apis.get_session(),
                                         id=load_balancer_id)
        self.queue.put((lb.vip.network_id, None))

    def delete_load_balancer(self, load_balancer_id, cascade=False):
        lb = self._loadbalancer_repo.get(db_apis.get_session(),
                                         id=load_balancer_id)
        # may have been deleted by the sync loop in the meantime
        if lb:
            self.queue.put((lb.vip.network_id, None))

    """
    Listener
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_listener(self, listener_id):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        self.queue.put((listener.load_balancer.vip.network_id, None))

    def update_listener(self, listener_id, listener_updates):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        self.queue.put((listener.load_balancer.vip.network_id, None))

    def delete_listener(self, listener_id):
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        # may have been deleted by the sync loop in the meantime
        if listener:
            self.queue.put((listener.load_balancer.vip.network_id, None))

    """
    Pool
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        if not pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        self.queue.put((pool.load_balancer.vip.network_id, None))

    def update_pool(self, pool_id, pool_updates):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        self.queue.put((pool.load_balancer.vip.network_id, None))

    def delete_pool(self, pool_id):
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
        # may have been deleted by the sync loop in the meantime
        if pool:
            self.queue.put((pool.load_balancer.vip.network_id, None))

    """
    Member
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        if not member:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        self.ensure_amphora_exists(member.pool.load_balancer.id)
        self.queue.put((member.pool.load_balancer.vip.network_id, None))

    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in old_member_ids
        ]
        new_members = [
            self._member_repo.get(db_apis.get_session(), id=mid)
            for mid in new_member_ids
        ]
        updated_members = [(self._member_repo.get(db_apis.get_session(),
                                                  id=m.get('id')), m)
                           for m in updated_members]
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        elif updated_members:
            pool = updated_members[0][0].pool
        else:
            return
        self.queue.put((pool.load_balancer.vip.network_id, None))

    def update_member(self, member_id, member_updates):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        self.queue.put((member.pool.load_balancer.vip.network_id, None))

    def delete_member(self, member_id):
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        # may have been deleted by the sync loop in the meantime
        if member:
            self.queue.put(
                (member.pool.load_balancer.vip.network_id, None))

    """
    Health Monitor
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        self.queue.put((health_mon.pool.load_balancer.vip.network_id, None))

    def update_health_monitor(self, health_monitor_id, health_monitor_updates):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        self.queue.put((health_mon.pool.load_balancer.vip.network_id, None))

    def delete_health_monitor(self, health_monitor_id):
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        # may have been deleted by the sync loop in the meantime
        if health_mon:
            self.queue.put(
                (health_mon.pool.load_balancer.vip.network_id, None))

    """
    l7policy
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7policy(self, l7policy_id):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        self.queue.put((l7policy.listener.load_balancer.vip.network_id, None))

    def update_l7policy(self, l7policy_id, l7policy_updates):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        self.queue.put((l7policy.listener.load_balancer.vip.network_id, None))

    def delete_l7policy(self, l7policy_id):
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        # may have been deleted by the sync loop in the meantime
        if l7policy:
            self.queue.put(
                (l7policy.listener.load_balancer.vip.network_id, None))

    """
    l7rule
    """

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                        RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        if not l7rule:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        self.queue.put(
            (l7rule.l7policy.listener.load_balancer.vip.network_id, None))

    def update_l7rule(self, l7rule_id, l7rule_updates):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        self.queue.put(
            (l7rule.l7policy.listener.load_balancer.vip.network_id, None))

    def delete_l7rule(self, l7rule_id):
        l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
        # may have been deleted by the sync loop in the meantime
        if l7rule:
            self.queue.put(
                (l7rule.l7policy.listener.load_balancer.vip.network_id, None))

    """
    Amphora
    """

    def ensure_amphora_exists(self, load_balancer_id):
        """
        Octavia health manager makes some assumptions about the existence of amphorae.
        That's why even the F5 provider driver has to care about amphora DB entries.
        Otherwise status updates won't work correctly.

        This function creates an amphora entry in the database, if it doesn't already exist.
        """
        device_entry = self._amphora_repo.get(
            db_apis.get_session(), load_balancer_id=load_balancer_id)

        # create amphora mapping if missing
        if not device_entry:
            self._amphora_repo.create(db_apis.get_session(),
                                      id=load_balancer_id,
                                      load_balancer_id=load_balancer_id,
                                      compute_flavor=CONF.host,
                                      status=lib_consts.ACTIVE)
            return

        # update host if not updated yet
        if device_entry.compute_flavor != CONF.host:
            self._amphora_repo.update(db_apis.get_session(),
                                      id=device_entry.id,
                                      compute_flavor=CONF.host)

    def create_amphora(self):
        pass

    def delete_amphora(self, amphora_id):
        self._amphora_repo.delete(db_apis.get_session(), id=amphora_id)

    def failover_amphora(self, amphora_id):
        """ For now, we are rusing rpc endpoint failover_amphora for receiving failover events
        :param amphora_id: host that detected a failover

        """
        if amphora_id == CONF.host and not CONF.f5_agent.migration:
            self.sync.failover()

    def failover_loadbalancer(self, load_balancer_id):
        pass

    def migrate_loadbalancer(self, load_balancer_id, target_host):
        pass

    def migrate_loadbalancers(self, source_host, target_host):
        pass

    def amphora_cert_rotation(self, amphora_id):
        pass

    def update_amphora_agent_config(self, amphora_id):
        pass

    def ensure_host_set(self, loadbalancer):
        """Assigns the current host to loadbalancer by writing
        it into server_group_id column of loadbalancer table."""
        if CONF.host[:36] != loadbalancer.server_group_id:
            self._loadbalancer_repo.update(db_apis.get_session(),
                                           id=loadbalancer.id,
                                           server_group_id=CONF.host[:36])
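
# A minimal standalone sketch (not part of the example above) of the
# retry-until-visible pattern its create_* handlers use: the provider raises
# NoResultFound while the Octavia API may still be committing the new row, and
# tenacity retries the lookup with an incrementing wait. The RETRY_* values and
# the repo/session parameters below are illustrative assumptions only.
import tenacity

RETRY_ATTEMPTS = 20      # assumed value; the real module defines its own
RETRY_INITIAL_DELAY = 1  # seconds before the first retry
RETRY_BACKOFF = 1        # seconds added per further attempt
RETRY_MAX = 5            # per-attempt wait cap in seconds


class NoResultFound(Exception):
    """Stand-in for the DB layer's NoResultFound exception."""


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(NoResultFound),
    wait=tenacity.wait_incrementing(RETRY_INITIAL_DELAY, RETRY_BACKOFF,
                                    RETRY_MAX),
    stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def get_or_retry(repo, session, object_id):
    """Fetch a row, retrying until the API has committed it."""
    obj = repo.get(session, id=object_id)
    if not obj:
        # The row may not be visible yet; raising triggers another attempt.
        raise NoResultFound()
    return obj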
Exemple #25
0
def create_db_retry_object():
    return tenacity.Retrying(
        retry=tenacity.retry_if_exception_type(sa.exc.OperationalError),
        stop=tenacity.stop_after_attempt(10),
        wait=tenacity.wait_incrementing(increment=0.1)  # 0.1 seconds
    )
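
# Usage sketch for the helper above (an assumption, not part of the original
# source): the returned Retrying object wraps an arbitrary DB call and retries
# it on transient OperationalError. The in-memory SQLite engine and the
# run_query helper are hypothetical stand-ins for the real session and query.
import sqlalchemy as sa


def run_query(connection):
    # Trivial query standing in for real DB work that might hit a transient
    # OperationalError (e.g. a dropped connection).
    return connection.execute(sa.text("SELECT 1")).scalar()


engine = sa.create_engine("sqlite://")
with engine.connect() as conn:
    retrying = create_db_retry_object()
    # Newer tenacity releases make Retrying objects directly callable; older
    # releases used retrying.call(run_query, conn) instead.
    result = retrying(run_query, conn)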