Example #1
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        if not db_health_monitor:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'healthmonitor',
                        health_monitor[constants.HEALTHMONITOR_ID])
            raise db_exceptions.NoResultFound

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {constants.HEALTH_MON: health_monitor,
                 constants.POOL_ID: pool.id,
                 constants.LISTENERS: listeners_dicts,
                 constants.LOADBALANCER_ID: load_balancer.id,
                 constants.LOADBALANCER: provider_lb}
        self.run_flow(
            flow_utils.get_create_health_monitor_flow,
            store=store)
Example #2
    def delete_pool(self, pool):
        """Deletes a node pool.

        :param pool: Provider pool dict to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        load_balancer = db_pool.load_balancer

        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.PROJECT_ID: db_pool.project_id
        }
        self.services_controller.run_poster(flow_utils.get_delete_pool_flow,
                                            store=store)
Example #3
    def delete_health_monitor(self, health_monitor):
        """Deletes a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.PROJECT_ID: load_balancer.project_id
        }
        self.services_controller.run_poster(
            flow_utils.get_delete_health_monitor_flow, store=store)
Example #4
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.services_controller.run_poster(
            flow_utils.get_create_health_monitor_flow, store=store)
Example #5
    def update_pool(self, origin_pool, pool_updates):
        """Updates a node pool.

        :param origin_pool: Provider pool dict to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        try:
            db_pool = self._get_db_obj_until_pending_update(
                self._pool_repo, origin_pool[constants.POOL_ID])
        except tenacity.RetryError as e:
            LOG.warning('Pool did not go into %s in 60 seconds. This is '
                        'either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            db_pool = e.last_attempt.result()

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {constants.POOL_ID: db_pool.id,
                 constants.LISTENERS: listeners_dicts,
                 constants.LOADBALANCER: provider_lb,
                 constants.LOADBALANCER_ID: load_balancer.id,
                 constants.UPDATE_DICT: pool_updates}
        self.run_flow(
            flow_utils.get_update_pool_flow,
            store=store)
Example #6
    def update_member(self, member, member_updates):
        """Updates a pool member.

        :param member: A member provider dictionary to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        # TODO(ataraday): revisit this when other flows use dicts
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.UPDATE_DICT: member_updates
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_update_member_flow, store=store)
Example #7
    def delete(self, id, cascade=False):
        """Deletes a load balancer."""
        context = pecan_request.context.get('octavia_context')
        cascade = strutils.bool_from_string(cascade)
        db_lb = self._get_db_lb(context.session, id, show_deleted=False)

        self._auth_validate_action(context, db_lb.project_id,
                                   constants.RBAC_DELETE)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(db_lb.provider)

        with db_api.get_lock_session() as lock_session:
            if (db_lb.listeners or db_lb.pools) and not cascade:
                msg = _("Cannot delete Load Balancer %s - "
                        "it has children") % id
                LOG.warning(msg)
                raise exceptions.ValidationException(detail=msg)
            self._test_lb_status(lock_session, id,
                                 lb_status=constants.PENDING_DELETE)

            LOG.info("Sending delete Load Balancer %s to provider %s",
                     id, driver.name)
            provider_loadbalancer = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(
                    db_lb, for_delete=True))
            driver_utils.call_provider(driver.name, driver.loadbalancer_delete,
                                       provider_loadbalancer, cascade)
Example #8
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb
            })
        with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
            create_hm_tf.run()
Example #9
    def create_listener(self, listener):
        """Creates a listener.

        :param listener: A listener provider dictionary.
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=listener[constants.LISTENER_ID])
        if not db_listener:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'listener',
                        listener[constants.LISTENER_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_listener.load_balancer
        listeners = load_balancer.listeners
        dict_listeners = []
        for li in listeners:
            dict_listeners.append(
                provider_utils.db_listener_to_provider_listener(li).to_dict())
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {constants.LISTENERS: dict_listeners,
                 constants.LOADBALANCER: provider_lb,
                 constants.LOADBALANCER_ID: load_balancer.id}

        self.services_controller.run_poster(
            flow_utils.get_create_listener_flow,
            store=store)
Example #10
    def create_member(self, member):
        """Creates a pool member.

        :param member: A member provider dictionary to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        create_member_tf = self._taskflow_load(
            self._member_flows.get_create_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.POOL_ID: pool.id
            })
        with tf_logging.DynamicLoggingListener(create_member_tf, log=LOG):
            create_member_tf.run()
Example #11
def process_get(get_data):
    session = db_api.get_session()

    if get_data[constants.OBJECT] == lib_consts.LOADBALANCERS:
        lb_repo = repositories.LoadBalancerRepository()
        db_lb = lb_repo.get(session, id=get_data[lib_consts.ID],
                            show_deleted=False)
        if db_lb:
            provider_lb = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))
            return provider_lb.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.LISTENERS:
        listener_repo = repositories.ListenerRepository()
        db_listener = listener_repo.get(
            session, id=get_data[lib_consts.ID], show_deleted=False)
        if db_listener:
            provider_listener = (
                driver_utils.db_listener_to_provider_listener(db_listener))
            return provider_listener.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.POOLS:
        pool_repo = repositories.PoolRepository()
        db_pool = pool_repo.get(session, id=get_data[lib_consts.ID],
                                show_deleted=False)
        if db_pool:
            provider_pool = (
                driver_utils.db_pool_to_provider_pool(db_pool))
            return provider_pool.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.MEMBERS:
        member_repo = repositories.MemberRepository()
        db_member = member_repo.get(session, id=get_data[lib_consts.ID],
                                    show_deleted=False)
        if db_member:
            provider_member = (
                driver_utils.db_member_to_provider_member(db_member))
            return provider_member.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.HEALTHMONITORS:
        hm_repo = repositories.HealthMonitorRepository()
        db_hm = hm_repo.get(session, id=get_data[lib_consts.ID],
                            show_deleted=False)
        if db_hm:
            provider_hm = (
                driver_utils.db_HM_to_provider_HM(db_hm))
            return provider_hm.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.L7POLICIES:
        l7policy_repo = repositories.L7PolicyRepository()
        db_l7policy = l7policy_repo.get(session, id=get_data[lib_consts.ID],
                                        show_deleted=False)
        if db_l7policy:
            provider_l7policy = (
                driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))
            return provider_l7policy.to_dict(recurse=True, render_unsets=True)
    elif get_data[constants.OBJECT] == lib_consts.L7RULES:
        l7rule_repo = repositories.L7RuleRepository()
        db_l7rule = l7rule_repo.get(session, id=get_data[lib_consts.ID],
                                    show_deleted=False)
        if db_l7rule:
            provider_l7rule = (
                driver_utils.db_l7rule_to_provider_l7rule(db_l7rule))
            return provider_l7rule.to_dict(recurse=True, render_unsets=True)
    return {}
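
A minimal caller-side sketch of process_get (not from the Octavia source; the ID value is a placeholder and the keys are the same constants used above):

    # Hypothetical usage sketch; 'example-lb-id' is a placeholder UUID.
    get_data = {constants.OBJECT: lib_consts.LOADBALANCERS,
                lib_consts.ID: 'example-lb-id'}
    provider_dict = process_get(get_data)
    # provider_dict is the provider load balancer rendered as a dict
    # (recurse=True, render_unsets=True), or {} if no matching,
    # non-deleted object exists.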
Example #12
    def delete(self, id, cascade=False):
        """Deletes a load balancer."""
        context = pecan.request.context.get('octavia_context')
        cascade = strutils.bool_from_string(cascade)
        db_lb = self._get_db_lb(context.session, id, show_deleted=False)

        self._auth_validate_action(context, db_lb.project_id,
                                   constants.RBAC_DELETE)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(db_lb.provider)

        with db_api.get_lock_session() as lock_session:
            if (db_lb.listeners or db_lb.pools) and not cascade:
                msg = _("Cannot delete Load Balancer %s - "
                        "it has children") % id
                LOG.warning(msg)
                raise exceptions.ValidationException(detail=msg)
            self._test_lb_status(lock_session, id,
                                 lb_status=constants.PENDING_DELETE)

            LOG.info("Sending delete Load Balancer %s to provider %s",
                     id, driver.name)
            provider_loadbalancer = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))
            driver_utils.call_provider(driver.name, driver.loadbalancer_delete,
                                       provider_loadbalancer, cascade)
Example #13
    def create_pool(self, pool):
        """Creates a node pool.

        :param pool: Provider pool dict to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """

        # TODO(ataraday) It seems we need to get db pool here anyway to get
        # proper listeners
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])
        if not db_pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool[constants.POOL_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_pool_flow, store=store)
Example #14
    def delete_member(self, member):
        """Deletes a pool member.

        :param member: A member provider dictionary to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        delete_member_tf = self._taskflow_load(
            self._member_flows.get_delete_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.POOL_ID: pool.id,
                constants.PROJECT_ID: load_balancer.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_member_tf, log=LOG):
            delete_member_tf.run()
Example #15
    def delete_member(self, member):
        """Deletes a pool member.

        :param member: A member provider dictionary to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_delete_member_flow, store=store)
Example #16
    def update_member(self, member, member_updates):
        """Updates a pool member.

        :param member: A member provider dictionary to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        # TODO(ataraday): revisit this when other flows use dicts
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        update_member_tf = self._taskflow_load(
            self._member_flows.get_update_member_flow(),
            store={
                constants.MEMBER: member,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.POOL_ID: pool.id,
                constants.UPDATE_DICT: member_updates
            })
        with tf_logging.DynamicLoggingListener(update_member_tf, log=LOG):
            update_member_tf.run()
Example #17
    def create_member(self, member):
        """Creates a pool member.

        :param member: A member provider dictionary to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_create_member_flow, store=store)
Example #18
    def delete_pool(self, pool):
        """Deletes a node pool.

        :param pool: Provider pool dict to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        load_balancer = db_pool.load_balancer

        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(),
            store={
                constants.POOL_ID: pool[constants.POOL_ID],
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.PROJECT_ID: db_pool.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_pool_tf, log=LOG):
            delete_pool_tf.run()
Example #19
    def create_listener(self, listener):
        """Creates a listener.

        :param listener: A listener provider dictionary.
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=listener[constants.LISTENER_ID])
        if not db_listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener[constants.LISTENER_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_listener.load_balancer
        listeners = load_balancer.listeners
        dict_listeners = []
        for li in listeners:
            dict_listeners.append(
                provider_utils.db_listener_to_provider_listener(li).to_dict())
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        create_listener_tf = self._taskflow_load(
            self._listener_flows.get_create_listener_flow(),
            store={
                constants.LISTENERS: dict_listeners,
                constants.LOADBALANCER: provider_lb,
                constants.LOADBALANCER_ID: load_balancer.id
            })
        with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG):
            create_listener_tf.run()
Example #20
    def delete_health_monitor(self, health_monitor):
        """Deletes a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.PROJECT_ID: load_balancer.project_id
            })
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()
Example #21
    def put(self, id, load_balancer):
        """Updates a load balancer."""
        load_balancer = load_balancer.loadbalancer
        context = pecan_request.context.get('octavia_context')
        db_lb = self._get_db_lb(context.session, id, show_deleted=False)

        self._auth_validate_action(context, db_lb.project_id,
                                   constants.RBAC_PUT)

        if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType):
            network_driver = utils.get_network_driver()
            validate.qos_extension_enabled(network_driver)
            if load_balancer.vip_qos_policy_id is not None:
                if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id:
                    validate.qos_policy_exists(load_balancer.vip_qos_policy_id)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(db_lb.provider)

        with db_api.get_lock_session() as lock_session:
            self._test_lb_status(lock_session, id)

            # Prepare the data for the driver data model
            lb_dict = load_balancer.to_dict(render_unsets=False)
            lb_dict['id'] = id
            vip_dict = lb_dict.pop('vip', {})
            lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
            if 'qos_policy_id' in vip_dict:
                lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']

            # Also prepare the baseline object data
            old_provider_lb = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(
                    db_lb, for_delete=True))

            # Dispatch to the driver
            LOG.info("Sending update Load Balancer %s to provider "
                     "%s", id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.loadbalancer_update,
                old_provider_lb,
                driver_dm.LoadBalancer.from_dict(lb_dict))

            db_lb_dict = load_balancer.to_dict(render_unsets=False)
            if 'vip' in db_lb_dict:
                db_vip_dict = db_lb_dict.pop('vip')
                self.repositories.vip.update(lock_session, id, **db_vip_dict)
            if db_lb_dict:
                self.repositories.load_balancer.update(lock_session, id,
                                                       **db_lb_dict)

        # Force SQLAlchemy to query the DB, otherwise we get inconsistent
        # results
        context.session.expire_all()
        db_lb = self._get_db_lb(context.session, id)
        result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
        return lb_types.LoadBalancerRootResponse(loadbalancer=result)
Example #22
    def put(self, id, load_balancer):
        """Updates a load balancer."""
        load_balancer = load_balancer.loadbalancer
        context = pecan.request.context.get('octavia_context')
        db_lb = self._get_db_lb(context.session, id, show_deleted=False)

        self._auth_validate_action(context, db_lb.project_id,
                                   constants.RBAC_PUT)

        if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType):
            network_driver = utils.get_network_driver()
            validate.qos_extension_enabled(network_driver)
            if load_balancer.vip_qos_policy_id is not None:
                if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id:
                    validate.qos_policy_exists(load_balancer.vip_qos_policy_id)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(db_lb.provider)

        with db_api.get_lock_session() as lock_session:
            self._test_lb_status(lock_session, id)

            # Prepare the data for the driver data model
            lb_dict = load_balancer.to_dict(render_unsets=False)
            lb_dict['id'] = id
            vip_dict = lb_dict.pop('vip', {})
            lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
            if 'qos_policy_id' in vip_dict:
                lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']

            # Also prepare the baseline object data
            old_provider_lb = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))

            # Dispatch to the driver
            LOG.info("Sending update Load Balancer %s to provider "
                     "%s", id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.loadbalancer_update,
                old_provider_lb,
                driver_dm.LoadBalancer.from_dict(lb_dict))

            db_lb_dict = load_balancer.to_dict(render_unsets=False)
            if 'vip' in db_lb_dict:
                db_vip_dict = db_lb_dict.pop('vip')
                self.repositories.vip.update(lock_session, id, **db_vip_dict)
            if db_lb_dict:
                self.repositories.load_balancer.update(lock_session, id,
                                                       **db_lb_dict)

        # Force SQLAlchemy to query the DB, otherwise we get inconsistent
        # results
        context.session.expire_all()
        db_lb = self._get_db_lb(context.session, id)
        result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
        return lb_types.LoadBalancerRootResponse(loadbalancer=result)
Example #23
    def batch_update_members(self, old_members, new_members, updated_members):
        updated_members = [(provider_utils.db_member_to_provider_member(
            self._member_repo.get(db_apis.get_session(),
                                  id=m.get(constants.ID))).to_dict(), m)
                           for m in updated_members]
        provider_old_members = [
            provider_utils.db_member_to_provider_member(
                self._member_repo.get(db_apis.get_session(),
                                      id=m.get(constants.ID))).to_dict()
            for m in old_members
        ]
        if old_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=old_members[0][constants.POOL_ID])
        elif new_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=new_members[0][constants.POOL_ID])
        else:
            pool = self._pool_repo.get(
                db_apis.get_session(),
                id=updated_members[0][0][constants.POOL_ID])
        load_balancer = pool.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        batch_update_members_tf = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                provider_old_members, new_members, updated_members),
            store=store)
        with tf_logging.DynamicLoggingListener(batch_update_members_tf,
                                               log=LOG):
            batch_update_members_tf.run()
Example #24
    def test_db_loadbalancer_to_provider_loadbalancer(self):
        vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                              network_id=self.sample_data.network_id,
                              port_id=self.sample_data.port_id,
                              subnet_id=self.sample_data.subnet_id)
        test_db_lb = data_models.LoadBalancer(id=1, vip=vip)
        provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(
            test_db_lb)
        ref_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=1,
            vip_address=self.sample_data.ip_address,
            vip_network_id=self.sample_data.network_id,
            vip_port_id=self.sample_data.port_id,
            vip_subnet_id=self.sample_data.subnet_id)
        self.assertEqual(ref_provider_lb.to_dict(render_unsets=True),
                         provider_lb.to_dict(render_unsets=True))
Example #25
    def test_db_loadbalancer_to_provider_loadbalancer(self):
        vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                              network_id=self.sample_data.network_id,
                              port_id=self.sample_data.port_id,
                              subnet_id=self.sample_data.subnet_id)
        test_db_lb = data_models.LoadBalancer(id=1, vip=vip)
        provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(
            test_db_lb)
        ref_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=1,
            vip_address=self.sample_data.ip_address,
            vip_network_id=self.sample_data.network_id,
            vip_port_id=self.sample_data.port_id,
            vip_subnet_id=self.sample_data.subnet_id)
        self.assertEqual(ref_provider_lb.to_dict(render_unsets=True),
                         provider_lb.to_dict(render_unsets=True))
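
Both tests above call the conversion helper directly; a condensed standalone sketch of the same call, using placeholder VIP values rather than the Octavia sample data, might look like this:

    # Hedged sketch assuming the same data_models and utils modules as in
    # the tests above; all literal values are placeholders.
    vip = data_models.Vip(ip_address='203.0.113.10',
                          network_id='example-net-id',
                          port_id='example-port-id',
                          subnet_id='example-subnet-id')
    db_lb = data_models.LoadBalancer(id='example-lb-id', vip=vip)
    provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(db_lb)
    provider_dict = provider_lb.to_dict()  # octavia-lib provider view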
Example #26
    def update_health_monitor(self, original_health_monitor,
                              health_monitor_updates):
        """Updates a health monitor.

        :param original_health_monitor: Provider health monitor dict
        :param health_monitor_updates: Dict containing updated health monitor
            attributes
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        try:
            db_health_monitor = self._get_db_obj_until_pending_update(
                self._health_mon_repo,
                original_health_monitor[constants.HEALTHMONITOR_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_health_monitor = e.last_attempt.result()

        pool = db_health_monitor.pool

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        update_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_update_health_monitor_flow(),
            store={
                constants.HEALTH_MON: original_health_monitor,
                constants.POOL_ID: pool.id,
                constants.LISTENERS: listeners_dicts,
                constants.LOADBALANCER_ID: load_balancer.id,
                constants.LOADBALANCER: provider_lb,
                constants.UPDATE_DICT: health_monitor_updates
            })
        with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG):
            update_hm_tf.run()
Example #27
    def test_db_loadbalancer_to_provider_loadbalancer(self, mock_get_session,
                                                      mock_get_flavor):
        mock_get_flavor.return_value = {'shaved_ice': 'cherry'}
        vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                              network_id=self.sample_data.network_id,
                              port_id=self.sample_data.port_id,
                              subnet_id=self.sample_data.subnet_id)
        test_db_lb = data_models.LoadBalancer(id=1, flavor_id='2', vip=vip)
        provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(
            test_db_lb)
        ref_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=1,
            flavor={'shaved_ice': 'cherry'},
            vip_address=self.sample_data.ip_address,
            vip_network_id=self.sample_data.network_id,
            vip_port_id=self.sample_data.port_id,
            vip_subnet_id=self.sample_data.subnet_id)
        self.assertEqual(ref_provider_lb.to_dict(render_unsets=True),
                         provider_lb.to_dict(render_unsets=True))
Example #28
    def execute(self, loadbalancer):
        """Execute post_vip_routine."""
        amps = []
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
        timeout_dict = {
            constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_rety_interval
        }
        for amp in filter(
                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                db_lb.amphorae):

            try:
                interface = self.amphora_driver.get_interface_from_ip(
                    amp, amp.vrrp_ip, timeout_dict=timeout_dict)
            except Exception as e:
                # This can occur when an active/standby LB has no listener
                LOG.error(
                    'Failed to get amphora VRRP interface on amphora '
                    '%s. Skipping this amphora as it is failing due to: '
                    '%s', amp.id, str(e))
                self.amphora_repo.update(db_apis.get_session(),
                                         amp.id,
                                         status=constants.ERROR)
                continue

            self.amphora_repo.update(db_apis.get_session(),
                                     amp.id,
                                     vrrp_interface=interface)
            amps.append(self.amphora_repo.get(db_apis.get_session(),
                                              id=amp.id))
        db_lb.amphorae = amps
        return provider_utils.db_loadbalancer_to_provider_loadbalancer(
            db_lb).to_dict()
Example #29
    def test_apply_qos_on_update(self, mock_get_session, mock_get_lb,
                                 mock_get_lb_db, mock_get_net_driver):
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        net = network_tasks.ApplyQos()
        null_qos_vip = o_data_models.Vip(qos_policy_id=None)
        null_qos_lb = o_data_models.LoadBalancer(
            vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        null_qos_lb_dict = (
            provider_utils.db_loadbalancer_to_provider_loadbalancer(
                null_qos_lb).to_dict())

        tmp_vip_object = o_data_models.Vip(
            qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
        tmp_lb = o_data_models.LoadBalancer(
            vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
            amphorae=[AMPS_DATA[0]])
        pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            tmp_lb).to_dict()
        mock_get_lb.return_value = tmp_lb
        # execute
        update_dict = {'description': 'fool'}
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        mock_get_lb.return_value = null_qos_lb
        update_dict = {'vip': {'qos_policy_id': None}}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            None, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        update_dict = {'name': '123'}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'description': 'fool'}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        mock_get_lb.return_value = tmp_lb
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        update_dict = {'description': 'fool',
                       'vip': {
                           'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        net.execute(pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        update_dict = {}
        net.execute(null_qos_lb_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)

        # revert
        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        tmp_lb.amphorae = [AMPS_DATA[0]]
        tmp_lb.topology = constants.TOPOLOGY_SINGLE
        update_dict = {'description': 'fool'}
        mock_get_lb_db.return_value = tmp_lb
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        update_dict = {'vip': {'qos_policy_id': None}}
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, null_qos_lb_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_once_with(
            t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
        self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)

        mock_driver.reset_mock()
        mock_get_lb.reset_mock()
        update_dict = {'vip': {
            'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
        tmp_lb.amphorae = AMPS_DATA
        tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
        ori_lb_db = LB2
        ori_lb_db.amphorae = [AMPS_DATA[0]]
        mock_get_lb_db.return_value = ori_lb_db
        net.revert(None, pr_tm_dict, update_dict=update_dict)
        mock_driver.apply_qos_on_port.assert_called_with(
            t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
        self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
Example #30
    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """
        stored_params = {
            constants.FAILED_AMPHORA: amp.to_dict(),
            constants.LOADBALANCER_ID: amp.load_balancer_id,
            constants.BUILD_TYPE_PRIORITY: priority,
        }

        if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):
            amp_role = 'master_or_backup'
        elif amp.role == constants.ROLE_STANDALONE:
            amp_role = 'standalone'
        elif amp.role is None:
            amp_role = 'spare'
        else:
            amp_role = 'undefined'

        LOG.info(
            "Perform failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })

        if amp.status == constants.DELETED:
            LOG.warning(
                'Amphora %s is marked DELETED in the database but '
                'was submitted for failover. Deleting it from the '
                'amphora health table to exclude it from health '
                'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        if (CONF.house_keeping.spare_amphora_pool_size
                == 0) and (CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amp.id)
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            lb).to_dict() if lb else lb
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        if lb and lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}
        if lb and lb.availability_zone:
            stored_params[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), lb.availability_zone))
        else:
            stored_params[constants.AVAILABILITY_ZONE] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(role=amp.role,
                                                  load_balancer=provider_lb),
            store=stored_params)

        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

        LOG.info(
            "Successfully completed the failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })
Example #31
    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The load balancer was not
                                                    found.
        """
        try:
            lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
            if lb is None:
                raise exceptions.NotFound(resource=constants.LOADBALANCER,
                                          id=load_balancer_id)

            # Get the ordered list of amphorae to failover for this LB.
            amps = self._get_amphorae_for_failover(lb)

            if lb.topology == constants.TOPOLOGY_SINGLE:
                if len(amps) != 1:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'one should exist. Repairing.', len(amps),
                        load_balancer_id)
            elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:

                if len(amps) != 2:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'two should exist. Repairing.', len(amps),
                        load_balancer_id)
            else:
                LOG.error(
                    'Unknown load balancer topology found: %s, aborting '
                    'failover!', lb.topology)
                raise exceptions.InvalidTopology(topology=lb.topology)

            # We must provide a topology in the flavor definition
            # here for the amphora to be created with the correct
            # configuration.
            if lb.flavor_id:
                flavor = self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id)
                flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
            else:
                flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}

            provider_lb_dict = (
                provider_utils.db_loadbalancer_to_provider_loadbalancer(
                    lb).to_dict() if lb else lb)

            provider_lb_dict[constants.FLAVOR] = flavor

            stored_params = {
                constants.LOADBALANCER: provider_lb_dict,
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
                constants.SERVER_GROUP_ID: lb.server_group_id,
                constants.LOADBALANCER_ID: lb.id,
                constants.FLAVOR: flavor
            }

            if lb.availability_zone:
                stored_params[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), lb.availability_zone))
            else:
                stored_params[constants.AVAILABILITY_ZONE] = {}

            self.run_flow(flow_utils.get_failover_LB_flow,
                          amps,
                          provider_lb_dict,
                          store=stored_params,
                          wait=True)

            LOG.info('Failover of load balancer %s completed successfully.',
                     lb.id)

        except Exception as e:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("LB %(lbid)s failover exception: %(exc)s", {
                    'lbid': load_balancer_id,
                    'exc': str(e)
                })
                self._lb_repo.update(db_apis.get_session(),
                                     load_balancer_id,
                                     provisioning_status=constants.ERROR)
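
Most of the controller-worker examples above share the same preparation steps: fetch the database object, convert the parent load balancer and its listeners to provider dicts, assemble a taskflow store, then run the flow. A condensed sketch of that recurring shape, written as a hypothetical helper that is not part of the Octavia source and assumes the same repositories, provider_utils, constants, and run_flow seen above:

    def _run_pool_scoped_flow(self, db_pool, flow_getter, **extra_store):
        # Hypothetical helper illustrating the recurring pattern above;
        # not present in the Octavia code base.
        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()
        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        store = {constants.LISTENERS: listeners_dicts,
                 constants.LOADBALANCER: provider_lb,
                 constants.LOADBALANCER_ID: load_balancer.id}
        store.update(extra_store)
        self.run_flow(flow_getter, store=store)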