Example #1
0
    def update_amphora_agent_config(self, amphora_id):
        """Push a regenerated agent configuration to an amphora.

        Rewrites the agent configuration file on the amphora and applies
        any mutable settings to the running agent.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info("Start amphora agent configuration update, amphora's id "
                 "is: %s", amphora_id)
        amphora = self._amphora_repo.get(db_apis.get_session(),
                                         id=amphora_id)
        loadbalancer = self._amphora_repo.get_lb_for_amphora(
            db_apis.get_session(), amphora_id)
        # Flavor metadata is optional; default to an empty dict.
        flavor_meta = {}
        if loadbalancer.flavor_id:
            flavor_meta = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), loadbalancer.flavor_id)

        store = {constants.AMPHORA: amphora,
                 constants.FLAVOR: flavor_meta}
        engine = self._taskflow_load(
            self._amphora_flows.update_amphora_config_flow(), store=store)

        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #2
0
    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        """Run the batch add/remove/update member flow for one pool."""
        removed = [self._member_repo.get(db_apis.get_session(), id=member_id)
                   for member_id in old_member_ids]
        added = [self._member_repo.get(db_apis.get_session(), id=member_id)
                 for member_id in new_member_ids]
        updated_members = [
            (self._member_repo.get(db_apis.get_session(),
                                   id=update.get('id')), update)
            for update in updated_members]
        # At least one of the three groups is non-empty; any member in any
        # group identifies the (single) pool being operated on.
        if removed:
            pool = removed[0].pool
        elif added:
            pool = added[0].pool
        else:
            pool = updated_members[0][0].pool

        store = {constants.LISTENERS: pool.listeners,
                 constants.LOADBALANCER: pool.load_balancer,
                 constants.POOL: pool}
        engine = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                removed, added, updated_members),
            store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #3
0
    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """
        # Pre-bind amp so the except handler below cannot hit a NameError
        # when the initial DB lookup itself is what raised.
        amp = None
        try:
            amp = self._amphora_repo.get(db_apis.get_session(),
                                         id=amphora_id)
            if not amp:
                LOG.warning("Could not fetch Amphora %s from DB, ignoring "
                            "failover request.", amphora_id)
                return
            self._perform_amphora_failover(
                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
            if amp.load_balancer_id:
                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                         amp.load_balancer_id)
                self._lb_repo.update(
                    db_apis.get_session(), amp.load_balancer_id,
                    provisioning_status=constants.ACTIVE)
        except Exception as e:
            try:
                # Best effort: only flag the LB when we actually know which
                # LB the amphora belongs to.
                if amp and amp.load_balancer_id:
                    self._lb_repo.update(
                        db_apis.get_session(), amp.load_balancer_id,
                        provisioning_status=constants.ERROR)
            except Exception:
                LOG.error("Unable to revert LB status to ERROR.")
            with excutils.save_and_reraise_exception():
                LOG.error("Failover exception: %s", e)
    def execute(self, loadbalancer):
        """Execute post_vip_routine.

        Looks up the VRRP interface on every ALLOCATED amphora of the
        load balancer and persists it; amphorae whose interface cannot be
        determined are marked ERROR and dropped from the result.

        :param loadbalancer: load balancer whose amphorae to refresh
        :returns: the load balancer with its amphorae list rebuilt from
                  freshly re-read DB records
        """
        amps = []
        # Bound how long we wait on each amphora agent connection.
        timeout_dict = {
            constants.CONN_MAX_RETRIES:
                CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
                # NOTE(review): 'rety' matches the option name as referenced
                # here — presumably defined with the same spelling; confirm
                # at the config definition before renaming.
                CONF.haproxy_amphora.active_connection_rety_interval}
        # Only ALLOCATED amphorae participate; others are skipped entirely.
        for amp in six.moves.filter(
            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                loadbalancer.amphorae):

            try:
                interface = self.amphora_driver.get_vrrp_interface(
                    amp, timeout_dict=timeout_dict)
            except Exception as e:
                # This can occur when an active/standby LB has no listener
                LOG.error('Failed to get amphora VRRP interface on amphora '
                          '%s. Skipping this amphora as it is failing due to: '
                          '%s', amp.id, str(e))
                self.amphora_repo.update(db_apis.get_session(), amp.id,
                                         status=constants.ERROR)
                continue

            self.amphora_repo.update(db_apis.get_session(), amp.id,
                                     vrrp_interface=interface)
            # Re-read so the returned object reflects the update above.
            amps.append(self.amphora_repo.get(db_apis.get_session(),
                                              id=amp.id))
        loadbalancer.amphorae = amps
        return loadbalancer
Example #5
0
    def listener_controller(listener, delete=False, update=False,
                            create=False):
        """Simulate the asynchronous controller handling of a listener."""
        # Pretend the controller takes some time to act.
        time.sleep(ASYNC_TIME)
        LOG.info(_LI("Simulating controller operation for listener..."))

        if delete:
            repo.listener.update(db_api.get_session(), listener.id,
                                 operating_status=constants.OFFLINE,
                                 provisioning_status=constants.DELETED)
        elif update:
            # Keep whatever operating status is currently stored in the DB.
            stored = repo.listener.get(db_api.get_session(),
                                       id=listener.id)
            fields = listener.to_dict()
            fields['operating_status'] = stored.operating_status
            repo.listener.update(db_api.get_session(), listener.id,
                                 **fields)
        elif create:
            repo.listener.update(db_api.get_session(), listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)
        # The parent load balancer always returns to a healthy state.
        repo.load_balancer.update(db_api.get_session(),
                                  listener.load_balancer.id,
                                  operating_status=constants.ONLINE,
                                  provisioning_status=constants.ACTIVE)
        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
Example #6
0
    def execute(self, loadbalancer, listener):
        """Mark the load balancer and listener as active in DB."""

        # Pass the values as lazy logging args instead of eager
        # %-formatting so the string is only built when DEBUG is enabled.
        LOG.debug("Mark ACTIVE in DB for load balancer id: %s "
                  "and listener id: %s", loadbalancer.id, listener.id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ACTIVE)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.ACTIVE)
 def execute(self, loadbalancer):
     """Execute post_vip_routine: persist each amphora's VRRP interface."""
     refreshed = []
     for amphora in loadbalancer.amphorae:
         # Currently this is supported only with REST Driver
         vrrp_if = self.amphora_driver.get_vrrp_interface(amphora)
         self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                  vrrp_interface=vrrp_if)
         # Re-read so the returned object reflects the update above.
         refreshed.append(
             self.amphora_repo.get(db_apis.get_session(), id=amphora.id))
     loadbalancer.amphorae = refreshed
     return loadbalancer
Example #8
0
    def execute(self, loadbalancer, listeners):
        """Flip the load balancer and each given listener to ACTIVE in DB."""

        LOG.debug("Mark ACTIVE in DB for load balancer id: %s "
                  "and listener ids: %s", loadbalancer.id,
                  ', '.join(listener.id for listener in listeners))
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ACTIVE)
        for lstnr in listeners:
            self.listener_repo.update(
                db_apis.get_session(), lstnr.id,
                provisioning_status=constants.ACTIVE)
Example #9
0
    def revert(self, loadbalancer, listener, *args, **kwargs):
        """Mark the load balancer and listener as broken."""

        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Reverting mark load balancer "
                        "and listener active in DB "
                        "for load balancer id %(LB)s and "
                        "listener id: %(list)s"),
                    {'LB': loadbalancer.id, 'list': listener.id})
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ERROR)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.ERROR)
Example #10
0
 def execute(self, loadbalancer):
     """Execute post_vip_routine."""
     amps = []
     # Only ALLOCATED amphorae participate in VRRP.
     allocated = six.moves.filter(
         lambda a: a.status == constants.AMPHORA_ALLOCATED,
         loadbalancer.amphorae)
     for amp in allocated:
         # Currently this is supported only with REST Driver
         interface = self.amphora_driver.get_vrrp_interface(amp)
         self.amphora_repo.update(db_apis.get_session(), amp.id,
                                  vrrp_interface=interface)
         # Re-read so the returned object reflects the update above.
         amps.append(self.amphora_repo.get(db_apis.get_session(),
                                           id=amp.id))
     loadbalancer.amphorae = amps
     return loadbalancer
Example #11
0
    def revert(self, listener, *args, **kwargs):
        """Mark the listener as broken and ready to be cleaned up."""

        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Reverting mark listener pending delete in DB "
                        "for listener id %s"), listener.id)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.ERROR)
Example #12
0
    def post(self, flavor_):
        """Creates a flavor."""
        flavor = flavor_.flavor
        context = pecan.request.context.get('octavia_context')
        # RBAC: the caller must be allowed to POST within their project.
        self._auth_validate_action(context, context.project_id,
                                   constants.RBAC_POST)

        # TODO(johnsom) Validate the flavor profile ID

        # Explicit transaction: the row only becomes visible on commit.
        lock_session = db_api.get_session(autocommit=False)
        try:
            flavor_dict = flavor.to_dict(render_unsets=True)
            flavor_dict['id'] = uuidutils.generate_uuid()
            db_flavor = self.repositories.flavor.create(lock_session,
                                                        **flavor_dict)
            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            # Duplicate flavor name: roll back and report the conflict.
            lock_session.rollback()
            raise exceptions.RecordAlreadyExists(field='flavor',
                                                 name=flavor.name)
        except Exception:
            # Any other failure: roll back, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                lock_session.rollback()
        result = self._convert_db_to_type(db_flavor,
                                          flavor_types.FlavorResponse)
        return flavor_types.FlavorRootResponse(flavor=result)
Example #13
0
 def delete(self, id):
     """Deletes a listener from a load balancer.

     :param id: ID of the listener to delete
     :raises NotFound: the listener does not exist
     :raises ImmutableObject: the load balancer is not in a mutable state
     """
     session = db_api.get_session()
     db_listener = self.repositories.listener.get(session, id=id)
     if not db_listener:
         # Lazy logging args instead of eager %-formatting.
         LOG.info(_LI("Listener %s not found."), id)
         raise exceptions.NotFound(
             resource=data_models.Listener._name(), id=id)
     # Verify load balancer is in a mutable status.  If so it can be assumed
     # that the listener is also in a mutable status because a load balancer
     # will only be ACTIVE when all of its listeners are ACTIVE.
     if not self.repositories.test_and_set_lb_and_listener_prov_status(
             session, self.load_balancer_id, id, constants.PENDING_UPDATE,
             constants.PENDING_DELETE):
         lb_repo = self.repositories.load_balancer
         db_lb = lb_repo.get(session, id=self.load_balancer_id)
         raise exceptions.ImmutableObject(resource=db_lb._name(),
                                          id=self.load_balancer_id)
     # Re-read now that the provisioning status has been flipped.
     db_listener = self.repositories.listener.get(session, id=id)
     try:
         LOG.info(_LI("Sending Deletion of Listener %s to handler"),
                  db_listener.id)
         self.handler.delete(db_listener)
     except Exception:
         # Handler failure: record ERROR but still return the listener
         # (reraise=False suppresses the exception after the update).
         with excutils.save_and_reraise_exception(reraise=False):
             self.repositories.listener.update(
                 session, db_listener.id,
                 provisioning_status=constants.ERROR)
     db_listener = self.repositories.listener.get(
         session, id=db_listener.id)
     return self._convert_db_to_type(db_listener,
                                     listener_types.ListenerResponse)
Example #14
0
    def create_health_monitor(self, pool_id):
        """Creates a health monitor.

        :param pool_id: ID of the pool to create a health monitor on
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               pool_id=pool_id)

        # Wire the in-memory object graph together before handing it to
        # the flow.
        pool = health_mon.pool
        pool.health_monitor = health_mon
        listener = pool.listener
        listener.default_pool = pool
        load_balancer = listener.load_balancer

        store = {'health_mon': health_mon,
                 'listener': listener,
                 'loadbalancer': load_balancer,
                 'vip': load_balancer.vip}
        engine = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #15
0
 def execute(self, amps_data):
     """Persist the VRRP/HA addressing of each amphora record."""
     for amp in amps_data:
         self.repos.amphora.update(db_apis.get_session(), amp.id,
                                   vrrp_ip=amp.vrrp_ip,
                                   ha_ip=amp.ha_ip,
                                   vrrp_port_id=amp.vrrp_port_id,
                                   ha_port_id=amp.ha_port_id)
Example #16
0
    def execute(self, listener):
        """Mark the listener as pending delete in DB."""

        # Lazy logging args instead of eager %-formatting: the message is
        # only rendered when DEBUG logging is enabled.
        LOG.debug("Mark PENDING DELETE in DB for listener id: %s",
                  listener.id)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.PENDING_DELETE)
Example #17
0
 def revert(self, result, amphora, *args, **kwargs):
     """Mark the amphora ERROR after a failed post network plug."""
     # When execute itself failed there is no successful plug to revert.
     if not isinstance(result, failure.Failure):
         LOG.warning(_LW("Reverting post network plug."))
         self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
                                  status=constants.ERROR)
Example #18
0
 def get_all(self):
     """Lists all listeners on a load balancer."""
     listeners = self.repositories.listener.get_all(
         db_api.get_session(), load_balancer_id=self.load_balancer_id)
     # A list-typed target converts each DB record individually.
     return self._convert_db_to_type(listeners,
                                     [listener_types.ListenerResponse])
Example #19
0
 def remove_all_from_build_req_queue(self):
     """Flush every amphora build request and reset all build slots.

     Both operations run inside one DB transaction so the queue and the
     slot counter cannot get out of sync.
     """
     session = db_apis.get_session()
     with session.begin(subtransactions=True):
         self.amp_build_req_repo.delete_all(session)
         self.amp_build_slots_repo.update_count(session, action='reset')
         LOG.debug("Removed all the build requests and "
                   "released the build slots")
Example #20
0
    def post(self, flavor_profile_):
        """Creates a flavor Profile."""
        flavorprofile = flavor_profile_.flavorprofile
        context = pecan.request.context.get('octavia_context')
        # RBAC: the caller must be allowed to POST within their project.
        self._auth_validate_action(context, context.project_id,
                                   constants.RBAC_POST)
        # Do a basic JSON validation on the metadata
        try:
            flavor_data_dict = jsonutils.loads(flavorprofile.flavor_data)
        except Exception:
            raise exceptions.InvalidOption(
                value=flavorprofile.flavor_data,
                option=constants.FLAVOR_DATA)

        # Validate that the provider driver supports the metadata
        driver = driver_factory.get_driver(flavorprofile.provider_name)
        driver_utils.call_provider(driver.name, driver.validate_flavor,
                                   flavor_data_dict)

        # Explicit transaction: the row only becomes visible on commit.
        lock_session = db_api.get_session(autocommit=False)
        try:
            flavorprofile_dict = flavorprofile.to_dict(render_unsets=True)
            flavorprofile_dict['id'] = uuidutils.generate_uuid()
            db_flavor_profile = self.repositories.flavor_profile.create(
                lock_session, **flavorprofile_dict)
            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            # Duplicate ID: roll back and report the conflict.
            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
        except Exception:
            # Any other failure: roll back, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                lock_session.rollback()
        result = self._convert_db_to_type(
            db_flavor_profile, profile_types.FlavorProfileResponse)
        return profile_types.FlavorProfileRootResponse(flavorprofile=result)
Example #21
0
 def add_to_build_request_queue(self, amphora_id, build_priority):
     """Queue an amphora build request, then block until a slot is free.

     :param amphora_id: ID of the amphora to build
     :param build_priority: priority used to order the build queue
     """
     self.amp_build_req_repo.add_to_build_queue(
         db_apis.get_session(),
         amphora_id=amphora_id,
         priority=build_priority)
     LOG.debug("Added build request for %s to the queue", amphora_id)
     # Blocks until this amphora is granted a build slot.
     self.wait_for_build_slot(amphora_id)
Example #22
0
    def execute(self, loadbalancer):
        """Set the load balancer (and optionally its listeners) ACTIVE."""

        if self.mark_listeners:
            LOG.debug("Marking all listeners of loadbalancer %s ACTIVE",
                      loadbalancer.id)
            for lstnr in loadbalancer.listeners:
                self.listener_repo.update(
                    db_apis.get_session(), lstnr.id,
                    provisioning_status=constants.ACTIVE)

        LOG.info(_LI("Mark ACTIVE in DB for load balancer id: %s"),
                 loadbalancer.id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ACTIVE)
Example #23
0
 def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
     """Detach the server group from the load balancer on revert."""
     LOG.warning(_LW('Reverting Server Group updated with id: %(s1)s for '
                     'load balancer id: %(s2)s '),
                 {'s1': server_group_id, 's2': loadbalancer_id})
     # Clearing the field undoes the association made by execute().
     self.loadbalancer_repo.update(db_apis.get_session(),
                                   id=loadbalancer_id, server_group_id=None)
Example #24
0
 def remove_from_build_req_queue(self, amphora_id):
     """Dequeue one amphora build request and free its build slot.

     Both operations run inside one DB transaction so the queue and the
     slot counter cannot get out of sync.

     :param amphora_id: ID of the amphora whose request is removed
     """
     session = db_apis.get_session()
     with session.begin(subtransactions=True):
         self.amp_build_req_repo.delete(session, amphora_id=amphora_id)
         self.amp_build_slots_repo.update_count(session, action='decrement')
         LOG.debug("Removed request for %s from queue"
                   " and released the build slot", amphora_id)
Example #25
0
    def revert(self, listener, *args, **kwargs):
        """Set the listener to ERROR after a failed start."""

        LOG.warning(_LW("Reverting listener start."))
        self.listener_repo.update(db_apis.get_session(), id=listener.id,
                                  provisioning_status=constants.ERROR)
Example #26
0
    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)

        listener = pool.listener
        listener.default_pool = pool
        load_balancer = listener.load_balancer

        store = {'pool': pool,
                 'listener': listener,
                 'loadbalancer': load_balancer,
                 'vip': load_balancer.vip,
                 'update_dict': pool_updates}
        engine = self._taskflow_load(
            self._pool_flows.get_update_pool_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #27
0
    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoSuitableLB: Unable to find the load balancer
        """
        pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)

        listener = pool.listener
        listener.default_pool = pool
        load_balancer = listener.load_balancer

        store = {'pool': pool,
                 'listener': listener,
                 'loadbalancer': load_balancer,
                 'vip': load_balancer.vip}
        engine = self._taskflow_load(
            self._pool_flows.get_create_pool_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #28
0
    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)

        listener = member.pool.listener
        listener.default_pool = member.pool
        load_balancer = listener.load_balancer

        store = {'member': member,
                 'listener': listener,
                 'loadbalancer': load_balancer,
                 'vip': load_balancer.vip,
                 'update_dict': member_updates}
        engine = self._taskflow_load(
            self._member_flows.get_update_member_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #29
0
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)

        listener = member.pool.listener
        listener.default_pool = member.pool
        load_balancer = listener.load_balancer

        store = {'member': member,
                 'listener': listener,
                 'loadbalancer': load_balancer,
                 'vip': load_balancer.vip}
        engine = self._taskflow_load(
            self._member_flows.get_create_member_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #30
0
    def _lookup(self, pool_id, *remainder):
        """Overridden pecan _lookup method for custom routing.

        Verifies that the pool passed in the url exists, and if so decides
        which controller, if any, should control be passed.
        """
        session = db_api.get_session()
        # Both sub-routes share the pool fetch / 404 handling, so branch on
        # the resource name once instead of duplicating the lookup.
        if pool_id and remainder and remainder[0] in ('members',
                                                      'healthmonitor'):
            resource = remainder[0]
            remainder = remainder[1:]
            db_pool = self.repositories.pool.get(session, id=pool_id)
            if not db_pool:
                # Lazy logging args instead of eager %-formatting.
                LOG.info(_LI("Pool %s not found."), pool_id)
                raise exceptions.NotFound(resource=data_models.Pool._name(),
                                          id=pool_id)
            if resource == 'members':
                return member.MembersController(
                    load_balancer_id=self.load_balancer_id,
                    listener_id=self.listener_id,
                    pool_id=db_pool.id), remainder
            return health_monitor.HealthMonitorController(
                load_balancer_id=self.load_balancer_id,
                listener_id=self.listener_id,
                pool_id=db_pool.id), remainder
Example #31
0
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        member = self._member_repo.get(db_apis.get_session(), id=member_id)
        pool = member.pool

        store = {constants.MEMBER: member,
                 constants.LISTENERS: pool.listeners,
                 constants.LOADBALANCER: pool.load_balancer,
                 constants.POOL: pool}
        engine = self._taskflow_load(
            self._member_flows.get_create_member_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #32
0
    def create_load_balancer(self, load_balancer_id, flavor=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        topology = lb.topology

        # TODO(johnsom) convert this to octavia_lib constant flavor
        # once octavia is transitioned to use octavia_lib
        store = {constants.LOADBALANCER_ID: load_balancer_id,
                 constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_NORMAL_PRIORITY,
                 constants.FLAVOR: flavor,
                 constants.UPDATE_DICT: {constants.TOPOLOGY: topology}}

        flow = self._lb_flows.get_create_load_balancer_flow(
            topology=topology, listeners=lb.listeners)
        engine = self._taskflow_load(flow, store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #33
0
    def create_pool(self, pool):
        """Creates a node pool.

        :param pool: Provider pool dict to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """

        # TODO(ataraday) It seems we need to get db pool here anyway to get
        # proper listeners
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])
        if not db_pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool[constants.POOL_ID])
            raise db_exceptions.NoResultFound

        lb = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            lb).to_dict()
        listener_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {constants.POOL_ID: pool[constants.POOL_ID],
                 constants.LISTENERS: listener_dicts,
                 constants.LOADBALANCER_ID: lb.id,
                 constants.LOADBALANCER: provider_lb}
        engine = self._taskflow_load(
            self._pool_flows.get_create_pool_flow(), store=store)
        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
    def create_listener(self, listener_id):
        """Function to create listener for A10 provider"""

        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        store = {constants.LOADBALANCER: listener.load_balancer,
                 constants.LISTENER: listener}

        parent_projects = utils.get_parent_project_list()
        listener_parent = utils.get_parent_project(listener.project_id)
        # The rack vThunder flow applies when the project (or its parent)
        # is a known parent project, or the project has its own device
        # entry in the hardware_thunder config.
        use_rack_flow = (
            listener.project_id in parent_projects
            or (listener_parent and listener_parent in parent_projects)
            or listener.project_id in CONF.hardware_thunder.devices)

        if use_rack_flow:
            flow = self._listener_flows.get_rack_vthunder_create_listener_flow(
                listener.project_id)
        else:
            flow = self._listener_flows.get_create_listener_flow()
        engine = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(engine, log=LOG):
            engine.run()
Example #35
0
    def create_amphora(self, availability_zone=None):
        """Create a spare amphora via the create-amphora flow.

        Failures are logged rather than propagated (best effort).

        :param availability_zone: optional availability zone name; when
            given, its metadata dict is resolved and passed to the flow
        :returns: None
        """
        flow_store = {
            constants.BUILD_TYPE_PRIORITY:
            constants.LB_CREATE_SPARES_POOL_PRIORITY,
            constants.FLAVOR: None,
            constants.SERVER_GROUP_ID: None,
            constants.AVAILABILITY_ZONE: None
        }
        try:
            if availability_zone:
                az_metadata = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
                flow_store[constants.AVAILABILITY_ZONE] = az_metadata
            self.run_flow(flow_utils.get_create_amphora_flow,
                          store=flow_store,
                          wait=True)
        except Exception as e:
            LOG.error('Failed to create an amphora due to: %s', str(e))
Example #36
0
    def post(self, member_):
        """Create a pool member on this controller's pool.

        :param member_: member root request type wrapping the member
        :returns: the handler response for the created member
        :raises NotFound: the member's subnet does not exist
        :raises QuotaException: the project's member quota is exhausted
        """
        member = member_.member
        context = pecan.request.context.get('octavia_context')

        # Validate member subnet before touching the database.
        if member.subnet_id and not validate.subnet_exists(member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

        pool = self.repositories.pool.get(context.session, id=self.pool_id)
        member.project_id = self._get_lb_project_id(context.session,
                                                    pool.load_balancer_id)

        self._auth_validate_action(context, member.project_id,
                                   constants.RBAC_POST)

        # All DB writes happen on a dedicated lock session so they can be
        # rolled back as one unit on any failure.
        lock_session = db_api.get_session(autocommit=False)
        try:
            quota_met = self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                member.project_id)
            if quota_met:
                raise exceptions.QuotaException

            member_dict = db_prepare.create_member(
                member.to_dict(render_unsets=True), self.pool_id,
                bool(pool.health_monitor))

            self._test_lb_and_listener_and_pool_statuses(lock_session)

            db_member = self._validate_create_member(lock_session, member_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        return self._send_member_to_handler(context.session, db_member)
Example #37
0
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        hm_id = health_monitor[constants.HEALTHMONITOR_ID]
        db_hm = self._health_mon_repo.get(db_apis.get_session(), id=hm_id)
        if not db_hm:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'healthmonitor',
                health_monitor[constants.HEALTHMONITOR_ID])
            raise db_exceptions.NoResultFound

        # Attach the monitor to its pool so downstream tasks see it there.
        db_pool = db_hm.pool
        db_pool.health_monitor = db_hm
        db_lb = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            db_lb).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        flow_store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: db_pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_lb.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_health_monitor_flow,
                      store=flow_store)
Example #38
0
def lb_dict_to_provider_dict(lb_dict,
                             vip=None,
                             db_pools=None,
                             db_listeners=None,
                             for_delete=False):
    """Convert an internal load balancer dict to the provider-driver form.

    :param lb_dict: internal load balancer dict (must contain 'id')
    :param vip: optional VIP object whose fields are flattened onto the dict
    :param db_pools: optional DB pool objects converted into 'pools'
    :param db_listeners: optional DB listener objects converted to 'listeners'
    :param for_delete: forwarded to the pool/listener converters
    :returns: the provider-format load balancer dict
    """
    provider_lb = _base_to_provider_dict(lb_dict, include_project_id=True)
    # Providers use 'loadbalancer_id' instead of the bare 'id'.
    provider_lb['loadbalancer_id'] = provider_lb.pop('id')

    if vip:
        provider_lb['vip_address'] = vip.ip_address
        provider_lb['vip_network_id'] = vip.network_id
        provider_lb['vip_port_id'] = vip.port_id
        provider_lb['vip_subnet_id'] = vip.subnet_id
        provider_lb['vip_qos_policy_id'] = vip.qos_policy_id

    if lb_dict.get('flavor_id'):
        # Expand the flavor id into its metadata dict for the provider.
        flavor_repo = repositories.FlavorRepository()
        provider_lb['flavor'] = flavor_repo.get_flavor_metadata_dict(
            db_api.get_session(), lb_dict['flavor_id'])

    if db_pools:
        provider_lb['pools'] = db_pools_to_provider_pools(
            db_pools, for_delete=for_delete)
    if db_listeners:
        provider_lb['listeners'] = db_listeners_to_provider_listeners(
            db_listeners, for_delete=for_delete)
    return provider_lb
Example #39
0
    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)

        pool = health_mon.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={
                constants.HEALTH_MON: health_mon,
                constants.POOL: pool,
                constants.LISTENERS: listeners,
                constants.LOADBALANCER: load_balancer
            })
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()
Example #40
0
    def member_batch_update(self, pool_id, members):
        """Diff requested members against a pool's current members and cast
        a batch update to the controller worker.

        :param pool_id: ID of the pool whose members are being replaced
        :param members: provider member objects describing the desired state
        """
        pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)

        existing = pool.members
        existing_ids = [m.id for m in existing]
        # The driver will always pass objects with IDs.
        requested_ids = [m.member_id for m in members]

        additions = []
        updates = []
        for req in members:
            if req.member_id not in existing_ids:
                additions.append(req)
                continue
            # Translate provider field names to Octavia's internal names.
            upd = req.to_dict(render_unsets=False)
            upd['id'] = upd.pop('member_id')
            if 'address' in upd:
                upd['ip_address'] = upd.pop('address')
            if 'admin_state_up' in upd:
                upd['enabled'] = upd.pop('admin_state_up')
            updates.append(upd)

        # Members present in the DB but absent from the request are deleted.
        removals = [m for m in existing if m.id not in requested_ids]

        payload = {
            'old_member_ids': [m.id for m in removals],
            'new_member_ids': [m.member_id for m in additions],
            'updated_members': updates
        }
        self.client.cast({}, 'batch_update_members', **payload)
Example #41
0
    def delete_old_amphorae(self):
        """Purge DB records of amphorae that have been deleted long enough.

        An amphora is purged only when it is expired in both the amphora
        table and the amphora health table.
        """
        expiry = datetime.timedelta(
            seconds=CONF.house_keeping.amphora_expiry_age)

        session = db_api.get_session()
        expiring_ids = self.amp_repo.get_all_deleted_expiring(
            session, exp_age=expiry)

        for amphora_id in expiring_ids:
            # The amphora table already considers this amp expiring; also
            # require expiry in the health table so amps still emitting
            # zombie heartbeats are never purged.
            if not self.amp_health_repo.check_amphora_health_expired(
                    session, amphora_id, expiry):
                continue
            LOG.debug('Attempting to purge db record for Amphora ID: %s',
                      amphora_id)
            self.amp_repo.delete(session, id=amphora_id)
            try:
                self.amp_health_repo.delete(session, amphora_id=amphora_id)
            except sqlalchemy_exceptions.NoResultFound:
                pass  # Best effort delete, this record might not exist
            LOG.info('Purged db record for Amphora ID: %s', amphora_id)
Example #42
0
    def update_l7policy(self, l7policy_id, l7policy_updates):
        """Updates an L7 policy.

        :param l7policy_id: ID of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7policy_id)
        owning_listener = db_l7policy.listener

        flow_store = {
            constants.L7POLICY: db_l7policy,
            constants.LISTENERS: [owning_listener],
            constants.LOADBALANCER: owning_listener.load_balancer,
            constants.UPDATE_DICT: l7policy_updates
        }
        update_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store=flow_store)
        with tf_logging.DynamicLoggingListener(update_tf, log=LOG):
            update_tf.run()
Example #43
0
    def create_amphora(self, availability_zone=None):
        """Creates an Amphora.

        This is used to create spare amphora.

        :param availability_zone: optional availability zone name; when
            given, its metadata dict is resolved and passed to the flow
        :returns: the job id of the posted flow, or None on failure
        """
        try:
            store = {
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_SPARES_POOL_PRIORITY,
                constants.FLAVOR: None,
                constants.AVAILABILITY_ZONE: None
            }
            if availability_zone:
                store[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
            job_id = self.services_controller.run_poster(
                flow_utils.get_create_amphora_flow, store=store, wait=True)

            return job_id
        except Exception as e:
            # Lazy %-args keep string interpolation out of the logging call
            # and match the style of the sibling create_amphora variant.
            LOG.error('Failed to create an amphora due to: %s', str(e))
    def create_load_balancer(self, load_balancer_id):
        """Build and run the taskflow that creates a load balancer (A10).

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoResultFound: the load balancer is not present in the DB
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        topology = CONF.a10_controller_worker.loadbalancer_topology
        flow_store = {
            constants.LOADBALANCER_ID: load_balancer_id,
            constants.VIP: lb.vip,
            constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_NORMAL_PRIORITY,
            constants.UPDATE_DICT: {constants.TOPOLOGY: topology}
        }

        # Projects mapped to a hardware device use the rack vThunder flow.
        if lb.project_id in CONF.hardware_thunder.devices:
            lb_flow = (
                self._lb_flows.get_create_rack_vthunder_load_balancer_flow(
                    vthunder_conf=CONF.hardware_thunder.devices[
                        lb.project_id],
                    topology=topology,
                    listeners=lb.listeners))
        else:
            lb_flow = self._lb_flows.get_create_load_balancer_flow(
                topology=topology, listeners=lb.listeners)

        lb_tf = self._taskflow_load(lb_flow, store=flow_store)
        with tf_logging.DynamicLoggingListener(
                lb_tf,
                log=LOG,
                hide_inputs_outputs_of=self._exclude_result_logging_tasks):
            lb_tf.run()
Example #45
0
    def delete_load_balancer(self, load_balancer, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer: Dict of the load balancer to delete
        :param cascade: when True, also tear down listeners and pools
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb_id = load_balancer[constants.LOADBALANCER_ID]
        db_lb = self._lb_repo.get(db_apis.get_session(), id=lb_id)

        flow_store = {
            constants.LOADBALANCER: load_balancer,
            constants.LOADBALANCER_ID: lb_id,
            constants.SERVER_GROUP_ID: db_lb.server_group_id,
            constants.PROJECT_ID: db_lb.project_id
        }

        if not cascade:
            self.run_flow(flow_utils.get_delete_load_balancer_flow,
                          load_balancer, store=flow_store)
            return

        # Cascade delete removes child listeners and pools in one flow.
        listeners = flow_utils.get_listeners_on_lb(db_lb)
        pools = flow_utils.get_pools_on_lb(db_lb)
        self.run_flow(flow_utils.get_cascade_delete_load_balancer_flow,
                      load_balancer, listeners, pools, store=flow_store)
Example #46
0
    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        db_member = self._member_repo.get(db_apis.get_session(), id=member_id)
        owning_pool = db_member.pool

        flow_store = {
            constants.MEMBER: db_member,
            constants.LISTENERS: owning_pool.listeners,
            constants.LOADBALANCER: owning_pool.load_balancer,
            constants.POOL: owning_pool,
            constants.UPDATE_DICT: member_updates
        }
        update_tf = self._taskflow_load(
            self._member_flows.get_update_member_flow(),
            store=flow_store)
        with tf_logging.DynamicLoggingListener(update_tf, log=LOG):
            update_tf.run()
Example #47
0
    def pool_controller(pool, delete=False, update=False, create=False):
        """Simulate the controller's asynchronous handling of a pool op.

        Applies the requested DB change, then marks affected listeners and
        the owning load balancer ONLINE/ACTIVE.
        """
        time.sleep(ASYNC_TIME)
        LOG.info("Simulating controller operation for pool...")

        db_pool = None
        if delete:
            db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
            repo.pool.delete(db_api.get_session(), id=pool.id)
        elif update:
            db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
            pool_dict = pool.to_dict()
            # Keep the operating status already recorded in the DB.
            pool_dict['operating_status'] = db_pool.operating_status
            repo.update_pool_and_sp(db_api.get_session(), pool.id, pool_dict)
        elif create:
            repo.pool.update(db_api.get_session(),
                             pool.id,
                             operating_status=constants.ONLINE)

        # Union of listeners from the DB record and the request, deduped
        # while preserving order.
        sources = []
        if db_pool:
            sources.append(db_pool.listeners)
        if pool.listeners:
            sources.append(pool.listeners)
        affected = []
        for group in sources:
            for listener in group:
                if listener not in affected:
                    affected.append(listener)

        for listener in affected:
            repo.listener.update(db_api.get_session(),
                                 listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)

        repo.load_balancer.update(db_api.get_session(),
                                  pool.load_balancer.id,
                                  operating_status=constants.ONLINE,
                                  provisioning_status=constants.ACTIVE)
        LOG.info("Simulated Controller Handler Thread Complete")
Example #48
0
    def member_controller(member, delete=False, update=False, create=False):
        """Simulate the controller's asynchronous handling of a member op.

        Applies the requested DB change, then marks affected listeners and
        the owning load balancer ONLINE/ACTIVE.
        """
        time.sleep(ASYNC_TIME)
        LOG.info(_LI("Simulating controller operation for member..."))

        db_mem = None
        if delete:
            db_mem = repo.member.get(db_api.get_session(), member.id)
            repo.member.delete(db_api.get_session(), id=member.id)
        elif update:
            db_mem = repo.member.get(db_api.get_session(), member.id)
            member_dict = member.to_dict()
            # Keep the operating status already recorded in the DB.
            member_dict['operating_status'] = db_mem.operating_status
            repo.member.update(db_api.get_session(), member.id, **member_dict)
        elif create:
            repo.member.update(db_api.get_session(),
                               member.id,
                               operating_status=constants.ONLINE)

        # Union of listeners from the DB record and the request, deduped
        # while preserving order.
        sources = []
        if db_mem:
            sources.append(db_mem.pool.listeners)
        if member.pool.listeners:
            sources.append(member.pool.listeners)
        affected = []
        for group in sources:
            for listener in group:
                if listener not in affected:
                    affected.append(listener)

        for listener in affected:
            repo.listener.update(db_api.get_session(),
                                 listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)

        repo.load_balancer.update(db_api.get_session(),
                                  member.pool.load_balancer.id,
                                  operating_status=constants.ONLINE,
                                  provisioning_status=constants.ACTIVE)
        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
Example #49
0
    def l7rule_controller(l7rule, delete=False, update=False, create=False):
        """Simulate the controller's asynchronous handling of an l7rule op.

        Applies the requested DB change, then marks the owning listener and
        load balancer ONLINE/ACTIVE when the rule's policy has a listener.
        """
        time.sleep(ASYNC_TIME)
        LOG.info(_LI("Simulating controller operation for l7rule..."))

        db_l7rule = None
        if delete:
            db_l7rule = repo.l7rule.get(db_api.get_session(), l7rule.id)
            repo.l7rule.delete(db_api.get_session(), id=l7rule.id)
        elif update:
            db_l7rule = repo.l7rule.get(db_api.get_session(), l7rule.id)
            l7rule_dict = l7rule.to_dict()
            repo.l7rule.update(db_api.get_session(), l7rule.id, **l7rule_dict)
        elif create:
            # Bug fix: the original referenced ``l7rule_dict`` here, which is
            # only bound in the update branch, raising NameError on create.
            db_l7rule = repo.l7rule.create(db_api.get_session(),
                                           **l7rule.to_dict())
        if db_l7rule.l7policy.listener:
            listener = db_l7rule.l7policy.listener
            repo.listener.update(db_api.get_session(), listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)
            repo.load_balancer.update(db_api.get_session(),
                                      listener.load_balancer.id,
                                      operating_status=constants.ONLINE,
                                      provisioning_status=constants.ACTIVE)
        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
Example #50
0
 def _set_lb_and_children_statuses(self, lb_id, prov_status, op_status):
     """Set status fields on a load balancer and all of its children.

     :param lb_id: ID of the load balancer to update
     :param prov_status: provisioning status applied to the LB, its
         listeners, and its pools
     :param op_status: operating status applied to every object, including
         pool members (members do not get a provisioning status here)
     """
     self.lb_repo.update(db_api.get_session(),
                         lb_id,
                         provisioning_status=prov_status,
                         operating_status=op_status)
     # Cascade to every listener on this load balancer.
     lb_listeners = self.listener_repo.get_all(db_api.get_session(),
                                               load_balancer_id=lb_id)
     for listener in lb_listeners:
         self.listener_repo.update(db_api.get_session(),
                                   listener.id,
                                   provisioning_status=prov_status,
                                   operating_status=op_status)
     # Cascade to every pool, and to each pool's members.
     lb_pools = self.pool_repo.get_all(db_api.get_session(),
                                       load_balancer_id=lb_id)
     for pool in lb_pools:
         self.pool_repo.update(db_api.get_session(),
                               pool.id,
                               provisioning_status=prov_status,
                               operating_status=op_status)
         for member in pool.members:
             # Members only carry an operating status in this update.
             self.member_repo.update(db_api.get_session(),
                                     member.id,
                                     operating_status=op_status)
Example #51
0
    def put(self, id, availability_zone_profile_):
        """Updates an Availability Zone Profile.

        :param id: ID of the availability zone profile to update
        :param availability_zone_profile_: root request type wrapping the
            updated profile fields
        :returns: the updated profile wrapped in a root response type
        :raises NotFound: the NIL UUID sentinel was supplied as the id
        :raises InvalidOption: availability_zone_data is not valid JSON
        """
        availability_zone_profile = (
            availability_zone_profile_.availability_zone_profile)
        context = pecan_request.context.get('octavia_context')
        self._auth_validate_action(context, context.project_id,
                                   constants.RBAC_PUT)

        self._validate_update_azp(context, id, availability_zone_profile)
        if id == constants.NIL_UUID:
            raise exceptions.NotFound(resource='Availability Zone Profile',
                                      id=constants.NIL_UUID)

        if not isinstance(availability_zone_profile.availability_zone_data,
                          wtypes.UnsetType):
            # Do a basic JSON validation on the metadata
            try:
                availability_zone_data_dict = jsonutils.loads(
                    availability_zone_profile.availability_zone_data)
            except Exception:
                # NOTE(review): option=constants.FLAVOR_DATA looks
                # copy-pasted from the flavor profile controller -- confirm
                # whether an AVAILABILITY_ZONE_DATA constant was intended.
                raise exceptions.InvalidOption(
                    value=availability_zone_profile.availability_zone_data,
                    option=constants.FLAVOR_DATA)

            # When the request does not change the provider, validate the
            # metadata against the profile's currently stored provider.
            if isinstance(availability_zone_profile.provider_name,
                          wtypes.UnsetType):
                db_availability_zone_profile = (
                    self._get_db_availability_zone_profile(
                        context.session, id))
                provider_driver = db_availability_zone_profile.provider_name
            else:
                provider_driver = availability_zone_profile.provider_name

            # Validate that the provider driver supports the metadata
            driver = driver_factory.get_driver(provider_driver)
            driver_utils.call_provider(
                driver.name, driver.validate_availability_zone,
                availability_zone_data_dict)

        # Apply the update inside a dedicated lock session so it can be
        # rolled back as a unit on any failure.
        lock_session = db_api.get_session(autocommit=False)
        try:
            availability_zone_profile_dict = availability_zone_profile.to_dict(
                render_unsets=False)
            if availability_zone_profile_dict:
                self.repositories.availability_zone_profile.update(
                    lock_session, id, **availability_zone_profile_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        # Force SQL alchemy to query the DB, otherwise we get inconsistent
        # results
        context.session.expire_all()
        db_availability_zone_profile = self._get_db_availability_zone_profile(
            context.session, id)
        result = self._convert_db_to_type(
            db_availability_zone_profile,
            profile_types.AvailabilityZoneProfileResponse)
        return profile_types.AvailabilityZoneProfileRootResponse(
            availability_zone_profile=result)
Example #52
0
    def post(self, load_balancer):
        """Creates a load balancer.

        Validates the request, reserves quota, creates the LB and VIP rows
        (plus any graph-created listeners/pools) in one lock session,
        dispatches the create to the provider driver, and commits only if
        the driver call succeeds.

        :param load_balancer: root request type wrapping the load balancer
        :returns: the created LB wrapped in a full root response type
        :raises ValidationException: no project ID available for the request
        :raises QuotaException: the project's LB quota is exhausted
        :raises IDAlreadyExists: a duplicate DB entry was detected
        """
        load_balancer = load_balancer.loadbalancer
        context = pecan_request.context.get('octavia_context')

        # Fall back to the requesting project when none was supplied.
        if not load_balancer.project_id and context.project_id:
            load_balancer.project_id = context.project_id

        if not load_balancer.project_id:
            raise exceptions.ValidationException(detail=_(
                "Missing project ID in request where one is required. "
                "An administrator should check the keystone settings "
                "in the Octavia configuration."))

        self._auth_validate_action(context, load_balancer.project_id,
                                   constants.RBAC_POST)

        self._validate_vip_request_object(load_balancer, context=context)

        self._validate_flavor(context.session, load_balancer)

        self._validate_availability_zone(context.session, load_balancer)

        provider = self._get_provider(context.session, load_balancer)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        # All DB writes go through this lock session so the whole create can
        # be rolled back as one unit.
        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.LoadBalancer,
                    load_balancer.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.LoadBalancer._name())

            db_lb, db_pools, db_lists = None, None, None

            lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict(
                render_unsets=False
            ))
            vip_dict = lb_dict.pop('vip', {})

            # Make sure we store the right provider in the DB
            lb_dict['provider'] = driver.name

            # NoneType can be weird here, have to force type a second time
            listeners = lb_dict.pop('listeners', []) or []
            pools = lb_dict.pop('pools', []) or []

            flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver,
                                                        lb_dict)

            az_dict = self._validate_and_return_az_dict(lock_session, driver,
                                                        lb_dict)
            # Validate the network as soon as we have the AZ data
            validate.network_allowed_by_config(
                load_balancer.vip_network_id,
                valid_networks=az_dict.get(constants.VALID_VIP_NETWORKS))

            db_lb = self.repositories.create_load_balancer_and_vip(
                lock_session, lb_dict, vip_dict)

            # Pass the flavor dictionary through for the provider drivers
            # This is a "virtual" lb_dict item that includes the expanded
            # flavor dict instead of just the flavor_id we store in the DB.
            lb_dict['flavor'] = flavor_dict

            # Do the same with the availability_zone dict
            lb_dict['availability_zone'] = az_dict

            # See if the provider driver wants to manage the VIP port
            # This will still be called if the user provided a port to
            # allow drivers to collect any required information about the
            # VIP port.
            octavia_owned = False
            try:
                provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
                    vip_dict)
                vip_dict = driver_utils.call_provider(
                    driver.name, driver.create_vip_port, db_lb.id,
                    db_lb.project_id, provider_vip_dict)
                vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
            except exceptions.ProviderNotImplementedError:
                # create vip port if not exist, driver didn't want to create
                # the VIP port
                vip = self._create_vip_port_if_not_exist(db_lb)
                LOG.info('Created VIP port %s for provider %s.',
                         vip.port_id, driver.name)
                # If a port_id wasn't passed in and we made it this far
                # we created the VIP
                if 'port_id' not in vip_dict or not vip_dict['port_id']:
                    octavia_owned = True

            # Check if the driver claims octavia owns the VIP port.
            if vip.octavia_owned:
                octavia_owned = True

            # Record the final VIP details the driver (or Octavia) chose.
            self.repositories.vip.update(
                lock_session, db_lb.id, ip_address=vip.ip_address,
                port_id=vip.port_id, network_id=vip.network_id,
                subnet_id=vip.subnet_id, octavia_owned=octavia_owned)

            # Single-call graph create of any nested listeners/pools.
            if listeners or pools:
                db_pools, db_lists = self._graph_create(
                    context.session, lock_session, db_lb, listeners, pools)

            # Prepare the data for the driver data model
            driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
                lb_dict, vip, db_pools, db_lists)

            # Dispatch to the driver
            LOG.info("Sending create Load Balancer %s to provider %s",
                     db_lb.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.loadbalancer_create,
                driver_dm.LoadBalancer.from_dict(driver_lb_dict))

            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        # Re-read through the request session so the response reflects the
        # committed state.
        db_lb = self._get_db_lb(context.session, db_lb.id)

        result = self._convert_db_to_type(
            db_lb, lb_types.LoadBalancerFullResponse)
        return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
Example #53
0
    def _get_db_obj_until_pending_update(self, repo, id):
        """Fetch the object with the given id from *repo* on a new session."""
        session = db_apis.get_session()
        return repo.get(session, id=id)
Example #54
0
    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        Looks up the amphora's load balancer, assembles the taskflow store
        (flavor and availability zone metadata included), and runs the
        failover flow. Amphorae already marked DELETED are purged from the
        health table instead of being failed over.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """
        # Base parameters every task in the failover flow reads from the
        # taskflow store.
        stored_params = {
            constants.FAILED_AMPHORA: amp.to_dict(),
            constants.LOADBALANCER_ID: amp.load_balancer_id,
            constants.BUILD_TYPE_PRIORITY: priority,
        }

        # Map the amphora role to a readable label (used for logging only).
        if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):
            amp_role = 'master_or_backup'
        elif amp.role == constants.ROLE_STANDALONE:
            amp_role = 'standalone'
        elif amp.role is None:
            # An amphora with no role is a spare waiting to be assigned.
            amp_role = 'spare'
        else:
            amp_role = 'undefined'

        LOG.info(
            "Perform failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })

        # A DELETED amphora should never be failed over; remove its stale
        # health record so the health manager stops considering it.
        if amp.status == constants.DELETED:
            LOG.warning(
                'Amphora %s is marked DELETED in the database but '
                'was submitted for failover. Deleting it from the '
                'amphora health table to exclude it from health '
                'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        # Without a spares pool a replacement amphora must boot from
        # scratch, which lengthens the failover window.
        if (CONF.house_keeping.spare_amphora_pool_size
                == 0) and (CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amp.id)
        # lb may be None for a spare amphora; pass that through unchanged.
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            lb).to_dict() if lb else lb
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        # Flavor and availability zone metadata always go into the store,
        # defaulting to empty dicts so flow tasks can rely on the keys.
        if lb and lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}
        if lb and lb.availability_zone:
            stored_params[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), lb.availability_zone))
        else:
            stored_params[constants.AVAILABILITY_ZONE] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(role=amp.role,
                                                  load_balancer=provider_lb),
            store=stored_params)

        # Run the flow with dynamic logging so task transitions are logged.
        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

        LOG.info(
            "Successfully completed the failover for an amphora: %s", {
                "id": amp.id,
                "load_balancer_id": amp.load_balancer_id,
                "lb_network_ip": amp.lb_network_ip,
                "compute_id": amp.compute_id,
                "role": amp_role
            })
Example #55
0
    def post(self, l7policy_):
        """Creates a l7policy on a listener.

        :param l7policy_: The L7 policy root object from the API request.
        :returns: An L7PolicyRootResponse wrapping the created policy.
        :raises QuotaException: If the project's L7 policy quota is exceeded.
        :raises IDAlreadyExists: On duplicate-entry DB errors (via the
                                 validation helpers).
        """
        l7policy = l7policy_.l7policy
        context = pecan_request.context.get('octavia_context')

        # Verify the parent listener exists
        listener_id = l7policy.listener_id
        listener = self._get_db_listener(context.session, listener_id)
        load_balancer_id = listener.load_balancer_id
        l7policy.project_id, provider = self._get_lb_project_id_provider(
            context.session, load_balancer_id)

        self._auth_validate_action(context, l7policy.project_id,
                                   constants.RBAC_POST)

        # Make sure any pool specified by redirect_pool_id exists
        if l7policy.redirect_pool_id:
            db_pool = self._get_db_pool(context.session,
                                        l7policy.redirect_pool_id)
            self._validate_protocol(listener.protocol, db_pool.protocol)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        # Use an explicit transaction so quota check, status checks, DB
        # create and the driver dispatch commit (or roll back) atomically.
        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(context.session, lock_session,
                                                 data_models.L7Policy,
                                                 l7policy.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.L7Policy._name())

            l7policy_dict = db_prepare.create_l7policy(
                l7policy.to_dict(render_unsets=True), load_balancer_id,
                listener_id)

            # Puts the LB and listener into PENDING_UPDATE, failing if they
            # are not in a mutable state.
            self._test_lb_and_listener_statuses(lock_session,
                                                lb_id=load_balancer_id,
                                                listener_ids=[listener_id])
            db_l7policy = self._validate_create_l7policy(
                lock_session, l7policy_dict)

            # Prepare the data for the driver data model
            provider_l7policy = (
                driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))

            # Dispatch to the driver
            LOG.info("Sending create L7 Policy %s to provider %s",
                     db_l7policy.id, driver.name)
            driver_utils.call_provider(driver.name, driver.l7policy_create,
                                       provider_l7policy)

            lock_session.commit()
        except Exception:
            # Roll back the whole transaction and re-raise the original
            # exception to the API layer.
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        # Re-read from a fresh session so the response reflects committed
        # state.
        db_l7policy = self._get_db_l7policy(context.session, db_l7policy.id)
        result = self._convert_db_to_type(db_l7policy,
                                          l7policy_types.L7PolicyResponse)
        return l7policy_types.L7PolicyRootResponse(l7policy=result)
Example #56
0
    def obj_to_dict(self, obj, is_update=False, project_id=None):
        """Translate an Octavia provider data model object into a dict for
        the NSX driver.

        :param obj: The provider data model object to translate.
        :param is_update: True when building an update payload; unset fields
                          are then omitted instead of being rendered.
        :param project_id: Optional project ID override; looked up from the
                           object when not supplied.
        :returns: A dictionary in the format the NSX driver expects.
        """
        obj_type = obj.__class__.__name__
        # create a dictionary out of the object; on update only the fields
        # that were actually set should be sent
        render_unsets = not is_update
        obj_dict = obj.to_dict(recurse=True, render_unsets=render_unsets)

        # Update the dictionary to match what the nsx driver expects
        if not project_id:
            project_id = self.get_obj_project_id(obj_type, obj_dict)
        obj_dict['tenant_id'] = obj_dict['project_id'] = project_id

        if 'id' not in obj_dict:
            # e.g. a Listener object carries 'listener_id' rather than 'id'
            obj_dict['id'] = obj_dict.get('%s_id' % obj_type.lower())

        if not obj_dict.get('name') and not is_update:
            obj_dict['name'] = ""

        self._remove_unsupported_keys(obj_type, obj_dict)

        def _normalize_children(key, child_id_key):
            # Drop empty child lists from update payloads; otherwise ensure
            # the list exists and each child carries a plain 'id' key.
            if key not in obj_dict:
                return
            if is_update and not obj_dict[key]:
                del obj_dict[key]
            else:
                if obj_dict[key] is None:
                    obj_dict[key] = []
                for child in obj_dict[key]:
                    child['id'] = child[child_id_key]

        if obj_type == 'LoadBalancer':
            # clean listeners and pools for update case:
            _normalize_children('listeners', 'listener_id')
            _normalize_children('pools', 'pool_id')

        elif obj_type == 'Listener':
            if 'l7policies' in obj_dict:
                obj_dict['l7_policies'] = obj_dict['l7policies']
            if obj_dict.get('loadbalancer_id'):
                # Generate a loadbalancer object
                obj_dict['loadbalancer'] = self._get_load_balancer_dict(
                    obj_dict['loadbalancer_id'])
            # TODO(asarfaty): add default_tls_container_id

        elif obj_type == 'Pool':
            if 'listener' not in obj_dict:
                self._get_listener_in_pool_dict(obj_dict)

        elif obj_type == 'Member':
            # Get the pool object
            if obj_dict.get('pool_id'):
                obj_dict['pool'] = self._get_pool_dict(obj_dict['pool_id'])
                obj_dict['loadbalancer'] = None
                if 'loadbalancer' in obj_dict['pool']:
                    obj_dict['loadbalancer'] = obj_dict['pool']['loadbalancer']
                    if not obj_dict.get('subnet_id'):
                        # Use the parent vip_subnet_id instead
                        obj_dict['subnet_id'] = obj_dict['loadbalancer'][
                            'vip_subnet_id']
            else:
                obj_dict['pool'] = None
                obj_dict['loadbalancer'] = None

        elif obj_type == 'HealthMonitor':
            # Get the pool object
            if obj_dict.get('pool_id'):
                obj_dict['pool'] = self._get_pool_dict(obj_dict['pool_id'])

        elif obj_type == 'L7Policy':
            self.update_policy_dict(obj_dict, obj, is_update=is_update)

        elif obj_type == 'L7Rule':
            # Get the L7 policy object
            if obj_dict.get('l7policy_id'):
                db_policy = self.repositories.l7policy.get(
                    db_apis.get_session(), id=obj_dict['l7policy_id'])
                policy_obj = oct_utils.db_l7policy_to_provider_l7policy(
                    db_policy)
                policy_dict = policy_obj.to_dict(
                    recurse=True, render_unsets=True)
                policy_dict['id'] = obj_dict['l7policy_id']
                self.update_policy_dict(
                    policy_dict, policy_obj, is_update=is_update)
                obj_dict['policy'] = policy_dict

        LOG.debug("Translated %(type)s to dictionary: %(obj)s",
                  {'type': obj_type, 'obj': obj_dict})
        return obj_dict
Example #57
0
    def _update_health(self, health, srcaddr):
        """This function is to update db info based on amphora status

        :param health: map object that contains amphora, listener, member info
        :type map: string
        :param srcaddr: source IP address of the heartbeat (used for logging)
        :returns: null

        The input v1 health data structure is shown as below::

            health = {
                "id": self.FAKE_UUID_1,
                "listeners": {
                    "listener-id-1": {"status": constants.OPEN, "pools": {
                        "pool-id-1": {"status": constants.UP,
                                      "members": {
                                          "member-id-1": constants.ONLINE}
                                      }
                    }
                    }
                }
            }

        Example V2 message::

            {"id": "<amphora_id>",
             "seq": 67,
             "listeners": {
               "<listener_id>": {
                 "status": "OPEN",
                 "stats": {
                   "tx": 0,
                   "rx": 0,
                   "conns": 0,
                   "totconns": 0,
                   "ereq": 0
                 }
               }
             },
             "pools": {
                 "<pool_id>:<listener_id>": {
                   "status": "UP",
                   "members": {
                     "<member_id>": "no check"
                   }
                 }
             },
             "ver": 2
            }

        """
        session = db_api.get_session()

        # We need to see if all of the listeners are reporting in
        db_lb = self.amphora_repo.get_lb_for_health_update(
            session, health['id'])
        ignore_listener_count = False

        if db_lb:
            expected_listener_count = 0
            # A LB in a PENDING_* state or disabled cannot be expected to
            # report all of its listeners.
            if ('PENDING' in db_lb['provisioning_status']
                    or not db_lb['enabled']):
                ignore_listener_count = True
            else:
                for key, listener in db_lb.get('listeners', {}).items():
                    # disabled listeners don't report from the amphora
                    if listener['enabled']:
                        expected_listener_count += 1

                # If this is a heartbeat older than versioning, handle
                # UDP special for backward compatibility.
                if 'ver' not in health:
                    udp_listeners = [
                        l for k, l in db_lb.get('listeners', {}).items()
                        if l['protocol'] == constants.PROTOCOL_UDP
                    ]
                    if udp_listeners:
                        expected_listener_count = (
                            self._update_listener_count_for_UDP(
                                session, db_lb, expected_listener_count))
        else:
            # If this is not a spare amp, log and skip it.
            amp = self.amphora_repo.get(session, id=health['id'])
            if not amp or amp.load_balancer_id:
                # This is debug and not warning because this can happen under
                # normal deleting operations.
                LOG.debug(
                    'Received a health heartbeat from amphora %s with '
                    'IP %s that should not exist. This amphora may be '
                    'in the process of being deleted, in which case you '
                    'will only see this message a few '
                    'times', health['id'], srcaddr)
                if not amp:
                    LOG.warning(
                        'The amphora %s with IP %s is missing from '
                        'the DB, so it cannot be automatically '
                        'deleted (the compute_id is unknown). An '
                        'operator must manually delete it from the '
                        'compute service.', health['id'], srcaddr)
                    return
                # delete the amp right there
                try:
                    compute = stevedore_driver.DriverManager(
                        namespace='octavia.compute.drivers',
                        name=CONF.controller_worker.compute_driver,
                        invoke_on_load=True).driver
                    compute.delete(amp.compute_id)
                    return
                except Exception as e:
                    LOG.info("Error deleting amp %s with IP %s Error: %s",
                             health['id'], srcaddr, str(e))
            expected_listener_count = 0

        listeners = health['listeners']

        # Do not update amphora health if the reporting listener count
        # does not match the expected listener count
        if len(listeners) == expected_listener_count or ignore_listener_count:

            lock_session = db_api.get_session(autocommit=False)

            # if we're running too far behind, warn and bail
            proc_delay = time.time() - health['recv_time']
            hb_interval = CONF.health_manager.heartbeat_interval
            # TODO(johnsom) We need to set a warning threshold here, and
            #               escalate to critical when it reaches the
            #               heartbeat_interval
            if proc_delay >= hb_interval:
                LOG.warning(
                    'Amphora %(id)s health message was processed too '
                    'slowly: %(delay)ss! The system may be overloaded '
                    'or otherwise malfunctioning. This heartbeat has '
                    'been ignored and no update was made to the '
                    'amphora health entry. THIS IS NOT GOOD.', {
                        'id': health['id'],
                        'delay': proc_delay
                    })
                return

            # if the input amphora is healthy, we update its db info
            try:
                self.amphora_health_repo.replace(
                    lock_session,
                    health['id'],
                    last_update=(datetime.datetime.utcnow()))
                lock_session.commit()
            except Exception:
                with excutils.save_and_reraise_exception():
                    lock_session.rollback()
        else:
            LOG.warning(
                'Amphora %(id)s health message reports %(found)i '
                'listeners when %(expected)i expected', {
                    'id': health['id'],
                    'found': len(listeners),
                    'expected': expected_listener_count
                })

        # Don't try to update status for spares pool amphora
        if not db_lb:
            return

        processed_pools = []
        potential_offline_pools = {}

        # We got a heartbeat so lb is healthy until proven otherwise
        if db_lb[constants.ENABLED] is False:
            lb_status = constants.OFFLINE
        else:
            lb_status = constants.ONLINE

        # Defaults to 0 for pre-versioning heartbeats (v1 format).
        health_msg_version = health.get('ver', 0)

        for listener_id in db_lb.get(constants.LISTENERS, {}):
            db_listener = db_lb[constants.LISTENERS][listener_id]
            db_op_status = db_listener[constants.OPERATING_STATUS]
            listener_status = None
            listener = None

            # An enabled listener on an ACTIVE LB that failed to report is
            # an ERROR; otherwise it is simply OFFLINE.
            if listener_id not in listeners:
                if (db_listener[constants.ENABLED]
                        and db_lb[constants.PROVISIONING_STATUS]
                        == constants.ACTIVE):
                    listener_status = constants.ERROR
                else:
                    listener_status = constants.OFFLINE
            else:
                listener = listeners[listener_id]

                # OPEN = HAProxy listener status nbconn < maxconn
                if listener.get('status') == constants.OPEN:
                    listener_status = constants.ONLINE
                # FULL = HAProxy listener status not nbconn < maxconn
                elif listener.get('status') == constants.FULL:
                    listener_status = constants.DEGRADED
                    if lb_status == constants.ONLINE:
                        lb_status = constants.DEGRADED
                else:
                    LOG.warning(('Listener %(list)s reported status of '
                                 '%(status)s'), {
                                     'list': listener_id,
                                     'status': listener.get('status')
                                 })

            try:
                # Only hit the DB when the status actually changed.
                if (listener_status is not None
                        and listener_status != db_op_status):
                    self._update_status(session, self.listener_repo,
                                        constants.LISTENER, listener_id,
                                        listener_status, db_op_status)
            except sqlalchemy.orm.exc.NoResultFound:
                LOG.error("Listener %s is not in DB", listener_id)

            if not listener:
                continue

            # v1 heartbeats nest pools under each listener.
            if health_msg_version < 2:
                raw_pools = listener['pools']

                # normalize the pool IDs. Single process listener pools
                # have the listener id appended with an ':' seperator.
                # Old multi-process listener pools only have a pool ID.
                # This makes sure the keys are only pool IDs.
                pools = {(k + ' ')[:k.rfind(':')]: v
                         for k, v in raw_pools.items()}

                for db_pool_id in db_lb.get('pools', {}):
                    # If we saw this pool already on another listener, skip it.
                    if db_pool_id in processed_pools:
                        continue
                    db_pool_dict = db_lb['pools'][db_pool_id]
                    lb_status = self._process_pool_status(
                        session, db_pool_id, db_pool_dict, pools, lb_status,
                        processed_pools, potential_offline_pools)

        # v2+ heartbeats report pools at the top level of the message.
        if health_msg_version >= 2:
            raw_pools = health['pools']

            # normalize the pool IDs. Single process listener pools
            # have the listener id appended with an ':' seperator.
            # Old multi-process listener pools only have a pool ID.
            # This makes sure the keys are only pool IDs.
            pools = {(k + ' ')[:k.rfind(':')]: v for k, v in raw_pools.items()}

            for db_pool_id in db_lb.get('pools', {}):
                # If we saw this pool already, skip it.
                if db_pool_id in processed_pools:
                    continue
                db_pool_dict = db_lb['pools'][db_pool_id]
                lb_status = self._process_pool_status(session, db_pool_id,
                                                      db_pool_dict, pools,
                                                      lb_status,
                                                      processed_pools,
                                                      potential_offline_pools)

        # Pools that never showed up in any message section go OFFLINE.
        for pool_id in potential_offline_pools:
            # Skip if we eventually found a status for this pool
            if pool_id in processed_pools:
                continue
            try:
                # If the database doesn't already show the pool offline, update
                if potential_offline_pools[pool_id] != constants.OFFLINE:
                    self._update_status(session, self.pool_repo,
                                        constants.POOL, pool_id,
                                        constants.OFFLINE,
                                        potential_offline_pools[pool_id])
            except sqlalchemy.orm.exc.NoResultFound:
                LOG.error("Pool %s is not in DB", pool_id)

        # Update the load balancer status last
        try:
            if lb_status != db_lb['operating_status']:
                self._update_status(session, self.loadbalancer_repo,
                                    constants.LOADBALANCER, db_lb['id'],
                                    lb_status,
                                    db_lb[constants.OPERATING_STATUS])
        except sqlalchemy.orm.exc.NoResultFound:
            LOG.error("Load balancer %s is not in DB", db_lb.id)
Example #58
0
    def post(self, health_monitor_):
        """Creates a health monitor on a pool.

        :param health_monitor_: The health monitor root object from the API
                                request.
        :returns: A HealthMonitorRootResponse wrapping the created monitor.
        :raises DisabledOption: If PING monitors are disallowed by config.
        :raises ValidationException: If the monitor type does not match the
                                     pool protocol.
        :raises QuotaException: If the project's health monitor quota is met.
        """
        context = pecan.request.context.get('octavia_context')
        health_monitor = health_monitor_.healthmonitor

        # PING monitors can be globally disabled by the operator.
        if (not CONF.api_settings.allow_ping_health_monitors
                and health_monitor.type == consts.HEALTH_MONITOR_PING):
            raise exceptions.DisabledOption(option='type',
                                            value=consts.HEALTH_MONITOR_PING)

        pool = self._get_db_pool(context.session, health_monitor.pool_id)

        health_monitor.project_id, provider = self._get_lb_project_id_provider(
            context.session, pool.load_balancer_id)

        # UDP pools take only UDP-specific monitor options; UDP_CONNECT
        # monitors are rejected for any other pool protocol.
        if pool.protocol == consts.PROTOCOL_UDP:
            self._validate_healthmonitor_request_for_udp(health_monitor)
        else:
            if health_monitor.type == consts.HEALTH_MONITOR_UDP_CONNECT:
                raise exceptions.ValidationException(
                    detail=_(
                        "The %(type)s type is only supported for pools of type "
                        "%(protocol)s.") % {
                            'type': health_monitor.type,
                            'protocol': consts.PROTOCOL_UDP
                        })

        self._auth_validate_action(context, health_monitor.project_id,
                                   consts.RBAC_POST)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        # Use an explicit transaction so quota check, status checks, DB
        # create and the driver dispatch commit (or roll back) atomically.
        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(context.session, lock_session,
                                                 data_models.HealthMonitor,
                                                 health_monitor.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.HealthMonitor._name())

            hm_dict = db_prepare.create_health_monitor(
                health_monitor.to_dict(render_unsets=True))

            # Puts the LB/listener/pool into PENDING_UPDATE, failing if they
            # are not in a mutable state.
            self._test_lb_and_listener_and_pool_statuses(
                lock_session, health_monitor)
            db_hm = self._validate_create_hm(lock_session, hm_dict)

            # Prepare the data for the driver data model
            provider_healthmon = (driver_utils.db_HM_to_provider_HM(db_hm))

            # Dispatch to the driver
            LOG.info("Sending create Health Monitor %s to provider %s",
                     db_hm.id, driver.name)
            driver_utils.call_provider(driver.name,
                                       driver.health_monitor_create,
                                       provider_healthmon)

            lock_session.commit()
        except odb_exceptions.DBError:
            # A DB-level error here indicates an invalid monitor type value.
            lock_session.rollback()
            raise exceptions.InvalidOption(value=hm_dict.get('type'),
                                           option='type')
        except Exception:
            # Roll back and re-raise the original exception to the API layer.
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        # Re-read from a fresh session so the response reflects committed
        # state.
        db_hm = self._get_db_hm(context.session, db_hm.id)
        result = self._convert_db_to_type(db_hm,
                                          hm_types.HealthMonitorResponse)
        return hm_types.HealthMonitorRootResponse(healthmonitor=result)
Example #59
0
 def execute(self, loadbalancer):
     """Start the VRRP service on the load balancer's amphorae."""
     lb_id = loadbalancer[constants.LOADBALANCER_ID]
     db_lb = self.loadbalancer_repo.get(db_apis.get_session(), id=lb_id)
     self.amphora_driver.start_vrrp_service(db_lb)
     LOG.debug("Started VRRP of loadbalancer %s amphorae", lb_id)
Example #60
0
 def execute(self, amphora):
     """Execute finalize_amphora routine."""
     amp_id = amphora.get(constants.ID)
     db_amp = self.amphora_repo.get(db_apis.get_session(), id=amp_id)
     self.amphora_driver.finalize_amphora(db_amp)
     LOG.debug("Finalized the amphora.")