Example #1
    def post(self, listener_):
        """Creates a listener on a load balancer."""
        listener = listener_.listener
        context = pecan.request.context.get('octavia_context')

        load_balancer_id = listener.loadbalancer_id
        listener.project_id, provider = self._get_lb_project_id_provider(
            context.session, load_balancer_id)

        self._auth_validate_action(context, listener.project_id,
                                   constants.RBAC_POST)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(context.session, lock_session,
                                                 data_models.Listener,
                                                 listener.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.Listener._name())

            listener_dict = db_prepare.create_listener(
                listener.to_dict(render_unsets=True), None)

            if listener_dict['default_pool_id']:
                self._validate_pool(context.session, load_balancer_id,
                                    listener_dict['default_pool_id'],
                                    listener.protocol)

            self._test_lb_and_listener_statuses(lock_session,
                                                lb_id=load_balancer_id)

            db_listener = self._validate_create_listener(
                lock_session, listener_dict)

            # Prepare the data for the driver data model
            provider_listener = (
                driver_utils.db_listener_to_provider_listener(db_listener))

            # re-inject the sni container references lost due to SNI
            # being a separate table in the DB
            if listener.sni_container_refs != wtypes.Unset:
                provider_listener.sni_container_refs = (
                    listener.sni_container_refs)

            # Dispatch to the driver
            LOG.info("Sending create Listener %s to provider %s",
                     db_listener.id, driver.name)
            driver_utils.call_provider(driver.name, driver.listener_create,
                                       provider_listener)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_listener = self._get_db_listener(context.session, db_listener.id)
        result = self._convert_db_to_type(db_listener,
                                          listener_types.ListenerResponse)
        return listener_types.ListenerRootResponse(listener=result)
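
Every create handler in these examples follows the same transactional shape: open a non-autocommit session, run quota checks and DB writes, dispatch to the provider driver, then commit, rolling back on any failure without masking the original exception. A condensed, hypothetical sketch of just that shape (the simulated failure and the _rollback helper are placeholders, not Octavia code):

    from oslo_utils import excutils

    def _rollback():
        # Placeholder standing in for lock_session.rollback()
        print('rolled back')

    try:
        # ... quota checks, DB writes and the driver dispatch would go here ...
        raise RuntimeError('simulated failure during create')
    except Exception:
        # save_and_reraise_exception re-raises the original error when the
        # with-block exits, so the rollback cannot mask it with a new one.
        with excutils.save_and_reraise_exception():
            _rollback()
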
Example #2
    def post(self, health_monitor_):
        """Creates a health monitor on a pool."""
        context = pecan_request.context.get('octavia_context')
        health_monitor = health_monitor_.healthmonitor

        if (not CONF.api_settings.allow_ping_health_monitors and
                health_monitor.type == consts.HEALTH_MONITOR_PING):
            raise exceptions.DisabledOption(
                option='type', value=consts.HEALTH_MONITOR_PING)

        pool = self._get_db_pool(context.session, health_monitor.pool_id)

        health_monitor.project_id, provider = self._get_lb_project_id_provider(
            context.session, pool.load_balancer_id)

        if pool.protocol == consts.PROTOCOL_UDP:
            self._validate_healthmonitor_request_for_udp(health_monitor)
        else:
            if health_monitor.type == consts.HEALTH_MONITOR_UDP_CONNECT:
                raise exceptions.ValidationException(detail=_(
                    "The %(type)s type is only supported for pools of type "
                    "%(protocol)s.") % {'type': health_monitor.type,
                                        'protocol': consts.PROTOCOL_UDP})

        self._auth_validate_action(context, health_monitor.project_id,
                                   consts.RBAC_POST)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.HealthMonitor,
                    health_monitor.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.HealthMonitor._name())

            hm_dict = db_prepare.create_health_monitor(
                health_monitor.to_dict(render_unsets=True))

            self._test_lb_and_listener_and_pool_statuses(
                lock_session, health_monitor)
            db_hm = self._validate_create_hm(lock_session, hm_dict)

            # Prepare the data for the driver data model
            provider_healthmon = (driver_utils.db_HM_to_provider_HM(db_hm))

            # Dispatch to the driver
            LOG.info("Sending create Health Monitor %s to provider %s",
                     db_hm.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.health_monitor_create, provider_healthmon)

            lock_session.commit()
        except odb_exceptions.DBError:
            lock_session.rollback()
            # A DBError here most likely means the requested monitor type
            # violated a DB constraint, so report it as a bad 'type' option.
            raise exceptions.InvalidOption(
                value=hm_dict.get('type'), option='type')
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_hm = self._get_db_hm(context.session, db_hm.id)
        result = self._convert_db_to_type(
            db_hm, hm_types.HealthMonitorResponse)
        return hm_types.HealthMonitorRootResponse(healthmonitor=result)
Example #3
    def post(self, pool_):
        """Creates a pool on a load balancer or listener.

        Note that this can optionally take a listener_id with which the pool
        should be associated as the listener's default_pool. If specified,
        the pool creation will fail if the listener specified already has
        a default_pool.
        """
        # For some API requests the listener_id will be passed in the
        # pool_dict:
        pool = pool_.pool
        context = pecan.request.context.get('octavia_context')
        if pool.protocol == constants.PROTOCOL_UDP:
            self._validate_pool_request_for_udp(pool)
        else:
            if (pool.session_persistence
                    and (pool.session_persistence.persistence_timeout
                         or pool.session_persistence.persistence_granularity)):
                raise exceptions.ValidationException(
                    detail=_("persistence_timeout and persistence_granularity "
                             "is only for UDP protocol pools."))
        if pool.loadbalancer_id:
            pool.project_id, provider = self._get_lb_project_id_provider(
                context.session, pool.loadbalancer_id)
        elif pool.listener_id:
            listener = self.repositories.listener.get(context.session,
                                                      id=pool.listener_id)
            pool.loadbalancer_id = listener.load_balancer_id
            pool.project_id, provider = self._get_lb_project_id_provider(
                context.session, pool.loadbalancer_id)
        else:
            msg = _("Must provide at least one of: "
                    "loadbalancer_id, listener_id")
            raise exceptions.ValidationException(detail=msg)

        self._auth_validate_action(context, pool.project_id,
                                   constants.RBAC_POST)

        if pool.session_persistence:
            sp_dict = pool.session_persistence.to_dict(render_unsets=False)
            validate.check_session_persistence(sp_dict)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(context.session, lock_session,
                                                 data_models.Pool,
                                                 pool.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.Pool._name())

            listener_repo = self.repositories.listener
            pool_dict = db_prepare.create_pool(
                pool.to_dict(render_unsets=True))

            # A pool created with a listener_id becomes that listener's
            # default_pool, so reject the request if the listener already
            # has one.
            listener_id = pool_dict.pop('listener_id', None)
            if listener_id:
                if listener_repo.has_default_pool(lock_session, listener_id):
                    raise exceptions.DuplicatePoolEntry()

            self._test_lb_and_listener_statuses(
                lock_session,
                lb_id=pool_dict['load_balancer_id'],
                listener_ids=[listener_id] if listener_id else [])

            db_pool = self._validate_create_pool(lock_session, pool_dict,
                                                 listener_id)

            # Prepare the data for the driver data model
            provider_pool = (driver_utils.db_pool_to_provider_pool(db_pool))

            # Dispatch to the driver
            LOG.info("Sending create Pool %s to provider %s", db_pool.id,
                     driver.name)
            driver_utils.call_provider(driver.name, driver.pool_create,
                                       provider_pool)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_pool = self._get_db_pool(context.session, db_pool.id)
        result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
        return pool_types.PoolRootResponse(pool=result)
Example #4
    def post(self, load_balancer):
        """Creates a load balancer."""
        load_balancer = load_balancer.loadbalancer
        context = pecan.request.context.get('octavia_context')

        if not load_balancer.project_id and context.project_id:
            load_balancer.project_id = context.project_id

        if not load_balancer.project_id:
            raise exceptions.ValidationException(detail=_(
                "Missing project ID in request where one is required."))

        self._auth_validate_action(context, load_balancer.project_id,
                                   constants.RBAC_POST)

        self._validate_vip_request_object(load_balancer)

        self._validate_flavor(context.session, load_balancer)

        provider = self._get_provider(context.session, load_balancer)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.LoadBalancer,
                    load_balancer.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.LoadBalancer._name())

            db_lb, db_pools, db_lists = None, None, None

            lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict(
                render_unsets=False
            ))
            vip_dict = lb_dict.pop('vip', {})

            # Make sure we store the right provider in the DB
            lb_dict['provider'] = driver.name

            # These can come back as None rather than lists, so coerce them
            # to empty lists
            listeners = lb_dict.pop('listeners', []) or []
            pools = lb_dict.pop('pools', []) or []

            flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver,
                                                        lb_dict)

            db_lb = self.repositories.create_load_balancer_and_vip(
                lock_session, lb_dict, vip_dict)

            # Pass the flavor dictionary through for the provider drivers
            # This is a "virtual" lb_dict item that includes the expanded
            # flavor dict instead of just the flavor_id we store in the DB.
            lb_dict['flavor'] = flavor_dict

            # See if the provider driver wants to create the VIP port
            octavia_owned = False
            try:
                provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
                    vip_dict)
                vip_dict = driver_utils.call_provider(
                    driver.name, driver.create_vip_port, db_lb.id,
                    db_lb.project_id, provider_vip_dict)
                vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
            except exceptions.ProviderNotImplementedError:
                # The driver does not implement VIP port creation, so create
                # the port ourselves if it does not already exist
                vip = self._create_vip_port_if_not_exist(db_lb)
                LOG.info('Created VIP port %s for provider %s.',
                         vip.port_id, driver.name)
                # If a port_id wasn't passed in and we made it this far
                # we created the VIP
                if 'port_id' not in vip_dict or not vip_dict['port_id']:
                    octavia_owned = True

            self.repositories.vip.update(
                lock_session, db_lb.id, ip_address=vip.ip_address,
                port_id=vip.port_id, network_id=vip.network_id,
                subnet_id=vip.subnet_id, octavia_owned=octavia_owned)

            if listeners or pools:
                db_pools, db_lists = self._graph_create(
                    context.session, lock_session, db_lb, listeners, pools)

            # Prepare the data for the driver data model
            driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
                lb_dict, vip, db_pools, db_lists)

            # Dispatch to the driver
            LOG.info("Sending create Load Balancer %s to provider %s",
                     db_lb.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.loadbalancer_create,
                driver_dm.LoadBalancer.from_dict(driver_lb_dict))

            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_lb = self._get_db_lb(context.session, db_lb.id)

        result = self._convert_db_to_type(
            db_lb, lb_types.LoadBalancerFullResponse)
        return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
Example #5
    def _graph_create(self, session, lock_session, db_lb, listeners, pools):
        # Track which pools must have a full specification
        pools_required = set()
        # Look through listeners and find any extra pools, and move them to the
        # top level so they are created first.
        for l in listeners:
            default_pool = l.get('default_pool')
            pool_name = (
                default_pool.get('name') if default_pool else None)
            # All pools need to have a name so they can be referenced
            if default_pool and not pool_name:
                raise exceptions.ValidationException(
                    detail='Pools must be named when creating a fully '
                           'populated loadbalancer.')
            # If a pool has more than a name, assume it's a full specification
            # (but use >3 because it will also have "enabled" and "tls_enabled"
            # as default)
            if default_pool and len(default_pool) > 3:
                pools.append(default_pool)
                l['default_pool'] = {'name': pool_name}
            # Otherwise, it's a reference and we record it and move on
            elif default_pool:
                pools_required.add(pool_name)
            # We also need to check policy redirects
            for policy in l.get('l7policies'):
                redirect_pool = policy.get('redirect_pool')
                pool_name = (
                    redirect_pool.get('name') if redirect_pool else None)
                # All pools need to have a name so they can be referenced
                if redirect_pool and not pool_name:
                    raise exceptions.ValidationException(
                        detail='Pools must be named when creating a fully '
                               'populated loadbalancer.')
                # If a pool has more than a name, assume it's a full spec
                # (but use >3 because it will also have "enabled" and
                # "tls_enabled" as default)
                if redirect_pool and len(redirect_pool) > 3:
                    pool_name = redirect_pool['name']
                    policy['redirect_pool'] = {'name': pool_name}
                    pools.append(redirect_pool)
                # Otherwise, it's a reference and we record it and move on
                elif redirect_pool:
                    pools_required.add(pool_name)

        # Make sure all pool names are unique.
        pool_names = [p.get('name') for p in pools]
        if len(set(pool_names)) != len(pool_names):
            raise exceptions.ValidationException(
                detail="Pool names must be unique when creating a fully "
                       "populated loadbalancer.")
        # Make sure every reference is present in our spec list
        for pool_ref in pools_required:
            if pool_ref not in pool_names:
                raise exceptions.ValidationException(
                    detail="Pool '{name}' was referenced but no full "
                           "definition was found.".format(name=pool_ref))

        # Check quotas for pools.
        if pools and self.repositories.check_quota_met(
                session, lock_session, data_models.Pool, db_lb.project_id,
                count=len(pools)):
            raise exceptions.QuotaException(resource=data_models.Pool._name())

        # Now create all of the pools ahead of the listeners.
        new_pools = []
        pool_name_ids = {}
        for p in pools:
            # Check that pools have mandatory attributes, since we have to
            # bypass the normal validation layer to allow name-only references
            for attr in ('protocol', 'lb_algorithm'):
                if attr not in p:
                    raise exceptions.ValidationException(
                        detail="Pool definition for '{name}' missing required "
                               "attribute: {attr}".format(name=p['name'],
                                                          attr=attr))
            p['load_balancer_id'] = db_lb.id
            p['project_id'] = db_lb.project_id
            new_pool = (pool.PoolsController()._graph_create(
                session, lock_session, p))
            new_pools.append(new_pool)
            pool_name_ids[new_pool.name] = new_pool.id

        # Now check quotas for listeners
        if listeners and self.repositories.check_quota_met(
                session, lock_session, data_models.Listener, db_lb.project_id,
                count=len(listeners)):
            raise exceptions.QuotaException(
                resource=data_models.Listener._name())

        # Now create all of the listeners
        new_lists = []
        for l in listeners:
            default_pool = l.pop('default_pool', None)
            # If there's a default pool, replace it with the ID
            if default_pool:
                pool_name = default_pool['name']
                pool_id = pool_name_ids.get(pool_name)
                if not pool_id:
                    raise exceptions.SingleCreateDetailsMissing(
                        type='Pool', name=pool_name)
                l['default_pool_id'] = pool_id
            l['load_balancer_id'] = db_lb.id
            l['project_id'] = db_lb.project_id
            new_lists.append(listener.ListenersController()._graph_create(
                lock_session, l, pool_name_ids=pool_name_ids))

        return new_pools, new_lists
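
For orientation, here is a hypothetical sketch (not taken from the Octavia source, values made up) of the kind of listeners/pools input this method untangles: one fully specified pool plus name-only references from a listener's default_pool and from an L7 policy's redirect_pool. The key names mirror the ones the code above reads:

    # Illustrative _graph_create input: the full pool spec is created first,
    # then the name-only references are resolved to its ID.
    pools = [
        {'name': 'web-pool',
         'protocol': 'HTTP',
         'lb_algorithm': 'ROUND_ROBIN'},
    ]
    listeners = [
        {'name': 'web-listener',
         'default_pool': {'name': 'web-pool'},          # name-only reference
         'l7policies': [
             {'name': 'redirect-policy',
              'redirect_pool': {'name': 'web-pool'}},   # name-only reference
         ]},
    ]
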
Example #6
    def put(self, additive_only=False, members_=None):
        """Updates all members."""
        members = members_.members
        additive_only = strutils.bool_from_string(additive_only)
        context = pecan.request.context.get('octavia_context')

        db_pool = self._get_db_pool(context.session, self.pool_id)
        old_members = db_pool.members

        project_id, provider = self._get_lb_project_id_provider(
            context.session, db_pool.load_balancer_id)

        # Check POST+PUT+DELETE since this operation is all of 'CUD'
        self._auth_validate_action(context, project_id, constants.RBAC_POST)
        self._auth_validate_action(context, project_id, constants.RBAC_PUT)
        if not additive_only:
            self._auth_validate_action(context, project_id,
                                       constants.RBAC_DELETE)

        # Validate member subnets
        for member in members:
            if member.subnet_id and not validate.subnet_exists(
                    member.subnet_id):
                raise exceptions.NotFound(resource='Subnet',
                                          id=member.subnet_id)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        with db_api.get_lock_session() as lock_session:
            self._test_lb_and_listener_and_pool_statuses(lock_session)

            # Index the existing members by (ip_address, protocol_port) and
            # the requested members by (address, protocol_port) so the two
            # sets can be diffed.
            old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                                  for m in old_members}
            new_member_uniques = [(m.address, m.protocol_port)
                                  for m in members]

            # Find members that are brand new or updated
            new_members = []
            updated_members = []
            for m in members:
                if (m.address, m.protocol_port) not in old_member_uniques:
                    validate.ip_not_reserved(m.address)
                    new_members.append(m)
                else:
                    m.id = old_member_uniques[(m.address, m.protocol_port)]
                    updated_members.append(m)

            # Find members that are deleted
            deleted_members = []
            for m in old_members:
                if (m.ip_address, m.protocol_port) not in new_member_uniques:
                    deleted_members.append(m)

            if additive_only:
                member_count_diff = len(new_members)
            else:
                member_count_diff = len(new_members) - len(deleted_members)
            if member_count_diff > 0 and self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.Member,
                    db_pool.project_id,
                    count=member_count_diff):
                raise exceptions.QuotaException(
                    resource=data_models.Member._name())

            provider_members = []
            # Create new members
            for m in new_members:
                m = m.to_dict(render_unsets=False)
                m['project_id'] = db_pool.project_id
                created_member = self._graph_create(lock_session, m)
                provider_member = driver_utils.db_member_to_provider_member(
                    created_member)
                provider_members.append(provider_member)
            # Update old members
            for m in updated_members:
                m.provisioning_status = constants.PENDING_UPDATE
                m.project_id = db_pool.project_id
                db_member_dict = m.to_dict(render_unsets=False)
                db_member_dict.pop('id')
                self.repositories.member.update(lock_session, m.id,
                                                **db_member_dict)

                m.pool_id = self.pool_id
                provider_members.append(
                    driver_utils.db_member_to_provider_member(m))
            # Delete old members
            for m in deleted_members:
                if additive_only:
                    # Members are appended to the provider members list with
                    # their status unchanged, because they are logically
                    # "untouched".
                    db_member_dict = m.to_dict(render_unsets=False)
                    db_member_dict.pop('id')
                    m.pool_id = self.pool_id
                    provider_members.append(
                        driver_utils.db_member_to_provider_member(m))
                else:
                    # Members are changed to PENDING_DELETE and not passed.
                    self.repositories.member.update(
                        lock_session,
                        m.id,
                        provisioning_status=constants.PENDING_DELETE)

            # Dispatch to the driver
            LOG.info("Sending Pool %s batch member update to provider %s",
                     db_pool.id, driver.name)
            driver_utils.call_provider(driver.name, driver.member_batch_update,
                                       db_pool.id, provider_members)
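
The batch update above hinges on keying members by their (address, protocol_port) pair. A standalone, hypothetical sketch of that classification with plain tuples in place of the WSME/DB models (addresses and IDs are made up):

    # Existing members, keyed by (ip_address, protocol_port) -> member id
    old = {('10.0.0.1', 80): 'member-1', ('10.0.0.2', 80): 'member-2'}
    # Members requested in the PUT body, keyed the same way
    requested = [('10.0.0.2', 80), ('10.0.0.3', 80)]

    new = [k for k in requested if k not in old]           # will be created
    updated = [k for k in requested if k in old]           # will be updated
    # Deleted unless additive_only is set, in which case they are left alone
    deleted = [k for k in old if k not in set(requested)]
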
Example #7
    def post(self, l7policy_):
        """Creates a l7policy on a listener."""
        l7policy = l7policy_.l7policy
        context = pecan.request.context.get('octavia_context')
        # Make sure any pool specified by redirect_pool_id exists
        if l7policy.redirect_pool_id:
            db_pool = self._get_db_pool(
                context.session, l7policy.redirect_pool_id)
            self._escape_l7policy_udp_pool_request(db_pool)
        # Verify the parent listener exists
        listener_id = l7policy.listener_id
        listener = self._get_db_listener(
            context.session, listener_id)
        load_balancer_id = listener.load_balancer_id
        l7policy.project_id, provider = self._get_lb_project_id_provider(
            context.session, load_balancer_id)

        self._auth_validate_action(context, l7policy.project_id,
                                   constants.RBAC_POST)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.L7Policy,
                    l7policy.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.L7Policy._name())

            l7policy_dict = db_prepare.create_l7policy(
                l7policy.to_dict(render_unsets=True),
                load_balancer_id, listener_id)

            self._test_lb_and_listener_statuses(
                lock_session, lb_id=load_balancer_id,
                listener_ids=[listener_id])
            db_l7policy = self._validate_create_l7policy(
                lock_session, l7policy_dict)

            # Prepare the data for the driver data model
            provider_l7policy = (
                driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))

            # Dispatch to the driver
            LOG.info("Sending create L7 Policy %s to provider %s",
                     db_l7policy.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.l7policy_create, provider_l7policy)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_l7policy = self._get_db_l7policy(context.session, db_l7policy.id)
        result = self._convert_db_to_type(db_l7policy,
                                          l7policy_types.L7PolicyResponse)
        return l7policy_types.L7PolicyRootResponse(l7policy=result)
Example #8
    def post(self, listener_):
        """Creates a listener on a load balancer."""
        listener = listener_.listener
        context = pecan.request.context.get('octavia_context')

        load_balancer_id = listener.loadbalancer_id
        listener.project_id, provider = self._get_lb_project_id_provider(
            context.session, load_balancer_id)

        self._auth_validate_action(context, listener.project_id,
                                   constants.RBAC_POST)
        if (listener.protocol == constants.PROTOCOL_UDP and
                self._is_tls_or_insert_header(listener)):
            raise exceptions.ValidationException(detail=_(
                "%s protocol listener does not support TLS or header "
                "insertion.") % constants.PROTOCOL_UDP)
        if (not CONF.api_settings.allow_tls_terminated_listeners and
                listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS):
            raise exceptions.DisabledOption(
                value=constants.PROTOCOL_TERMINATED_HTTPS, option='protocol')

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.Listener,
                    listener.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.Listener._name())

            listener_dict = db_prepare.create_listener(
                listener.to_dict(render_unsets=True), None)

            if listener_dict['default_pool_id']:
                self._validate_pool(context.session, load_balancer_id,
                                    listener_dict['default_pool_id'],
                                    listener.protocol)

            self._test_lb_and_listener_statuses(
                lock_session, lb_id=load_balancer_id)

            db_listener = self._validate_create_listener(
                lock_session, listener_dict)

            # Prepare the data for the driver data model
            provider_listener = (
                driver_utils.db_listener_to_provider_listener(db_listener))

            # re-inject the sni container references lost due to SNI
            # being a separate table in the DB
            provider_listener.sni_container_refs = listener.sni_container_refs

            # Dispatch to the driver
            LOG.info("Sending create Listener %s to provider %s",
                     db_listener.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.listener_create, provider_listener)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_listener = self._get_db_listener(context.session, db_listener.id)
        result = self._convert_db_to_type(db_listener,
                                          listener_types.ListenerResponse)
        return listener_types.ListenerRootResponse(listener=result)
Example #9
    def post(self, load_balancer):
        """Creates a load balancer."""
        context = pecan.request.context.get('octavia_context')

        project_id = context.project_id
        if context.is_admin or (CONF.api_settings.auth_strategy
                                == constants.NOAUTH):
            if load_balancer.project_id:
                project_id = load_balancer.project_id

        if not project_id:
            raise exceptions.ValidationException(detail=_(
                "Missing project ID in request where one is required."))

        load_balancer.project_id = project_id

        if not (load_balancer.vip.port_id or load_balancer.vip.network_id
                or load_balancer.vip.subnet_id):
            raise exceptions.ValidationException(detail=_(
                "VIP must contain one of: port_id, network_id, subnet_id."))

        # Validate the port id
        if load_balancer.vip.port_id:
            port = validate.port_exists(port_id=load_balancer.vip.port_id)
            load_balancer.vip.network_id = port.network_id
        # If no port id, validate the network id (and subnet if provided)
        elif load_balancer.vip.network_id:
            self._validate_network_and_fill_or_validate_subnet(load_balancer)
        # Validate just the subnet id
        elif load_balancer.vip.subnet_id:
            subnet = validate.subnet_exists(
                subnet_id=load_balancer.vip.subnet_id)
            load_balancer.vip.network_id = subnet.network_id

        lock_session = db_api.get_session(autocommit=False)
        # Requests that include listeners take the fully-populated (graph)
        # create path and return early.
        if load_balancer.listeners:
            try:
                db_lb = self._create_load_balancer_graph_db(
                    context.session, lock_session, load_balancer)
                lock_session.commit()
            except Exception:
                with excutils.save_and_reraise_exception():
                    lock_session.rollback()

            return self._load_balancer_graph_to_handler(context, db_lb)
        else:
            if self.repositories.check_quota_met(context.session, lock_session,
                                                 data_models.LoadBalancer,
                                                 load_balancer.project_id):
                lock_session.rollback()
                raise exceptions.QuotaException(
                    resource=data_models.LoadBalancer._name())

        try:
            lb_dict = db_prepare.create_load_balancer(
                load_balancer.to_dict(render_unsets=True))
            vip_dict = lb_dict.pop('vip', {})

            db_lb = self.repositories.create_load_balancer_and_vip(
                lock_session, lb_dict, vip_dict)
            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        # Handler will be responsible for sending to controller
        try:
            LOG.info("Sending created Load Balancer %s to the handler",
                     db_lb.id)
            self.handler.create(db_lb)
        except Exception:
            with excutils.save_and_reraise_exception(reraise=False):
                self.repositories.load_balancer.update(
                    context.session,
                    db_lb.id,
                    provisioning_status=constants.ERROR)
        return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
Example #10
    def post(self, rule_):
        """Creates a l7rule on an l7policy."""
        l7rule = rule_.rule
        try:
            validate.l7rule_data(l7rule)
        except Exception as e:
            raise exceptions.L7RuleValidation(error=e)
        context = pecan.request.context.get('octavia_context')

        db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id,
                                            show_deleted=False)
        load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id(
            db_l7policy)
        l7rule.project_id, provider = self._get_lb_project_id_provider(
            context.session, load_balancer_id)

        self._check_l7policy_max_rules(context.session)

        self._auth_validate_action(context, l7rule.project_id,
                                   constants.RBAC_POST)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_clusterquota_met(
                    lock_session,
                    data_models.L7Rule,
                    base_res_id=self.l7policy_id):
                raise exceptions.ClusterQuotaException(
                    resource=data_models.L7Rule._name())
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.L7Rule,
                    l7rule.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.L7Rule._name())

            l7rule_dict = db_prepare.create_l7rule(
                l7rule.to_dict(render_unsets=True), self.l7policy_id)

            self._test_lb_listener_policy_statuses(context.session)

            db_l7rule = self._validate_create_l7rule(lock_session, l7rule_dict)

            # Prepare the data for the driver data model
            provider_l7rule = (
                driver_utils.db_l7rule_to_provider_l7rule(db_l7rule))

            # Dispatch to the driver
            LOG.info("Sending create L7 Rule %s to provider %s",
                     db_l7rule.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.l7rule_create, provider_l7rule)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_l7rule = self._get_db_l7rule(context.session, db_l7rule.id)
        result = self._convert_db_to_type(db_l7rule,
                                          l7rule_types.L7RuleResponse)
        return l7rule_types.L7RuleRootResponse(rule=result)