def _lock_free_update(session, id, lock_state=False, session_id=0):
    """Implement lock-free atomic update for the distributed lock

    :param session:    the db session
    :type session:     DB Session object
    :param id:         the lock uuid
    :type id:          string
    :param lock_state: the lock state to update
    :type lock_state:  boolean
    :param session_id: the API session ID to update
    :type session_id:  string
    :raises:           RetryRequest() when the lock could not be updated
    """
    if not lock_state:
        # acquire lock
        search_params = {'object_uuid': id, 'lock': lock_state}
        update_params = {'lock': not lock_state, 'session_id': session_id,
                         'created_at': func.now()}
    else:
        # release or reset lock
        search_params = {'object_uuid': id, 'lock': lock_state,
                         'session_id': session_id}
        update_params = {'lock': not lock_state, 'session_id': 0}

    rows_update = session.query(models.DFLockedObjects).\
        filter_by(**search_params).\
        update(update_params, synchronize_session='fetch')

    if not rows_update:
        LOG.info(_LI('The lock for object %(id)s in session '
                     '%(sid)s cannot be updated.'), {'id': id,
                                                     'sid': session_id})
        raise db_exc.RetryRequest(df_exc.DBLockFailed(oid=id, sid=session_id))
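
In all of these snippets the RetryRequest is not handled by the immediate
caller: it is expected to be caught by a retry wrapper higher up the call
stack, which re-runs the whole database operation. As a rough sketch of that
wiring (the acquire_lock wrapper and the retry parameters below are made up
for illustration, not part of the example above), the lock acquisition could
be driven through oslo.db's wrap_db_retry decorator:

from oslo_db import api as oslo_db_api


# Hypothetical caller. wrap_db_retry re-invokes the function when a
# RetryRequest bubbles up (recent oslo.db releases do this unconditionally;
# retry_on_request is an older, now-deprecated flag). In real code a fresh
# session would usually be obtained on each attempt.
@oslo_db_api.wrap_db_retry(max_retries=5, retry_interval=1,
                           inc_retry_interval=True,
                           retry_on_deadlock=True,
                           retry_on_request=True)
def acquire_lock(session, object_uuid, api_session_id):
    _lock_free_update(session, object_uuid,
                      lock_state=False, session_id=api_session_id)
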
Example #2
 def _extract(self, resource_type, resource_id, field):
     # NOTE(salv-orlando): This check currently assumes the parent
     # resource is handled by the core plugin. It might be worth
     # having a way to map resources to plugins so to make this
     # check more general
     plugin = directory.get_plugin()
     if resource_type in service_const.EXT_PARENT_RESOURCE_MAPPING:
         plugin = directory.get_plugin(
             service_const.EXT_PARENT_RESOURCE_MAPPING[resource_type])
     f = getattr(plugin, 'get_%s' % resource_type)
     # f *must* exist; if it is not found, it is better to let neutron
     # explode. The check will be performed with the admin context.
     try:
         data = f(context.get_admin_context(), resource_id, fields=[field])
     except exceptions.NotFound as e:
         # NOTE(kevinbenton): a NotFound exception can occur if a
         # list operation is happening at the same time as one of
         # the parents and its children being deleted. So we issue
         # a RetryRequest so the API will redo the lookup and the
         # problem items will be gone.
         raise db_exc.RetryRequest(e)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception('Policy check error while calling %s!', f)
     return data[field]
    def _push_metadata_software_deployments(self, cnxt, server_id, sd):
        rs = db_api.resource_get_by_physical_resource_id(cnxt, server_id)
        if not rs:
            return
        deployments = self.metadata_software_deployments(cnxt, server_id)
        md = rs.rsrc_metadata or {}
        md['deployments'] = deployments
        rows_updated = db_api.resource_update(cnxt, rs.id,
                                              {'rsrc_metadata': md},
                                              rs.atomic_key)
        if not rows_updated:
            raise db_exc.RetryRequest(
                exception.DeploymentConcurrentTransaction(server=server_id))

        metadata_put_url = None
        metadata_queue_id = None
        for rd in rs.data:
            if rd.key == 'metadata_put_url':
                metadata_put_url = rd.value
            if rd.key == 'metadata_queue_id':
                metadata_queue_id = rd.value
        if metadata_put_url:
            json_md = jsonutils.dumps(md)
            requests.put(metadata_put_url, json_md)
        if metadata_queue_id:
            project = sd.stack_user_project_id
            token = self._get_user_token(cnxt, rs, project)
            zaqar_plugin = cnxt.clients.client_plugin('zaqar')
            zaqar = zaqar_plugin.create_for_tenant(project, token)
            queue = zaqar.queue(metadata_queue_id)
            queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
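
The Heat snippet above is an optimistic-concurrency update: resource_update
only matches the row if atomic_key still has the value that was read, and a
zero row count means a concurrent transaction won the race. The same
compare-and-swap idea can be written directly with SQLAlchemy; the Widget
model and its version column below are hypothetical, used only to illustrate
the shape of the pattern:

import sqlalchemy as sa
from oslo_db import exception as db_exc
from sqlalchemy.ext import declarative

Base = declarative.declarative_base()


class Widget(Base):
    """Hypothetical table whose version column plays the atomic_key role."""
    __tablename__ = 'widgets'
    id = sa.Column(sa.String(36), primary_key=True)
    data = sa.Column(sa.Text)
    version = sa.Column(sa.Integer, nullable=False, default=0)


def update_widget(session, widget_id, data, expected_version):
    # Filtering on the previously read version turns the UPDATE into a
    # compare-and-swap: it matches zero rows if someone else bumped it.
    count = session.query(Widget).filter_by(
        id=widget_id, version=expected_version).update(
            {'data': data, 'version': expected_version + 1},
            synchronize_session=False)
    if not count:
        raise db_exc.RetryRequest(
            RuntimeError('widget %s updated concurrently' % widget_id))
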
Example #4
    def _ensure_default_security_group(self, context, tenant_id):
        """Create a default security group if one doesn't exist.

        :returns: the default security group id for the given tenant.
        """
        # Make no more than two attempts
        for attempts in (1, 2):
            try:
                query = self._model_query(context, DefaultSecurityGroup)
                default_group = query.filter_by(tenant_id=tenant_id).one()
                return default_group['security_group_id']
            except exc.NoResultFound as ex:
                if attempts > 1:
                    # the second iteration means that the attempt to add the
                    # default group failed with a duplicate error. Since we
                    # still cannot see this group, we are most probably inside
                    # a transaction with REPEATABLE READ isolation level ->
                    # we need to restart the whole transaction
                    raise db_exc.RetryRequest(ex)

                security_group = {
                    'security_group':
                        {'name': 'default',
                         'tenant_id': tenant_id,
                         'description': _('Default security group')}
                }
                try:
                    security_group = self.create_security_group(
                        context, security_group, default_sg=True)
                    return security_group['id']
                except db_exc.DBDuplicateEntry as ex:
                    # default security group was created concurrently
                    LOG.debug("Duplicate default security group %s was "
                              "not created", ex.value)
Example #5
def record(plugin_context,
           object_type,
           object_uuid,
           operation,
           data,
           ml2_context=None):
    if (object_type == odl_const.ODL_PORT
            and operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
        data = _enrich_port(plugin_context, ml2_context, object_type,
                            operation, data)

    # Calculate depending_on based on other journal entries
    depending_on = dependency_validations.calculate(plugin_context.session,
                                                    operation, object_type,
                                                    object_uuid, data)

    # NOTE(mpeterson): Between the moment that a dependency is calculated and
    # the new entry is recorded in the journal, an operation can occur that
    # would make the dependency irrelevant. In that case we request a retry.
    # For more details, read the commit message that introduced this comment.
    try:
        db.create_pending_row(plugin_context.session,
                              object_type,
                              object_uuid,
                              operation,
                              data,
                              depending_on=depending_on)
    except exception.DBReferenceError as e:
        raise exception.RetryRequest(e)
    def test_health_check_stale_amphora(self, session_mock, get_stale_amp_mock,
                                        failover_mock, db_wait_mock):
        amphora_health = mock.MagicMock()
        amphora_health.amphora_id = AMPHORA_ID

        get_stale_amp_mock.side_effect = [amphora_health, None]

        exit_event = threading.Event()
        hm = healthmanager.HealthManager(exit_event)

        hm.health_check()

        # Test DBDeadlock and RetryRequest exceptions
        session_mock.reset_mock()
        get_stale_amp_mock.reset_mock()
        mock_session = mock.MagicMock()
        session_mock.return_value = mock_session
        get_stale_amp_mock.side_effect = [
            db_exc.DBDeadlock,
            db_exc.RetryRequest(Exception('retry_test')),
            db_exc.DBConnectionError,
            TestException('test')
        ]
        # Test that a DBDeadlock does not raise an exception
        self.assertIsNone(hm.health_check())
        # Test that a RetryRequest does not raise an exception
        self.assertIsNone(hm.health_check())
        # Test that a DBConnectionError does not raise an exception
        self.assertIsNone(hm.health_check())
        # ... and that it waits for DB reconnection
        db_wait_mock.assert_called_once()
        # Other exceptions should raise
        self.assertRaises(TestException, hm.health_check)
        self.assertEqual(4, mock_session.rollback.call_count)
Example #7
 def map_and_create_tables(self, resource_type, facade):
     with self._lock:
         # NOTE(sileht): map this resource_type to have
         # Base.metadata filled with sa.Table objects
         mappers = self.get_classes(resource_type)
         tables = [
             Base.metadata.tables[klass.__tablename__]
             for klass in mappers.values()
         ]
         try:
             with facade.writer_connection() as connection:
                 Base.metadata.create_all(connection, tables=tables)
         except exception.DBError as e:
             # HACK(jd) Sometimes, PostgreSQL raises an error such as
             # "current transaction is aborted, commands ignored until end
             # of transaction block" on its own catalog, so we need to
             # retry, but this is not caught by oslo.db as a deadlock. This
             # is likely because when we use Base.metadata.create_all(),
             # sqlalchemy itself gets an error it does not catch or
             # something. So this is a paper-over, I guess.
             inn_e = e.inner_exception
             if (psycopg2
                     and isinstance(inn_e, sqlalchemy.exc.InternalError)
                     and isinstance(inn_e.orig, psycopg2.InternalError)
                     # current transaction is aborted
                     and inn_e.orig.pgcode == '25P02'):
                 raise exception.RetryRequest(e)
             raise
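
The later Gnocchi snippets in this list (unmap_and_delete_tables and the
second map_and_create_tables) call a self._is_current_transaction_aborted(e)
helper that is not included here. Judging from the inline check above, it
presumably factors out the same pgcode test; the reconstruction below is an
assumption based on that inline code, not a quote from Gnocchi:

import sqlalchemy.exc

try:
    import psycopg2
except ImportError:
    psycopg2 = None


def _is_current_transaction_aborted(e):
    # PostgreSQL reports "current transaction is aborted, commands ignored
    # until end of transaction block" with pgcode 25P02.
    inn_e = e.inner_exception
    return (psycopg2 is not None
            and isinstance(inn_e, sqlalchemy.exc.InternalError)
            and isinstance(inn_e.orig, psycopg2.InternalError)
            and inn_e.orig.pgcode == '25P02')
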
    def test_health_check_stale_amphora(self, session_mock,
                                        sleep_mock, get_stale_amp_mock,
                                        failover_mock):
        amphora_health = mock.MagicMock()
        amphora_health.amphora_id = AMPHORA_ID

        get_stale_amp_mock.side_effect = [amphora_health,
                                          None,
                                          TestException('test')]

        hm = healthmanager.HealthManager()
        self.assertRaises(TestException, hm.health_check)

        failover_mock.assert_called_once_with(AMPHORA_ID)

        # Test DBDeadlock and RetryRequest exceptions
        session_mock.reset_mock()
        get_stale_amp_mock.reset_mock()
        mock_session = mock.MagicMock()
        session_mock.return_value = mock_session
        get_stale_amp_mock.side_effect = [
            db_exc.DBDeadlock,
            db_exc.RetryRequest(Exception('retry_test')),
            TestException('test')]
        self.assertRaises(TestException, hm.health_check)
        self.assertEqual(3, mock_session.rollback.call_count)
Example #9
    def _lock_subnetpool(self):
        """Lock subnetpool associated row.

        This method prevents 2 subnets from being allocated concurrently in
        the same subnetpool; this is required to ensure non-overlapping CIDRs
        in the same subnetpool.
        """

        current_hash = (self._context.session.query(
            models_v2.SubnetPool.hash).filter_by(
                id=self._subnetpool['id']).scalar())
        if current_hash is None:
            # NOTE(cbrandily): subnetpool has been deleted
            raise n_exc.SubnetPoolNotFound(
                subnetpool_id=self._subnetpool['id'])
        new_hash = uuidutils.generate_uuid()

        # NOTE(cbrandily): the update prevents 2 concurrent subnet allocations
        # from succeeding: at most 1 transaction will succeed, the others will
        # be rolled back and caught in neutron.db.v2.base
        query = self._context.session.query(models_v2.SubnetPool).filter_by(
            id=self._subnetpool['id'], hash=current_hash)
        count = query.update({'hash': new_hash})
        if not count:
            raise db_exc.RetryRequest()
Example #10
def exc_to_retry(etypes):
    try:
        yield
    except Exception as e:
        with excutils.save_and_reraise_exception() as ctx:
            if _is_nested_instance(e, etypes):
                ctx.reraise = False
                raise db_exc.RetryRequest(e)
Example #11
 def _wrapped(*args, **kwargs):
     try:
         return function(*args, **kwargs)
     except Exception as e:
         with excutils.save_and_reraise_exception() as ctx:
             if is_retriable(e):
                 ctx.reraise = False
                 raise db_exc.RetryRequest(e)
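
The _wrapped closure above is the inner function of a retry decorator whose
is_retriable() check is not shown. A minimal version of the whole decorator
might look like the following; the exact exception list in is_retriable is an
assumption (the real neutron check is more elaborate, e.g. it also unpacks
MultipleExceptions, as Example #28 below suggests):

import functools

from oslo_db import exception as db_exc
from oslo_utils import excutils
from sqlalchemy.orm import exc as sqla_orm_exc


def is_retriable(e):
    # Assumed minimal set of transient failures worth converting to a retry.
    return isinstance(e, (db_exc.DBDeadlock,
                          db_exc.DBConnectionError,
                          db_exc.DBDuplicateEntry,
                          db_exc.RetryRequest,
                          sqla_orm_exc.StaleDataError))


def retry_if_retriable(function):
    @functools.wraps(function)
    def _wrapped(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception as e:
            with excutils.save_and_reraise_exception() as ctx:
                if is_retriable(e):
                    ctx.reraise = False
                    raise db_exc.RetryRequest(e)
    return _wrapped
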
Example #12
    def unmap_and_delete_tables(self, resource_type, connection):
        if resource_type.state != "deleting":
            raise RuntimeError("unmap_and_delete_tables must be called in "
                               "state deleting")

        mappers = self.get_classes(resource_type)
        del self._cache[resource_type.tablename]

        tables = [
            Base.metadata.tables[klass.__tablename__]
            for klass in mappers.values()
        ]

        if connection is not None:
            # NOTE(sileht): Base.metadata.drop_all doesn't
            # issue CASCADE correctly, at least on postgresql.
            # We drop foreign keys manually so as not to lock the destination
            # table for too long during the table drop.
            # It's safe not to use a transaction since
            # the resource_type table is already cleaned and committed,
            # so this code cannot be triggered anymore for this
            # resource_type.
            try:
                for table in tables:
                    for fk in table.foreign_key_constraints:
                        try:
                            self._safe_execute(
                                connection,
                                sqlalchemy.schema.DropConstraint(fk))
                        except exception.DBNonExistentConstraint:
                            pass
                for table in tables:
                    try:
                        self._safe_execute(connection,
                                           sqlalchemy.schema.DropTable(table))
                    except exception.DBNonExistentTable:
                        pass
            except exception.DBError as e:
                if self._is_current_transaction_aborted(e):
                    raise exception.RetryRequest(e)
                raise

            # NOTE(sileht): If something goes wrong here, we are currently
            # f****d, that's why we expose the state to the superuser.
            # TODO(sileht): The idea is to make resource_type deletion behave
            # more like a cleanup method: we should not fail if the
            # constraints have already been dropped or the tables have
            # already been deleted. Then, once the superuser has fixed the
            # backend issue, they can rerun 'DELETE ../resource_type/foobar'
            # even though the state is already 'error', and once we are sure
            # all underlying resources have been cleaned up, we really delete
            # the resource_type.

        # TODO(sileht): Remove this resource on other workers
        # by using expiration on cache ?
        for table in tables:
            Base.metadata.remove(table)
Example #13
    def _update_params(self, query, ref, filters, params, failed_exception):
        update_filters = {}
        for f in filters:
            update_filters[f] = getattr(ref, f)
        rows_update = query.filter_by(**update_filters).\
            update(params, synchronize_session='evaluate')

        if not rows_update:
            LOG.debug('The row was updated in a concurrent transaction, '
                      'we will fetch another one')
            raise db_exc.RetryRequest(failed_exception)
Example #14
 def test_router_gateway_set_retry(self):
     with self.router() as r, self.subnet() as s:
         ext_net_id = s['subnet']['network_id']
         self._set_net_external(ext_net_id)
         with mock.patch.object(
                 l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',
                 side_effect=[db_exc.RetryRequest(None), ext_net_id]):
             self._set_router_external_gateway(r['router']['id'],
                                               ext_net_id)
         res = self._show('routers', r['router']['id'])['router']
         self.assertEqual(ext_net_id,
                          res['external_gateway_info']['network_id'])
Example #15
    def allocate_partially_specified_segment(self, context, **filters):
        """Allocate model segment from pool partially specified by filters.

        Return allocated db object or None.
        """

        network_type = self.get_type()
        session, ctx_manager = self._get_session(context)
        with ctx_manager:
            queries = (self.build_segment_queries_for_tenant_and_shared_ranges(
                session, **filters) if directory.get_plugin(
                    plugin_constants.NETWORK_SEGMENT_RANGE) else
                       self.build_segment_query(session, **filters))

            for select in queries:
                # Selected segment can be allocated before update by someone
                # else
                allocs = select.limit(IDPOOL_SELECT_SIZE).all()

                if not allocs:
                    # No resource available
                    continue

                alloc = random.choice(allocs)
                raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
                LOG.debug(
                    "%(type)s segment allocate from pool "
                    "started with %(segment)s ", {
                        "type": network_type,
                        "segment": raw_segment
                    })
                count = (session.query(self.model).filter_by(
                    allocated=False, **raw_segment).update({"allocated":
                                                            True}))
                if count:
                    LOG.debug(
                        "%(type)s segment allocate from pool "
                        "success with %(segment)s ", {
                            "type": network_type,
                            "segment": raw_segment
                        })
                    return alloc

                # Segment allocated since select
                LOG.debug(
                    "Allocate %(type)s segment from pool "
                    "failed with segment %(segment)s", {
                        "type": network_type,
                        "segment": raw_segment
                    })
                # save the real exception in case we run out of attempts
                raise db_exc.RetryRequest(
                    exceptions.NoNetworkFoundInMaximumAllowedAttempts())
Example #16
    def delete_range(self, session, db_range):
        """Return count of deleted ranges

        :param session: database session
        :param db_range: IpamAvailabilityRange db object
        """
        try:
            return session.query(db_models.IpamAvailabilityRange).filter_by(
                allocation_pool_id=db_range.allocation_pool_id).filter_by(
                    first_ip=db_range.first_ip).filter_by(
                        last_ip=db_range.last_ip).delete()
        except orm_exc.ObjectDeletedError:
            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
Example #17
 def test__cleanup_handles_failures(self):
     retry_then_notfound = (
         [db_exc.RetryRequest(ValueError())] +
         [n_exc.NotFound()] * 10
     )
     self.mixin._l3_plugin.remove_router_interface.side_effect = (
         retry_then_notfound)
     self.mixin._l3_plugin.delete_router.side_effect = (
         retry_then_notfound)
     self.mixin._core_plugin.delete_network.side_effect = (
         retry_then_notfound)
     self.mixin._cleanup(self.ctx, network_id=44, router_id=45,
                         subnets=[{'id': 46}])
Example #18
def exc_to_retry(etypes):
    """Contextually reraise Exceptions as a RetryRequests.

    :param etypes: The class type to check the exception for.
    :returns: None
    :raises: A RetryRequest if any exception is caught in the context
        is a nested instance of etypes.
    """
    try:
        yield
    except Exception as e:
        with excutils.save_and_reraise_exception() as ctx:
            if _is_nested_instance(e, etypes):
                ctx.reraise = False
                raise db_exc.RetryRequest(e)
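
Because the body yields, exc_to_retry is presumably decorated with
contextlib.contextmanager in its original module (the decorator and the
_is_nested_instance helper are not shown in either copy of this snippet).
Assuming that, a call site would look roughly like this; ObjectNotFound and
lookup_object are placeholders:

# Hypothetical call site: any ObjectNotFound raised inside the block is
# converted to a RetryRequest, so a retry decorator further up the stack
# re-runs the whole operation instead of surfacing the error.
with exc_to_retry((ObjectNotFound,)):
    obj = lookup_object(context, object_id)
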
Example #19
    def allocate_partially_specified_segment(self, context, **filters):
        """Allocate model segment from pool partially specified by filters.

        Return allocated db object or None.
        """

        network_type = self.get_type()
        session = self._get_session(context)
        with session.begin(subtransactions=True):
            select = (session.query(self.model).filter_by(allocated=False,
                                                          **filters))

            # Selected segment can be allocated before update by someone else.
            allocs = select.limit(IDPOOL_SELECT_SIZE).all()

            if not allocs:
                # No resource available
                return

            alloc = random.choice(allocs)
            raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
            LOG.debug(
                "%(type)s segment allocate from pool "
                "started with %(segment)s ", {
                    "type": network_type,
                    "segment": raw_segment
                })
            count = (session.query(self.model).filter_by(
                allocated=False, **raw_segment).update({"allocated": True}))
            if count:
                LOG.debug(
                    "%(type)s segment allocate from pool "
                    "success with %(segment)s ", {
                        "type": network_type,
                        "segment": raw_segment
                    })
                return alloc

            # Segment allocated since select
            LOG.debug(
                "Allocate %(type)s segment from pool "
                "failed with segment %(segment)s", {
                    "type": network_type,
                    "segment": raw_segment
                })
            # save the real exception in case we run out of attempts
            raise db_exc.RetryRequest(
                exc.NoNetworkFoundInMaximumAllowedAttempts())
Example #20
 def _precommit_router_create(self, resource, event, trigger, context,
                              router, router_db, **kwargs):
     """Event handler to set ha flag and status on creation."""
     is_ha = self._is_ha(router)
     router['ha'] = is_ha
     self.set_extra_attr_value(context, router_db, 'ha', is_ha)
     if not is_ha:
         return
     # This will throw an exception if there aren't enough agents to
     # handle this HA router
     self.get_number_of_agents_for_scheduling(context)
     ha_net = self.get_ha_network(context, router['tenant_id'])
     if not ha_net:
         # net was deleted, throw a retry to start over to create another
         raise db_exc.RetryRequest(
             l3ha_exc.HANetworkConcurrentDeletion(
                 tenant_id=router['tenant_id']))
Example #21
    def allocate_partially_specified_segment(self, context, **filters):
        """Allocate model segment from pool partially specified by filters.

        Return allocated db object or None.
        """
        network_type = self.get_type()
        if directory.get_plugin(plugin_constants.NETWORK_SEGMENT_RANGE):
            calls = [
                functools.partial(
                    ns_range.NetworkSegmentRange.get_segments_for_project,
                    context, self.model, network_type,
                    self.model_segmentation_id, **filters),
                functools.partial(
                    ns_range.NetworkSegmentRange.get_segments_shared, context,
                    self.model, network_type, self.model_segmentation_id,
                    **filters)
            ]
        else:
            calls = [
                functools.partial(
                    self.segmentation_obj.get_random_unallocated_segment,
                    context, **filters)
            ]

        try_to_allocate = False
        for call in calls:
            allocations = call()
            if not isinstance(allocations, list):
                allocations = [allocations] if allocations else []
            for alloc in allocations:
                segment = dict((k, alloc[k]) for k in self.primary_keys)
                try_to_allocate = True
                if self.segmentation_obj.allocate(context, **segment):
                    LOG.debug(
                        '%(type)s segment allocate from pool success '
                        'with %(segment)s ', {
                            'type': network_type,
                            'segment': segment
                        })
                    return alloc

        if try_to_allocate:
            raise db_exc.RetryRequest(
                exceptions.NoNetworkFoundInMaximumAllowedAttempts())
 def _create_default_security_group(self, context, tenant_id):
     security_group = {
         'security_group':
             {'name': 'default',
              'tenant_id': tenant_id,
              'description': _('Default security group')}
     }
     try:
         security_group = self.create_security_group(
             context, security_group, default_sg=True)
         return security_group['id']
     except db_exc.DBDuplicateEntry as ex:
         # default security group was created concurrently
         LOG.debug("Duplicate default security group %s was "
                   "not created", ex.value)
         # raise a retry request to restart the whole process since
         # we could be in a REPEATABLE READ isolation level and won't
         # be able to see the SG group in this transaction.
         raise db_exc.RetryRequest(ex)
Example #23
    def delete_network(self, context, id):
        LOG.debug("MidonetPluginV2.delete_network called: id=%r", id)

        with context.session.begin(subtransactions=True):
            c_utils.check_delete_network_precommit(context, id)
            self._process_l3_delete(context, id)
            try:
                super(MidonetPluginV2, self).delete_network(context, id)
            except n_exc.NetworkInUse as ex:
                LOG.warning(_LW("Error deleting network %(net)s, retrying..."),
                            {'net': id})
                # Contention between DHCP port deletion and network deletion
                # occurs often, which leads to a NetworkInUse error. Retry to get
                # around this problem.
                raise oslo_db_exc.RetryRequest(ex)

            self.client.delete_network_precommit(context, id)

        self.client.delete_network_postcommit(id)

        LOG.debug("MidonetPluginV2.delete_network exiting: id=%r", id)
Example #24
    def delete(self, context):
        stack_ids = self._get_node_instance_stacks(context.plugin_session,
                                                   context.current_node['id'],
                                                   context.instance['id'])
        heatclient = self._get_heat_client(context.plugin_context)

        for stack in stack_ids:
            vip_port_id = None
            try:
                rstr = heatclient.client.resources.get(stack_ids[0].stack_id,
                                                       'loadbalancer')
                vip_port_id = rstr.attributes['vip_port_id']
            except heat_exc.HTTPNotFound:
                # stack not found, so no need to process any further
                pass
            heatclient.delete(stack.stack_id)
            if vip_port_id:
                for x in range(0, DELETE_VIP_PORT_RETRIES):
                    # We intentionally get a new session so as to be
                    # able to read the updated DB
                    session = db_api.get_reader_session()
                    vip_port = session.query(
                        ndb.Port).filter_by(id=vip_port_id).all()
                    if vip_port:
                        # heat stack delete is not finished yet, so try again
                        LOG.debug(("VIP port %s is not yet deleted"), vip_port)
                        LOG.debug(("Retry attempt; %s"), x + 1)
                        # Stack delete will at least take some minimal amount
                        # of time, hence we wait a little bit.
                        time.sleep(STACK_ACTION_WAIT_TIME)
                    else:
                        # we force a retry so that a new session can be
                        # used that will correctly reflect the VIP port as
                        # deleted and hence allow the subsequent policy driver
                        # to delete the VIP subnet
                        raise db_exc.RetryRequest(Exception)

        self._delete_node_instance_stack_in_db(context.plugin_session,
                                               context.current_node['id'],
                                               context.instance['id'])
Example #25
    def map_and_create_tables(self, resource_type, facade):
        if resource_type.state != "creating":
            raise RuntimeError("map_and_create_tables must be called in state "
                               "creating")

        mappers = self.get_classes(resource_type)
        tables = [
            Base.metadata.tables[klass.__tablename__]
            for klass in mappers.values()
        ]

        try:
            with facade.writer_connection() as connection:
                Base.metadata.create_all(connection, tables=tables)
        except exception.DBError as e:
            if self._is_current_transaction_aborted(e):
                raise exception.RetryRequest(e)
            raise

        # NOTE(sileht): no need to protect the _cache with a lock
        # get_classes cannot be called in state creating
        self._cache[resource_type.tablename] = mappers
Example #26
    def update_range(self, session, db_range, first_ip=None, last_ip=None):
        """Updates db_range to have new first_ip and last_ip.

        :param session: database session
        :param db_range: IpamAvailabilityRange db object
        :param first_ip: first ip address in range
        :param last_ip: last ip address in range
        :return: count of updated rows
        """
        opts = {}
        if first_ip:
            opts['first_ip'] = str(first_ip)
        if last_ip:
            opts['last_ip'] = str(last_ip)
        if not opts:
            raise ipam_exc.IpamAvailabilityRangeNoChanges()
        try:
            return session.query(db_models.IpamAvailabilityRange).filter_by(
                allocation_pool_id=db_range.allocation_pool_id).filter_by(
                    first_ip=db_range.first_ip).filter_by(
                        last_ip=db_range.last_ip).update(opts)
        except orm_exc.ObjectDeletedError:
            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
Example #27
def record(plugin_context,
           object_type,
           object_uuid,
           operation,
           data,
           ml2_context=None):
    if (object_type == odl_const.ODL_PORT
            and operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
        data = _enrich_port(plugin_context, ml2_context, object_type,
                            operation, data)

    # Calculate depending_on based on other journal entries
    depending_on = dependency_validations.calculate(plugin_context, operation,
                                                    object_type, object_uuid,
                                                    data)

    # NOTE(mpeterson): Between the moment that a dependency is calculated and
    # the new entry is recorded in the journal, an operation can occur that
    # would make the dependency irrelevant. In that case we request a retry.
    # For more details, read the commit message that introduced this comment.
    try:
        entry = db.create_pending_row(plugin_context,
                                      object_type,
                                      object_uuid,
                                      operation,
                                      data,
                                      depending_on=depending_on)
    except exception.DBReferenceError as e:
        raise exception.RetryRequest(e)

    _log_entry(LOG_RECORDED, entry)
    LOG.debug(
        'Entry with ID %(entry_id)s depends on these entries: '
        '%(depending_on)s', {
            'entry_id': entry.seqnum,
            'depending_on': [d.seqnum for d in depending_on]
        })
Example #28
 def test_multi_exception_contains_retry(self):
     e = exceptions.MultipleExceptions(
         [ValueError(), db_exc.RetryRequest(TypeError())])
     self.assertIsNone(self._decorated_function(1, e))
Example #29
    def _allocate_specific_ip(self,
                              session,
                              ip_address,
                              allocation_pool_id=None,
                              auto_generated=False):
        """Remove an IP address from subnet's availability ranges.

        This method is supposed to be called from within a database
        transaction, otherwise atomicity and integrity might not be
        enforced and the operation might result in inconsistent availability
        ranges for the subnet.

        :param session: database session
        :param ip_address: ip address to mark as allocated
        :param allocation_pool_id: identifier of the allocation pool from
             which the ip address has been extracted. If not specified this
             routine will scan all allocation pools.
        :param auto_generated: indicates whether ip was auto generated
        :returns: list of IP ranges as instances of IPAvailabilityRange
        """
        # Return immediately for EUI-64 addresses. For this
        # class of subnets availability ranges do not apply
        if ipv6_utils.is_eui64_address(ip_address):
            return

        LOG.debug(
            "Removing %(ip_address)s from availability ranges for "
            "subnet id:%(subnet_id)s", {
                'ip_address': ip_address,
                'subnet_id': self.subnet_manager.neutron_id
            })
        # Netaddr's IPRange and IPSet objects work very well even with very
        # large subnets, including IPv6 ones.
        final_ranges = []
        ip_in_pools = False
        if allocation_pool_id:
            av_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
                session, allocation_pool_id)
        else:
            av_ranges = self.subnet_manager.list_ranges_by_subnet_id(session)
        for db_range in av_ranges:
            initial_ip_set = netaddr.IPSet(
                netaddr.IPRange(db_range['first_ip'], db_range['last_ip']))
            final_ip_set = initial_ip_set - netaddr.IPSet([ip_address])
            if not final_ip_set:
                ip_in_pools = True
                # Range exhausted - bye bye
                if not self.subnet_manager.delete_range(session, db_range):
                    raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
                continue
            if initial_ip_set == final_ip_set:
                # IP address does not fall within the current range, move
                # to the next one
                final_ranges.append(db_range)
                continue
            ip_in_pools = True
            for new_range in final_ip_set.iter_ipranges():
                # store new range in database
                # use netaddr.IPAddress format() method which is equivalent
                # to str(...) but also enables us to use different
                # representation formats (if needed) for IPv6.
                first_ip = netaddr.IPAddress(new_range.first)
                last_ip = netaddr.IPAddress(new_range.last)
                if (db_range['first_ip'] == first_ip.format()
                        or db_range['last_ip'] == last_ip.format()):
                    rows = self.subnet_manager.update_range(session,
                                                            db_range,
                                                            first_ip=first_ip,
                                                            last_ip=last_ip)
                    if not rows:
                        raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
                    LOG.debug("Adjusted availability range for pool %s",
                              db_range['allocation_pool_id'])
                    final_ranges.append(db_range)
                else:
                    new_ip_range = self.subnet_manager.create_range(
                        session, db_range['allocation_pool_id'],
                        first_ip.format(), last_ip.format())
                    LOG.debug("Created availability range for pool %s",
                              new_ip_range['allocation_pool_id'])
                    final_ranges.append(new_ip_range)

        # If ip is autogenerated it should be present in allocation pools,
        # so retry if it is not there
        if auto_generated and not ip_in_pools:
            raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed)
        # Most callers might ignore this return value, which is however
        # useful for testing purposes
        LOG.debug(
            "Availability ranges for subnet id %(subnet_id)s "
            "modified: %(new_ranges)s", {
                'subnet_id':
                self.subnet_manager.neutron_id,
                'new_ranges':
                ", ".join([
                    "[%s; %s]" % (r['first_ip'], r['last_ip'])
                    for r in final_ranges
                ])
            })
        return final_ranges
 def new_create(*args, **kwargs):
     if not self._create_failed:
         self._create_failed = True
         raise db_exc.RetryRequest(ValueError())
     return orig(*args, **kwargs)
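
This last helper is the usual way to exercise retry behaviour in tests: fail
exactly once with a RetryRequest, then delegate to the original
implementation. A hedged sketch of how it might be wired up is shown below;
the create_widget method, the wrap_db_retry parameters and the surrounding
test class are hypothetical (and newer oslo.db retries RetryRequest
unconditionally, without the retry_on_request flag):

from unittest import mock

from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc


def test_create_retried_once(self):
    orig = self.plugin.create_widget   # hypothetical plugin method
    self._create_failed = False

    def new_create(*args, **kwargs):
        if not self._create_failed:
            self._create_failed = True
            raise db_exc.RetryRequest(ValueError())
        return orig(*args, **kwargs)

    # The retry wrapper sits above the patched method: the first call raises
    # RetryRequest, the wrapper re-invokes, the second call reaches `orig`.
    @oslo_db_api.wrap_db_retry(max_retries=3, retry_on_request=True)
    def call_create():
        return self.plugin.create_widget(self.context, {'widget': {}})

    with mock.patch.object(self.plugin, 'create_widget',
                           side_effect=new_create):
        call_create()
    self.assertTrue(self._create_failed)
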