Example #1
    def update_port_status(self, context, port_id, status):
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self,
                    context,
                    updated_port,
                    network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return True
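Note on the pattern above: contextlib.nested() only exists on Python 2. Below is a minimal, self-contained sketch (not Neutron's code) of the same lock-plus-transaction nesting written with plain stacked with statements; fake_transaction is an illustrative stand-in for session.begin(subtransactions=True), and only oslo.concurrency's lockutils is assumed.

    import contextlib

    from oslo_concurrency import lockutils


    @contextlib.contextmanager
    def fake_transaction():
        # Stand-in for session.begin(subtransactions=True).
        print("BEGIN")
        try:
            yield
            print("COMMIT")
        except Exception:
            print("ROLLBACK")
            raise


    def update_status_sketch():
        updated = False
        # Two stacked `with` statements replace contextlib.nested() here.
        with lockutils.lock('db-access'):
            with fake_transaction():
                # precommit work runs inside both the lock and the transaction
                updated = True
        if updated:
            # postcommit work runs only after lock and transaction are released
            pass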
Example #2
    def update_port_status(self, context, port_id, status, host=None):
        """
        Returns port_id (non-truncated uuid) if the port exists.
        Otherwise returns None.
        """
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return None
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network, port.port_binding,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return port['id']
Example #3
    def update_port_status(self, context, port_id, status):
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return True
Example #4
 def delete_flowrule(self, context, id):
     '''
     Called by a DELETE to /flowrules/<id>.

     Three methods are called here:
         delete_flowrule_precommit -> mechanism driver method; it should
             perform the delete calls on the "flowmods" DB table for the
             flowmods to be instantiated
         delete_flowrule -> calls delete_flowrule in db_base_plugin_v2.py
             (it deletes the flowrule from the "flowrules" table plus some
             checks)
         delete_flowrule_postcommit -> mechanism driver method to sync all
             flowmods with ODL
     '''
     # The id is the unique identifier that can be used to delete
     # the row entry of your database.
     LOG.debug(_("delete_flowrule with id %s"), id)
     # No return value is required.

     session = context.session
     with contextlib.nested(lockutils.lock('db-access'),
                            session.begin(subtransactions=True)):
         flowrule = self.get_flowrule(context, id)
         mech_context = driver_context.FlowruleContext(self,
                                                      context,
                                                      flowrule)
         self.mechanism_manager.delete_flowrule_precommit(mech_context)
         
         # Check whether the flowrule still exists in the DB.
         try:
             flowrule_db = (session.query(models_v2.Flowrule).
                            enable_eagerloads(False).
                            filter_by(id=id).with_lockmode('update').one())
         except sa_exc.NoResultFound:
             LOG.debug(_("The flowrule '%s' does not exist anymore"), id)
             return
         flowrule = self._make_flowrule_dict(flowrule_db)
         super(Ml2Plugin, self).delete_flowrule(context, id)
         
     # Sync all flowmods with ODL only after the DB transaction has been
     # committed, matching the precommit/postcommit split used elsewhere.
     self.mechanism_manager.delete_flowrule_postcommit(mech_context)
Example #5
    def _txn_from_context(self, context, tag="<unset>"):
        """Context manager: opens a DB transaction against the given context.

        If required, this also takes the Neutron-wide db-access semaphore.

        :return: context manager for use with with:.
        """
        session = context.session
        conn_url = str(session.connection().engine.url).lower()
        if (conn_url.startswith("mysql:") or
                conn_url.startswith("mysql+mysqldb:")):
            # Neutron is using the mysqldb driver for accessing the database.
            # This has a known incompatibility with eventlet that leads to
            # deadlock.  Take the neutron-wide db-access lock as a workaround.
            # See https://bugs.launchpad.net/oslo.db/+bug/1350149 for a
            # description of the issue.
            LOG.debug("Waiting for db-access lock tag=%s...", tag)
            try:
                with lockutils.lock('db-access'):
                    LOG.debug("...acquired db-access lock tag=%s", tag)
                    with context.session.begin(subtransactions=True) as txn:
                        yield txn
            finally:
                LOG.debug("Released db-access lock tag=%s", tag)
        else:
            # Liberty or later uses an eventlet-safe mysql library.  (Or, we're
            # not using mysql at all.)
            LOG.debug("Not using mysqldb driver, skipping db-access lock")
            with context.session.begin(subtransactions=True) as txn:
                yield txn
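A brief usage sketch for the helper above, assuming it is wrapped with @contextlib.contextmanager (the decorator is not visible in the snippet); the calling method and its name are illustrative only.

    def update_something(self, context):
        # The helper hides the mysqldb-vs-eventlet workaround from callers.
        with self._txn_from_context(context, tag="update-something"):
            pass  # all DB reads/writes for this operation go here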
Example #6
    def _set(self, key, value, ttl=0, not_exists=False):
        with lockutils.lock(key):

            # NOTE(flaper87): This is needed just in `set`
            # calls, hence it's not in `_set_unlocked`
            if not_exists and self._exists_unlocked(key):
                return False

            self._set_unlocked(key, value, ttl)
            return True
Example #7
    def _set(self, key, value, ttl=0, not_exists=False):
        with lockutils.lock(key):

            # NOTE(flaper87): This is needed just in `set`
            # calls, hence it's not in `_set_unlocked`
            if not_exists and self._exists_unlocked(key):
                return False

            self._set_unlocked(key, value, ttl)
            return True
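Examples #6 and #7 split the locked public method from the *_unlocked helpers. The standalone sketch below (a plain dict instead of the real cache backend, purely illustrative) shows why: the per-key lock is taken exactly once, and the unlocked helpers can be composed freely underneath it.

    from oslo_concurrency import lockutils


    class TinyCache(object):
        def __init__(self):
            self._data = {}

        def _exists_unlocked(self, key):
            return key in self._data

        def _set_unlocked(self, key, value, ttl):
            self._data[key] = (value, ttl)

        def _set(self, key, value, ttl=0, not_exists=False):
            with lockutils.lock(key):
                # `not_exists` gives set-if-absent semantics; the check and
                # the write happen under the same per-key lock.
                if not_exists and self._exists_unlocked(key):
                    return False
                self._set_unlocked(key, value, ttl)
                return True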
Example #8
    def _apply(self):
        lock_name = "iptables"
        if self.namespace:
            lock_name += "-" + self.namespace

        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug('Got semaphore / lock "%s"', lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug('Semaphore / lock released "%s"', lock_name)
Example #9
    def _incr_append(self, key, other):
        with lockutils.lock(key):
            timeout, value = self._get_unlocked(key)

            if value is None:
                return None

            ttl = timeutils.utcnow_ts() - timeout
            new_value = value + other
            self._set_unlocked(key, new_value, ttl)
            return new_value
Example #10
    def _incr_append(self, key, other):
        with lockutils.lock(key):
            timeout, value = self._get_unlocked(key)

            if value is None:
                return None

            ttl = timeutils.utcnow_ts() - timeout
            new_value = value + other
            self._set_unlocked(key, new_value, ttl)
            return new_value
Example #11
    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug('Got semaphore / lock "%s"', lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug('Semaphore / lock released "%s"', lock_name)
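Side note, as an illustrative alternative only (not the iptables manager's actual code): when the lock name is fixed rather than built per namespace, the same external lock can be expressed with the lockutils.synchronized decorator.

    from oslo_concurrency import lockutils


    @lockutils.synchronized('iptables', 'neutron-', external=True)
    def apply_rules():
        # Equivalent of calling _apply_synchronized() under the named lock.
        pass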
Example #12
    def update_port(self, context, id, port):
        attrs = port['port']
        need_port_update_notify = False

        session = context.session

        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = db.get_locked_port_and_binding(session, id)
            if not port_db:
                raise exc.PortNotFound(port_id=id)
            original_port = self._make_port_dict(port_db)
            updated_port = super(Ml2Plugin, self).update_port(context, id,
                                                              port)
            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            mech_context = driver_context.PortContext(
                self, context, updated_port, network, binding,
                original_port=original_port)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            self.mechanism_manager.update_port_precommit(mech_context)

        # TODO(apech) - handle errors raised by update_port, potentially
        # by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        self.mechanism_manager.update_port_postcommit(mech_context)

        need_port_update_notify |= self.is_security_group_member_updated(
            context, original_port, updated_port)

        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True

        bound_port = self._bind_port_if_needed(
            mech_context,
            allow_notify=True,
            need_notify=need_port_update_notify)
        return bound_port._port
Example #13
    def instance(cls, l3_agent):
        """Creates instance (singleton) of service.

        Do not directly call this for the base class. Instead, it should be
        called by a child class, that represents a specific service type.

        This ensures that only one instance is created for all agents of a
        specific service type.
        """
        if not cls._instance:
            with lockutils.lock('instance'):
                if not cls._instance:
                    cls._instance = cls(l3_agent)

        return cls._instance
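The method above uses double-checked locking. A self-contained sketch of the same pattern follows; ExampleService is a hypothetical class, not taken from the snippet.

    from oslo_concurrency import lockutils


    class ExampleService(object):
        _instance = None

        def __init__(self, agent):
            self.agent = agent

        @classmethod
        def instance(cls, agent):
            if not cls._instance:                  # fast path, no lock taken
                with lockutils.lock('instance'):
                    if not cls._instance:          # re-check under the lock
                        cls._instance = cls(agent)
            return cls._instance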
Example #14
def get_client(context, admin=False):
    if admin or (context.is_admin and not context.auth_token):
        with lockutils.lock('neutron_admin_auth_token_lock'):
            orig_token = AdminTokenStore.get().admin_auth_token
        client = _get_client(orig_token, admin=True)
        return ClientWrapper(client)

    # We got a user token that we can use as-is
    if context.auth_token:
        token = context.auth_token
        return _get_client(token=token)

    # We did not get a user token and we should not be using
    # an admin token, so raise an error
    raise neutron_client_exc.Unauthorized()
Example #15
def get_client(context, admin=False):
    if admin or (context.is_admin and not context.auth_token):
        with lockutils.lock('neutron_admin_auth_token_lock'):
            orig_token = AdminTokenStore.get().admin_auth_token
        client = _get_client(orig_token, admin=True)
        return ClientWrapper(client)

    # We got a user token that we can use as-is
    if context.auth_token:
        token = context.auth_token
        return _get_client(token=token)

    # We did not get a user token and we should not be using
    # an admin token, so raise an error
    raise neutron_client_exc.Unauthorized()
Example #16
    def delete_port(self, context, id, l3_port_check=True):
        LOG.debug(_("Deleting port %s"), id)
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin and l3_port_check:
            l3plugin.prevent_l3_port_deletion(context, id)

        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            try:
                port_db = (session.query(
                    models_v2.Port).enable_eagerloads(False).filter_by(
                        id=id).with_lockmode('update').one())
            except sa_exc.NoResultFound:
                # the port existed when l3plugin.prevent_l3_port_deletion
                # was called but now is already gone
                LOG.debug(_("The port '%s' was deleted"), id)
                return
            port = self._make_port_dict(port_db)

            network = self.get_network(context, port['network_id'])
            mech_context = driver_context.PortContext(self, context, port,
                                                      network)
            self.mechanism_manager.delete_port_precommit(mech_context)
            self._delete_port_security_group_bindings(context, id)
            LOG.debug(_("Calling base delete_port"))
            if l3plugin:
                router_ids = l3plugin.disassociate_floatingips(context,
                                                               id,
                                                               do_notify=False)

            super(Ml2Plugin, self).delete_port(context, id)

        # now that we've left db transaction, we are safe to notify
        if l3plugin:
            l3plugin.notify_routers_updated(context, router_ids)

        try:
            self.mechanism_manager.delete_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the port.  Ideally we'd notify the caller of the
            # fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
        self.notify_security_groups_member_updated(context, port)
Example #17
    def delete_port(self, context, id, l3_port_check=True):
        LOG.debug(_("Deleting port %s"), id)
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin and l3_port_check:
            l3plugin.prevent_l3_port_deletion(context, id)

        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            try:
                port_db = (session.query(models_v2.Port).
                           enable_eagerloads(False).
                           filter_by(id=id).with_lockmode('update').one())
            except sa_exc.NoResultFound:
                # the port existed when l3plugin.prevent_l3_port_deletion
                # was called but now is already gone
                LOG.debug(_("The port '%s' was deleted"), id)
                return
            port = self._make_port_dict(port_db)

            network = self.get_network(context, port['network_id'])
            mech_context = driver_context.PortContext(self, context, port,
                                                      network)
            self.mechanism_manager.delete_port_precommit(mech_context)
            self._delete_port_security_group_bindings(context, id)
            LOG.debug(_("Calling base delete_port"))
            if l3plugin:
                router_ids = l3plugin.disassociate_floatingips(
                    context, id, do_notify=False)

            super(Ml2Plugin, self).delete_port(context, id)

        # now that we've left db transaction, we are safe to notify
        if l3plugin:
            l3plugin.notify_routers_updated(context, router_ids)

        try:
            self.mechanism_manager.delete_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the port.  Ideally we'd notify the caller of the
            # fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
        self.notify_security_groups_member_updated(context, port)
Example #18
    def delete_port(self, context, id, l3_port_check=True):
        LOG.debug(_("Deleting port %s"), id)
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin and l3_port_check:
            l3plugin.prevent_l3_port_deletion(context, id)

        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = db.get_locked_port_and_binding(session, id)
            if not port_db:
                # the port existed when l3plugin.prevent_l3_port_deletion
                # was called but now is already gone
                LOG.debug(_("The port '%s' was deleted"), id)
                return
            port = self._make_port_dict(port_db)

            network = self.get_network(context, port['network_id'])
            mech_context = driver_context.PortContext(self, context, port,
                                                      network, binding)
            self.mechanism_manager.delete_port_precommit(mech_context)
            self._delete_port_security_group_bindings(context, id)
            LOG.debug(_("Calling base delete_port"))
            if l3plugin:
                router_ids = l3plugin.disassociate_floatingips(
                    context, id, do_notify=False)

            super(Ml2Plugin, self).delete_port(context, id)

        # now that we've left db transaction, we are safe to notify
        if l3plugin:
            l3plugin.notify_routers_updated(context, router_ids)

        try:
            self.mechanism_manager.delete_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the port.  Ideally we'd notify the caller of the
            # fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
        self.notify_security_groups_member_updated(context, port)
Example #19
    def _run_commands(self, port, commands):
        num_tries = 0
        max_tries = 1 + self._config.auth_failure_retries
        sleep_time = self._config.auth_failure_retry_interval

        while True:
            num_tries += 1
            try:
                # we must lock during switch communication here because we run
                # the save commands in a separate greenthread.
                with lockutils.lock('CiscoDriver-%s' % (port.switch_host),
                                    lock_file_prefix='neutron-'):
                    return self._run_commands_inner(port, commands)
            except CiscoException as err:
                if (num_tries == max_tries or not self._retryable_error(err)):
                    raise
                LOG.warning("Received retryable failure: %s" % err)
            time.sleep(sleep_time)
Example #20
    def _run_commands(self, port, commands):
        num_tries = 0
        max_tries = 1 + self._config.auth_failure_retries
        sleep_time = self._config.auth_failure_retry_interval

        while True:
            num_tries += 1
            try:
                # we must lock during switch communication here because we run
                # the save commands in a separate greenthread.
                with lockutils.lock('CiscoDriver-%s' % (port.switch_host),
                                    lock_file_prefix='neutron-'):
                    return self._run_commands_inner(port, commands)
            except CiscoException as err:
                if (num_tries == max_tries or not self._retryable_error(err)):
                    raise
                LOG.warning("Received retryable failure: %s" % err)
            time.sleep(sleep_time)
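Examples #19 and #20 combine a bounded retry loop with a per-switch lock. The sketch below is a self-contained rendering of that pattern under stated assumptions: RetryableError and flaky_switch_call are invented stand-ins for the Cisco-specific pieces.

    import time

    from oslo_concurrency import lockutils


    class RetryableError(Exception):
        pass


    def flaky_switch_call(host):
        return 'ok from %s' % host  # stand-in for the real switch I/O


    def run_with_retries(host, max_tries=3, sleep_time=1):
        num_tries = 0
        while True:
            num_tries += 1
            try:
                # Hold a per-host lock for the whole attempt so concurrent
                # greenthreads cannot interleave their command sequences.
                with lockutils.lock('demo-%s' % host,
                                    lock_file_prefix='demo-'):
                    return flaky_switch_call(host)
            except RetryableError as err:
                if num_tries == max_tries:
                    raise
                print('Received retryable failure: %s' % err)
            time.sleep(sleep_time)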
Example #21
 def delete_listener(self, context, id):
     with contextlib.nested(lockutils.lock('db-access'),
                            context.session.begin(subtransactions=True)):
         listener_db_entry = self._get_resource(context, models.Listener, id)
         #if listener_db_entry.admin_state_up:
         #    filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id],
         #               'admin_state_up': [True]}
         #    all_filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id]}
         #    all_listeners = self._get_resources(context,
         #                                        models.Listener,
         #                                        filters=all_filters)
         #    if len(all_listeners)>1:
         #        up_listeners = self._get_resources(context,
         #                                            models.Listener,
         #                                            filters=filters)
         #        if len(up_listeners)<=1:
         #            raise loadbalancerv2.OneListenerAdminStateUpAtLeast(
         #                lb_id=listener_db_entry.loadbalancer_id)
         context.session.delete(listener_db_entry)
Example #22
    def update_listener(self, context, id, listener):

        with contextlib.nested(lockutils.lock('db-access'),
                               context.session.begin(subtransactions=True)):
            listener_db = self._get_resource(context, models.Listener, id)
            admin_enable = listener.get('admin_state_up')
            if admin_enable is False and listener_db.admin_state_up:
                filters = {'loadbalancer_id': [listener_db.loadbalancer_id],
                           'admin_state_up': [True]}
                up_listeners = self._get_resources(context,
                                                    models.Listener,
                                                    filters=filters)
                if len(up_listeners) <= 1:
                    raise loadbalancerv2.OneListenerAdminStateUpAtLeast(
                        lb_id=listener_db.loadbalancer_id)

            pool_id = listener.get('default_pool_id')
            lb_id = listener.get('loadbalancer_id')

            # Do not allow changing loadbalancer ids
            if listener_db.loadbalancer_id and lb_id:
                raise loadbalancerv2.AttributeIDImmutable(
                    attribute='loadbalancer_id')
            # Do not allow changing pool ids
            #if listener_db.default_pool_id and pool_id:
            #    raise loadbalancerv2.AttributeIDImmutable(
            #        attribute='default_pool_id')
            if lb_id:
                if not self._resource_exists(context, models.LoadBalancer,
                                             lb_id):
                    raise loadbalancerv2.EntityNotFound(
                        name=models.LoadBalancer.NAME, id=lb_id)
            loadbalancer_db = listener_db.loadbalancer
            if pool_id:
                if not self._resource_exists(context, models.PoolV2, pool_id):
                    raise loadbalancerv2.EntityNotFound(
                        name=models.PoolV2.NAME, id=pool_id)
                pool = self._get_resource(context, models.PoolV2, pool_id)
                if pool.subnet_id:
                    if loadbalancer_db.vip_subnet_id != pool.subnet_id:
                        raise loadbalancerv2.LoadBalancerPoolSubnetMismatch()
                else:
                    if loadbalancer_db.vip_subnet_id:
                        raise loadbalancerv2.LoadBalancerPoolSubnetMismatch()
                    elif loadbalancer_db.vip_network_id != pool.network_id:
                        raise loadbalancerv2.LoadBalancerPoolNetworkMismatch()

                protocol = listener.get('protocol') or listener_db.protocol
                if pool.protocol != protocol:
                    raise loadbalancerv2.ListenerPoolProtocolMismatch(
                        listener_proto=protocol,
                        pool_proto=pool.protocol)
                filters = {'default_pool_id': [pool_id]}
                listenerpools = self._get_resources(context,
                                                    models.Listener,
                                                    filters=filters)
                if listenerpools:
                    if listenerpools[0].id != id:
                        raise loadbalancerv2.EntityInUse(
                            entity_using=models.Listener.NAME,
                            id=listenerpools[0].id,
                            entity_in_use=models.PoolV2.NAME)

                filters = {'redirect_pool_id': [pool_id]}
                l7policypools = self._get_resources(context,
                                                    models.L7Policy,
                                                    filters=filters)
                if l7policypools:
                    raise loadbalancerv2.EntityInUse(
                        entity_using=models.L7Policy.NAME,
                        id=l7policypools[0].id,
                        entity_in_use=models.PoolV2.NAME)

                if (listener_db.default_pool_id and
                       listener_db.default_pool_id != pool_id):
                    self.update_status(context, models.PoolV2,
                        listener_db.default_pool_id, constants.DEFERRED)
            else:
                # Only clear the default pool when default_pool_id was
                # explicitly passed in the update but left empty.
                if 'default_pool_id' in listener:
                    listener['default_pool_id'] = None
                    listener['default_pool'] = None
            listener_db.update(listener)
        context.session.refresh(listener_db)
        return data_models.Listener.from_sqlalchemy_model(listener_db)
Example #23
    def delete_network(self, context, id):
        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
        # function is not used because it auto-deletes ports and
        # subnets from the DB without invoking the derived class's
        # delete_port() or delete_subnet(), preventing mechanism
        # drivers from being called. This approach should be revisited
        # when the API layer is reworked during icehouse.

        LOG.debug(_("Deleting network %s"), id)
        session = context.session
        while True:
            try:
                # REVISIT(rkukura): Its not clear that
                # with_lockmode('update') is really needed in this
                # transaction, and if not, the semaphore can also be
                # removed.
                #
                # REVISIT: Serialize this operation with a semaphore
                # to prevent deadlock waiting to acquire a DB lock
                # held by another thread in the same process, leading
                # to 'lock wait timeout' errors.
                with contextlib.nested(lockutils.lock('db-access'),
                                       session.begin(subtransactions=True)):
                    self._process_l3_delete(context, id)
                    # Get ports to auto-delete.
                    ports = (session.query(models_v2.Port).
                             enable_eagerloads(False).
                             filter_by(network_id=id).
                             with_lockmode('update').all())
                    LOG.debug(_("Ports to auto-delete: %s"), ports)
                    only_auto_del = all(p.device_owner
                                        in db_base_plugin_v2.
                                        AUTO_DELETE_PORT_OWNERS
                                        for p in ports)
                    if not only_auto_del:
                        LOG.debug(_("Tenant-owned ports exist"))
                        raise exc.NetworkInUse(net_id=id)

                    # Get subnets to auto-delete.
                    subnets = (session.query(models_v2.Subnet).
                               enable_eagerloads(False).
                               filter_by(network_id=id).
                               with_lockmode('update').all())
                    LOG.debug(_("Subnets to auto-delete: %s"), subnets)

                    if not (ports or subnets):
                        network = self.get_network(context, id)
                        mech_context = driver_context.NetworkContext(self,
                                                                     context,
                                                                     network)
                        self.mechanism_manager.delete_network_precommit(
                            mech_context)

                        record = self._get_network(context, id)
                        LOG.debug(_("Deleting network record %s"), record)
                        session.delete(record)

                        for segment in mech_context.network_segments:
                            self.type_manager.release_segment(session, segment)

                        # The segment records are deleted via cascade from the
                        # network record, so explicit removal is not necessary.
                        LOG.debug(_("Committing transaction"))
                        break
            except os_db_exception.DBError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    if isinstance(e.inner_exception, sql_exc.IntegrityError):
                        ctxt.reraise = False
                        msg = _("A concurrent port creation has occurred")
                        LOG.warning(msg)
                        continue

            for port in ports:
                try:
                    self.delete_port(context, port.id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Exception auto-deleting port %s"),
                                      port.id)

            for subnet in subnets:
                try:
                    self.delete_subnet(context, subnet.id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Exception auto-deleting subnet %s"),
                                      subnet.id)

        try:
            self.mechanism_manager.delete_network_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the network.  Ideally we'd notify the caller of
            # the fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_network_postcommit failed"))
        self.notifier.network_delete(context, id)
Example #24
    def delete_subnet(self, context, id):
        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
        # function is not used because it deallocates the subnet's addresses
        # from ports in the DB without invoking the derived class's
        # update_port(), preventing mechanism drivers from being called.
        # This approach should be revisited when the API layer is reworked
        # during icehouse.

        LOG.debug(_("Deleting subnet %s"), id)
        session = context.session
        while True:
            # REVISIT: Serialize this operation with a semaphore to
            # prevent deadlock waiting to acquire a DB lock held by
            # another thread in the same process, leading to 'lock
            # wait timeout' errors.
            with contextlib.nested(lockutils.lock('db-access'),
                                   session.begin(subtransactions=True)):
                subnet = self.get_subnet(context, id)
                # Get ports to auto-deallocate
                allocated = (session.query(models_v2.IPAllocation).
                             filter_by(subnet_id=id).
                             join(models_v2.Port).
                             filter_by(network_id=subnet['network_id']).
                             with_lockmode('update').all())
                LOG.debug(_("Ports to auto-deallocate: %s"), allocated)
                only_auto_del = all(not a.port_id or
                                    a.ports.device_owner in db_base_plugin_v2.
                                    AUTO_DELETE_PORT_OWNERS
                                    for a in allocated)
                if not only_auto_del:
                    LOG.debug(_("Tenant-owned ports exist"))
                    raise exc.SubnetInUse(subnet_id=id)

                if not allocated:
                    mech_context = driver_context.SubnetContext(self, context,
                                                                subnet)
                    self.mechanism_manager.delete_subnet_precommit(
                        mech_context)

                    LOG.debug(_("Deleting subnet record"))
                    record = self._get_subnet(context, id)
                    session.delete(record)

                    LOG.debug(_("Committing transaction"))
                    break

            for a in allocated:
                if a.port_id:
                    # calling update_port() for each allocation to remove the
                    # IP from the port and call the MechanismDrivers
                    data = {'port':
                            {'fixed_ips': [{'subnet_id': ip.subnet_id,
                                            'ip_address': ip.ip_address}
                                           for ip in a.ports.fixed_ips
                                           if ip.subnet_id != id]}}
                    try:
                        self.update_port(context, a.port_id, data)
                    except Exception:
                        with excutils.save_and_reraise_exception():
                            LOG.exception(_("Exception deleting fixed_ip from "
                                            "port %s"), a.port_id)
                session.delete(a)

        try:
            self.mechanism_manager.delete_subnet_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the subnet.  Ideally we'd notify the caller of
            # the fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_subnet_postcommit failed"))
Example #25
 def __init__(self, name, lock_file_prefix=None):
     self.mgr = lockutils.lock(name, lock_file_prefix, True)
Example #26
    def _commit_port_binding(self, plugin_context, port_id, orig_binding,
                             new_context):
        session = plugin_context.session
        new_binding = new_context._binding

        # After we've attempted to bind the port, we begin a
        # transaction, get the current port state, and decide whether
        # to commit the binding results.
        #
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            # Get the current port state and build a new PortContext
            # reflecting this state as original state for subsequent
            # mechanism driver update_port_*commit() calls.
            port_db, cur_binding = db.get_locked_port_and_binding(session,
                                                                  port_id)
            if not port_db:
                # The port has been deleted concurrently.
                return
            oport = self._make_port_dict(port_db)
            port = self._make_port_dict(port_db)
            network = self.get_network(plugin_context, port['network_id'])
            cur_context = driver_context.PortContext(
                self, plugin_context, port, network, cur_binding,
                original_port=oport)

            # Commit our binding results only if port has not been
            # successfully bound concurrently by another thread or
            # process and no binding inputs have been changed.
            commit = ((cur_binding.vif_type in
                       [portbindings.VIF_TYPE_UNBOUND,
                        portbindings.VIF_TYPE_BINDING_FAILED]) and
                      orig_binding.host == cur_binding.host and
                      orig_binding.vnic_type == cur_binding.vnic_type and
                      orig_binding.profile == cur_binding.profile)

            if commit:
                # Update the port's binding state with our binding
                # results.
                cur_binding.vif_type = new_binding.vif_type
                cur_binding.vif_details = new_binding.vif_details
                cur_binding.driver = new_binding.driver
                cur_binding.segment = new_binding.segment

                # REVISIT(rkukura): The binding:profile attribute is
                # supposed to be input-only, but the Mellanox driver
                # currently modifies it while binding. Remove this
                # code when the Mellanox driver has been updated to
                # use binding:vif_details instead.
                if cur_binding.profile != new_binding.profile:
                    cur_binding.profile = new_binding.profile

                # Update PortContext's port dictionary to reflect the
                # updated binding state.
                self._update_port_dict_binding(port, cur_binding)

                # Update the port status if requested by the bound driver.
                if new_binding.segment and new_context._new_port_status:
                    port_db.status = new_context._new_port_status
                    port['status'] = new_context._new_port_status

                # Call the mechanism driver precommit methods, commit
                # the results, and call the postcommit methods.
                self.mechanism_manager.update_port_precommit(cur_context)
        if commit:
            self.mechanism_manager.update_port_postcommit(cur_context)

        # Continue, using the port state as of the transaction that
        # just finished, whether that transaction committed new
        # results or discovered concurrent port state changes.
        return (cur_context, commit)
Example #27
 def _get(self, key, default=None):
     with lockutils.lock(key):
         return self._get_unlocked(key, default)[1]
Example #28
 def __contains__(self, key):
     with lockutils.lock(key):
         return self._exists_unlocked(key)
Example #29
    def delete_port(self, context, id, l3_port_check=True):
        LOG.debug(_("Deleting port %s"), id)
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin and l3_port_check:
            l3plugin.prevent_l3_port_deletion(context, id)

        session = context.session
        mech_context = None
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            try:
                port_db = (session.query(models_v2.Port).
                           enable_eagerloads(False).
                           filter_by(id=id).with_lockmode('update').one())
            except sa_exc.NoResultFound:
                # the port existed when l3plugin.prevent_l3_port_deletion
                # was called but now is already gone
                LOG.debug(_("The port '%s' was deleted"), id)
                return
            port = self._make_port_dict(port_db)

            network = self.get_network(context, port['network_id'])
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                bindings = db.get_dvr_port_bindings(id)
                for bind in bindings:
                    mech_context = driver_context.PortContext(self, context,
                                                              port, network,
                                                              binding=bind)
                    self.mechanism_manager.delete_port_precommit(mech_context)
                LOG.debug("Calling base delete_port %s for DVR", id)
                super(Ml2Plugin, self).delete_port(context, id)
            else:
                mech_context = driver_context.PortContext(self, context, port,
                                                          network)
                if "compute:" in port['device_owner']:
                    self.dvr_deletens_ifnovm(context, id)
                self.mechanism_manager.delete_port_precommit(mech_context)
                self._delete_port_security_group_bindings(context, id)
                LOG.debug(_("Calling base delete_port"))
                if l3plugin:
                    l3plugin.disassociate_floatingips(context, id)
                super(Ml2Plugin, self).delete_port(context, id)

        try:
            # for both normal and DVR Interface ports, only one invocation of
            # delete_port_postcommit
            if mech_context:
                self.mechanism_manager.delete_port_postcommit(mech_context)
            else:
                LOG.error(_("Unable to invoke delete_port_postcommit,"
                            " mech_context NULL for port %s"), id)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the port.  Ideally we'd notify the caller of the
            # fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_port_postcommit failed for"
                        " port %s"), id)
        self.notify_security_groups_member_updated(context, port)
Example #30
    def update_port(self, context, id, port):
        do_commit = False

        attrs = port['port']
        need_port_update_notify = False

        LOG.info('Attempting port update %s: %s', id, port)

        session = context.session

        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = ml2_db.get_locked_port_and_binding(session, id)
            if not port_db:
                raise exc.PortNotFound(port_id=id)

            original_port = self._make_port_dict(port_db)
            # Process extension data
            self._find_port_dict_extensions(original_port,
                                            None,
                                            session=session)

            updated_port = super(plugin.Ml2Plugin,
                                 self).update_port(context, id, port)

            # Process extension data
            port_ext = self._update_port_ext(original_port,
                                             updated_port,
                                             port,
                                             session=session)
            switchports = self._update_switchports(updated_port,
                                                   port,
                                                   session=session)
            self._find_port_dict_extensions(updated_port,
                                            None,
                                            port_ext=port_ext,
                                            switchports=switchports,
                                            session=session)

            # We only want to commit on a state change
            if original_port["commit"] != updated_port["commit"]:
                do_commit = True
                # If we are transitioning to active, validate
                if not original_port["commit"] and updated_port["commit"]:
                    self._validate_port_can_commit(updated_port,
                                                   None,
                                                   session=session)

            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (self.update_address_pairs_on_port(
                    context, id, port, original_port, updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            mech_context = driver_context.PortContext(
                self,
                context,
                updated_port,
                network,
                binding,
                original_port=original_port)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            self.mechanism_manager.update_port_precommit(mech_context)

        # TODO(apech) - handle errors raised by update_port, potentially
        # by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        if do_commit:
            self.mechanism_manager.update_port_postcommit(mech_context)

        need_port_update_notify |= self.is_security_group_member_updated(
            context, original_port, updated_port)

        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True

        if need_port_update_notify:
            self._notify_port_updated(mech_context)

        bound_port = self._bind_port_if_needed(
            mech_context,
            allow_notify=True,
            need_notify=need_port_update_notify)
        return bound_port._port
Example #31
    def update_port_status(self, context, port_id, status, host=None):
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                binding = db.get_dvr_port_binding_by_host(port_id=port['id'],
                                                          host=host,
                                                          session=session)
                if not binding:
                    LOG.error(_("Binding info for port %s not found"),
                              port_id)
                    return False
                binding['status'] = status
                binding.update(binding)

        # binding already updated
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                original_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                port.status = self._generate_dvr_port_status(session,
                                                             port['id'])
                updated_port = self._make_port_dict(port)
                mech_context = (driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port,
                    binding=binding))
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True
            elif port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            self._check_and_delete_dvr_port_binding(mech_context, context)

        return True
Example #32
 def _get(self, key, default=None):
     with lockutils.lock(key):
         return self._get_unlocked(key, default)[1]
Example #33
def _update_token(new_token):
    with lockutils.lock('neutron_admin_auth_token_lock'):
        token_store = AdminTokenStore.get()
        token_store.admin_auth_token = new_token
Example #34
def _update_token(new_token):
    with lockutils.lock('neutron_admin_auth_token_lock'):
        token_store = AdminTokenStore.get()
        token_store.admin_auth_token = new_token
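Examples #33 and #34 are the write side of the admin-token cache whose read side appears in Examples #14 and #15; both serialize on the same named lock. The sketch below ties the two together; this AdminTokenStore is a trivial stand-in, not the real class.

    from oslo_concurrency import lockutils


    class AdminTokenStore(object):
        _instance = None

        def __init__(self):
            self.admin_auth_token = None

        @classmethod
        def get(cls):
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance


    def _update_token(new_token):
        with lockutils.lock('neutron_admin_auth_token_lock'):
            AdminTokenStore.get().admin_auth_token = new_token


    def _read_token():
        with lockutils.lock('neutron_admin_auth_token_lock'):
            return AdminTokenStore.get().admin_auth_token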
Example #35
    def update_port(self, context, id, port):
        do_commit = False

        attrs = port['port']
        need_port_update_notify = False

        LOG.info('Attempting port update %s: %s', id, port)

        session = context.session

        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = ml2_db.get_locked_port_and_binding(session, id)
            if not port_db:
                raise exc.PortNotFound(port_id=id)

            original_port = self._make_port_dict(port_db)
            # Process extension data
            self._find_port_dict_extensions(
                original_port, None, session=session)

            updated_port = super(plugin.Ml2Plugin, self).update_port(
                context, id, port)

            # Process extension data
            port_ext = self._update_port_ext(
                original_port, updated_port, port, session=session)
            switchports = self._update_switchports(
                updated_port, port, session=session)
            self._find_port_dict_extensions(
                updated_port, None, port_ext=port_ext,
                switchports=switchports, session=session)

            # We only want to commit on a state change
            if original_port["commit"] != updated_port["commit"]:
                do_commit = True
                # If we are transitioning to active, validate
                if not original_port["commit"] and updated_port["commit"]:
                    self._validate_port_can_commit(
                        updated_port, None, session=session)

            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            mech_context = driver_context.PortContext(
                self, context, updated_port, network, binding,
                original_port=original_port)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            self.mechanism_manager.update_port_precommit(mech_context)

        # TODO(apech) - handle errors raised by update_port, potentially
        # by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        if do_commit:
            self.mechanism_manager.update_port_postcommit(mech_context)

        need_port_update_notify |= self.is_security_group_member_updated(
            context, original_port, updated_port)

        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True

        if need_port_update_notify:
            self._notify_port_updated(mech_context)

        bound_port = self._bind_port_if_needed(
            mech_context,
            allow_notify=True,
            need_notify=need_port_update_notify)
        return bound_port._port
Example #36
    def create_listener(self, context, listener):

        try:
            with contextlib.nested(lockutils.lock('db-access'),
                               context.session.begin(subtransactions=True)):
                self._load_id_and_tenant_id(context, listener)
                listener['status'] = constants.PENDING_CREATE
                # Check for unspecified loadbalancer_id and default_pool_id
                # and set them to None
                for id in ['loadbalancer_id', 'default_pool_id']:
                    if listener.get(id) == attributes.ATTR_NOT_SPECIFIED:
                        listener[id] = None
                pool_id = listener.get('default_pool_id')
                lb_id = listener.get('loadbalancer_id')
                if lb_id:
                    if not self._resource_exists(context, models.LoadBalancer,
                                                 lb_id):
                        raise loadbalancerv2.EntityNotFound(
                            name=models.LoadBalancer.NAME, id=lb_id)
                loadbalancer_db = self._get_resource(
                    context, models.LoadBalancer, lb_id)
                if pool_id:
                    if not self._resource_exists(context, models.PoolV2,
                                                 pool_id):
                        raise loadbalancerv2.EntityNotFound(
                            name=models.PoolV2.NAME, id=pool_id)
                    pool = self._get_resource(context, models.PoolV2, pool_id)

                    if pool.subnet_id:
                        if loadbalancer_db.vip_subnet_id != pool.subnet_id:
                            raise loadbalancerv2.LoadBalancerPoolSubnetMismatch()
                    else:
                        if loadbalancer_db.subnet_id:
                            raise loadbalancerv2.LoadBalancerPoolSubnetMismatch()
                        elif loadbalancer_db.network_id != pool.network_id:
                            raise loadbalancerv2.LoadBalancerPoolNetworkMismatch()

                    if ((pool.protocol, listener.get('protocol'))
                        not in lb_const.LISTENER_POOL_COMPATIBLE_PROTOCOLS):
                        raise loadbalancerv2.ListenerPoolProtocolMismatch(
                            listener_proto=listener['protocol'],
                            pool_proto=pool.protocol)
                    filters = {'default_pool_id': [pool_id]}
                    listenerpools = self._get_resources(context,
                                                        models.Listener,
                                                        filters=filters)
                    if listenerpools:
                        raise loadbalancerv2.EntityInUse(
                            entity_using=models.Listener.NAME,
                            id=listenerpools[0].id,
                            entity_in_use=models.PoolV2.NAME)
                    filters = {'redirect_pool_id': [pool_id]}
                    l7policypools = self._get_resources(context,
                                                    models.L7Policy,
                                                    filters=filters)
                    if l7policypools:
                        raise loadbalancerv2.EntityInUse(
                            entity_using=models.L7Policy.NAME,
                            id=l7policypools[0].id,
                            entity_in_use=models.PoolV2.NAME)

                listener['created_at'] = timeutils.utcnow()
                listener_db_entry = models.Listener(**listener)

                context.session.add(listener_db_entry)
        except exception.DBDuplicateEntry:
            raise loadbalancerv2.LoadBalancerListenerProtocolPortExists(
                lb_id=listener['loadbalancer_id'],
                protocol_port=listener['protocol_port'])
        return data_models.Listener.from_sqlalchemy_model(listener_db_entry)
Example #37
 def __contains__(self, key):
     with lockutils.lock(key):
         return self._exists_unlocked(key)