Esempio n. 1
0
    def _ensure_default_security_group(self, context, tenant_id):
        """Create a default security group if one doesn't exist.

        :param context: neutron api request context
        :param tenant_id: tenant whose default group must exist
        :returns: the default security group id.
        """
        query = self._model_query(context, DefaultSecurityGroup)
        # the next loop should do 2 iterations at max
        while True:
            try:
                default_group = query.filter_by(tenant_id=tenant_id).one()
            except exc.NoResultFound:
                # no default group yet: attempt to create it ourselves
                security_group = {
                    'security_group':
                        {'name': 'default',
                         'tenant_id': tenant_id,
                         'description': _('Default security group')}
                }
                try:
                    with db_api.autonested_transaction(context.session):
                        ret = self.create_security_group(
                            context, security_group, default_sg=True)
                except exception.DBDuplicateEntry as ex:
                    # a concurrent request won the race; loop back and
                    # re-read the row it created
                    LOG.debug("Duplicate default security group %s was "
                              "not created", ex.value)
                    continue
                else:
                    return ret['id']
            else:
                return default_group['security_group_id']
Esempio n. 2
0
def add_provisioning_component(context, object_id, object_type, entity):
    """Record a provisioning block for an object on behalf of an entity.

    While at least one provisioning block exists for object_id, this module
    will not emit callback events indicating that provisioning completed;
    disjoint components add blocks and subscribe to PROVISIONING_COMPLETE
    to learn when all of them have finished.

    :param context: neutron api request context
    :param object_id: ID of object that has been provisioned
    :param object_type: callback resource type of the object
    :param entity: The entity that has provisioned the object
    """
    log_args = {'entity': entity, 'oid': object_id, 'otype': object_type}
    # callers hand us the object's own ID; blocks are keyed by the
    # corresponding standard attribute ID
    standard_attr_id = _get_standard_attr_id(context, object_id, object_type)
    if not standard_attr_id:
        return
    try:
        with db_api.autonested_transaction(context.session):
            context.session.add(
                ProvisioningBlock(standard_attr_id=standard_attr_id,
                                  entity=entity))
    except db_exc.DBDuplicateEntry:
        # a block from an earlier transition may still be pending
        # (e.g. several updates in quick succession) — nothing to do
        LOG.debug("Ignored duplicate provisioning block setup for %(otype)s "
                  "%(oid)s by entity %(entity)s.", log_args)
        return
    LOG.debug("Transition to ACTIVE for %(otype)s object %(oid)s "
              "will not be triggered until provisioned by entity %(entity)s.",
              log_args)
Esempio n. 3
0
    def create_network(self, context, network):
        """Instruct HDN operators to create a network

        This function implements the "network create" Neutron API operation.

        @param context - The Neutron context reference. This parameter holds
        a database session (context.session), the identifier of the tenant
        performing the operation (context.tenant_id), and other attributes
        such as a flag to test whether the tenant is an administrator
        (context.is_admin)

        @param network - A dict containing data of the network to be created

        @return - A dict describing the newly created network

        """

        # Set the status of the network as 'PENDING CREATE'
        network['network']['status'] = constants.STATUS_PENDING_CREATE
        with db_api.autonested_transaction(context.session):
            new_net = super(HdnNeutronPlugin, self).create_network(
                context, network)
            # process L3 extension attributes within the same transaction
            self._process_l3_create(context, new_net, network['network'])

        # Use the HDN library to notify operators about the new network
        LOG.debug("Queued request to create network: %s", new_net['id'])
        hdnlib.notify_network_create(new_net)
        # Network is not present in neutron.callbacks.resources
        # TODO(salv-orlando): do not use literal for resource name
        registry.notify('NETWORK', events.AFTER_CREATE, self,
                        tenant_id=context.tenant_id,
                        resource_id=new_net['id'])
        return new_net
Esempio n. 4
0
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk.

        :param context: neutron api request context
        :param trunk_id: id of the trunk to remove subports from
        :param subports: list of subport dicts, each carrying 'port_id'
        :raises TrunkNotFound: if no trunk with trunk_id exists
        :raises SubPortNotFound: if a port is not a subport of the trunk
        :returns: the updated trunk object
        """
        with db_api.autonested_transaction(context.session):
            trunk = trunk_objects.Trunk.get_object(context, id=trunk_id)
            if trunk is None:
                raise trunk_exc.TrunkNotFound(trunk_id=trunk_id)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(
                context, basic_validation=True,
                trunk_validation=False)

            # index current subports by port_id so removals are O(1) lookups
            current_subports = {p.port_id: p for p in trunk.sub_ports}

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()

            # whatever was not popped above remains on the trunk
            trunk.sub_ports = list(current_subports.values())
            return trunk
Esempio n. 5
0
 def update_trunk_status(self, context, trunk_id, status):
     """Update the trunk status to reflect outcome of data plane wiring."""
     with db_api.autonested_transaction(context.session):
         trunk_obj = trunk_objects.Trunk.get_object(context, id=trunk_id)
         # silently skip trunks that no longer exist
         if not trunk_obj:
             return
         trunk_obj.status = status
         trunk_obj.update()
Esempio n. 6
0
    def create_policy_rule(self, context, rule_obj, policy_id, rule_data):
        """Create a QoS policy rule.

        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_obj: the rule object
        :type rule_obj: a class from the rule_object (qos.objects.rule) module
        :param policy_id: the id of the QosPolicy for which to create the rule
        :type policy_id: str uuid
        :param rule_data: the rule data to be applied
        :type rule_data: dict

        :returns: a QoS policy rule object
        """
        rule_type = rule_obj.rule_type
        # the API payload nests the actual data under '<rule_type>_rule'
        rule_data = rule_data[rule_type + '_rule']

        with db_api.autonested_transaction(context.session):
            # Ensure that we have access to the policy.
            policy = self._get_policy_obj(context, policy_id)
            rule = rule_obj(context, qos_policy_id=policy_id, **rule_data)
            rule.create()
            # refresh the policy's rule list so the notification below
            # includes the newly created rule
            policy.reload_rules()
        self.notification_driver_manager.update_policy(context, policy)
        return rule
Esempio n. 7
0
 def create(self):
     """Persist the subnet pool, then attach prefixes if they were set."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         # capture prefixes before create() resets change tracking
         pending_prefixes = self.prefixes
         super(SubnetPool, self).create()
         if 'prefixes' in changed:
             self._attach_prefixes(pending_prefixes)
Esempio n. 8
0
    def update_policy_rule(self, context, rule_obj, rule_id, policy_id,
            rule_data):
        """Update a QoS policy rule.

        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_obj: the rule object
        :type rule_obj: a class from the rule_object (qos.objects.rule) module
        :param rule_id: the id of the QoS policy rule to update
        :type rule_id: str uuid
        :param policy_id: the id of the rule's policy
        :type policy_id: str uuid
        :param rule_data: the new rule data to update
        :type rule_data: dict

        :returns: a QoS policy rule object
        """
        rule_type = rule_obj.rule_type
        # the API payload nests the actual data under '<rule_type>_rule'
        rule_data = rule_data[rule_type + '_rule']

        with db_api.autonested_transaction(context.session):
            # Ensure we have access to the policy.
            policy = self._get_policy_obj(context, policy_id)
            # Ensure the rule belongs to the policy.
            policy.get_rule_by_id(rule_id)
            rule = rule_obj(context, id=rule_id)
            rule.update_nonidentifying_fields(rule_data, reset_changes=True)
            rule.update()
            # refresh the rule list so the notification reflects the update
            policy.reload_rules()
        self.notification_driver_manager.update_policy(context, policy)
        return rule
Esempio n. 9
0
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk.

        :param context: neutron api request context
        :param trunk_id: id of the trunk to remove subports from
        :param subports: list of subport dicts, each carrying 'port_id'
        :raises SubPortNotFound: if a port is not a subport of the trunk
        :returns: the updated trunk object
        """
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(
                context, basic_validation=True,
                trunk_validation=False)

            # index current subports by port_id so removals are O(1) lookups
            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            trunk.sub_ports = list(current_subports.values())
            # NOTE(review): AFTER_DELETE is emitted while the transaction is
            # still open, i.e. before commit — confirm this is intended
            registry.notify(
                constants.SUBPORTS, events.AFTER_DELETE, self,
                removed_subports=removed_subports)
            return trunk
Esempio n. 10
0
def _update_hook(self, update_orig):
    """Run the original update in a nested transaction, then post-process."""
    with db_api.autonested_transaction(self.obj_context.session):
        # snapshot the changes first: update() resets the change list,
        # and _update_post still needs to see what was modified
        pending_changes = self.obj_get_changes()
        update_orig(self)
        _update_post(self, pending_changes)
Esempio n. 11
0
 def create_trunk(self, context, trunk):
     """Create a trunk.

     :param context: neutron api request context
     :param trunk: request body with the trunk data under key 'trunk'
     :returns: the newly created Trunk object
     """
     trunk = self.validate(context, trunk['trunk'])
     sub_ports = [trunk_objects.SubPort(
                      context=context,
                      port_id=p['port_id'],
                      segmentation_id=p['segmentation_id'],
                      segmentation_type=p['segmentation_type'])
                  for p in trunk['sub_ports']]
     admin_state_up = trunk.get('admin_state_up', True)
     trunk_obj = trunk_objects.Trunk(context=context,
                                     admin_state_up=admin_state_up,
                                     id=uuidutils.generate_uuid(),
                                     name=trunk.get('name', ""),
                                     tenant_id=trunk['tenant_id'],
                                     port_id=trunk['port_id'],
                                     sub_ports=sub_ports)
     with db_api.autonested_transaction(context.session):
         trunk_obj.create()
         # PRECOMMIT subscribers may veto creation by raising here,
         # rolling back the transaction
         payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                          current_trunk=trunk_obj)
         registry.notify(
             constants.TRUNK, events.PRECOMMIT_CREATE, self,
             payload=payload)
     # AFTER_CREATE fires only once the transaction has committed
     registry.notify(
         constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
     return trunk_obj
Esempio n. 12
0
def set_quota_usage(context, resource, tenant_id,
                    in_use=None, delta=False):
    """Set resource quota usage.

    :param context: instance of neutron context with db session
    :param resource: name of the resource for which usage is being set
    :param tenant_id: identifier of the tenant for which quota usage is
                      being set
    :param in_use: integer specifying the new quantity of used resources,
                   or a delta to apply to current used resource
    :param delta: Specifies whether in_use is an absolute number
                  or a delta (default to False)
    :returns: a QuotaUsageInfo instance describing the stored usage record
    """
    with db_api.autonested_transaction(context.session):
        query = common_db_api.model_query(context, quota_models.QuotaUsage)
        query = query.filter_by(resource=resource).filter_by(
            tenant_id=tenant_id)
        usage_data = query.first()
        if not usage_data:
            # Must create entry
            usage_data = quota_models.QuotaUsage(
                resource=resource,
                tenant_id=tenant_id)
            context.session.add(usage_data)
        # Perform explicit comparison with None as 0 is a valid value
        if in_use is not None:
            if delta:
                in_use = usage_data.in_use + in_use
            usage_data.in_use = in_use
        # After an explicit update the dirty bit should always be reset
        usage_data.dirty = False
    return QuotaUsageInfo(usage_data.resource,
                          usage_data.tenant_id,
                          usage_data.in_use,
                          usage_data.dirty)
Esempio n. 13
0
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true that means we are a default security group for
        a given tenant if it does not exist.

        :param context: neutron api request context
        :param security_group: dict with the payload under 'security_group'
            (name, description, tenant_id)
        :param default_sg: True when this group is the tenant's default
        :returns: the created security group as a dict
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }

        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        tenant_id = s['tenant_id']

        if not default_sg:
            # the tenant's default group must exist before any custom group
            self._ensure_default_security_group(context, tenant_id)

        with db_api.autonested_transaction(context.session):
            security_group_db = SecurityGroup(id=s.get('id') or (
                                              uuidutils.generate_uuid()),
                                              description=s['description'],
                                              tenant_id=tenant_id,
                                              name=s['name'])
            context.session.add(security_group_db)
            if default_sg:
                context.session.add(DefaultSecurityGroup(
                    security_group=security_group_db,
                    tenant_id=security_group_db['tenant_id']))
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = SecurityGroupRule(
                        id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                        security_group=security_group_db,
                        direction='ingress',
                        ethertype=ethertype,
                        source_group=security_group_db)
                    context.session.add(ingress_rule)

                egress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction='egress',
                    ethertype=ethertype)
                context.session.add(egress_rule)

            # Bug fix: emit PRECOMMIT_CREATE once per group. It was
            # previously issued inside the loop above, notifying
            # subscribers once per supported ethertype.
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        secgroup_dict = self._make_security_group_dict(security_group_db)

        kwargs['security_group'] = secgroup_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict
Esempio n. 14
0
def map_segment_to_hosts(context, segment_id, hosts):
    """Map segment to a collection of hosts."""
    with db_api.autonested_transaction(context.session):
        session = context.session
        for hostname in hosts:
            mapping = segment_model.SegmentHostMapping(
                segment_id=segment_id, host=hostname)
            session.add(mapping)
Esempio n. 15
0
    def create_policy_rule(self, context, rule_cls, policy_id, rule_data):
        """Create a QoS policy rule.

        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param policy_id: the id of the QosPolicy for which to create the rule
        :type policy_id: str uuid
        :param rule_data: the rule data to be applied
        :type rule_data: dict

        :returns: a QoS policy rule object
        """
        rule_type = rule_cls.rule_type
        # the API payload nests the actual data under '<rule_type>_rule'
        rule_data = rule_data[rule_type + '_rule']

        with db_api.autonested_transaction(context.session):
            # Ensure that we have access to the policy.
            policy = self._get_policy_obj(context, policy_id)
            # reject data that conflicts with existing bandwidth rules
            checker.check_bandwidth_rule_conflict(policy, rule_data)
            rule = rule_cls(context, qos_policy_id=policy_id, **rule_data)
            checker.check_rules_conflict(policy, rule)
            rule.create()
            policy.obj_load_attr('rules')
            self.validate_policy(context, policy)
            # drivers may veto the change before the transaction commits
            self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                     context, policy)

        self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)

        return rule
Esempio n. 16
0
 def create_trunk(self, context, trunk):
     """Create a trunk.

     :param context: neutron api request context
     :param trunk: request body with the trunk data under key 'trunk'
     :returns: the newly created Trunk object
     """
     trunk = self.validate(context, trunk['trunk'])
     sub_ports = [trunk_objects.SubPort(
                      context=context,
                      port_id=p['port_id'],
                      segmentation_id=p['segmentation_id'],
                      segmentation_type=p['segmentation_type'])
                  for p in trunk['sub_ports']]
     admin_state_up = trunk.get('admin_state_up', True)
     # NOTE(status_police): a trunk is created in PENDING status. Depending
     # on the nature of the create request, a driver may set the status
     # immediately to ACTIVE if no physical provisioning is required.
     # Otherwise a transition to BUILD (or ERROR) should be expected
     # depending on how the driver reacts. PRECOMMIT failures prevent the
     # trunk from being created altogether.
     trunk_obj = trunk_objects.Trunk(context=context,
                                     admin_state_up=admin_state_up,
                                     id=uuidutils.generate_uuid(),
                                     name=trunk.get('name', ""),
                                     tenant_id=trunk['tenant_id'],
                                     port_id=trunk['port_id'],
                                     status=constants.PENDING_STATUS,
                                     sub_ports=sub_ports)
     with db_api.autonested_transaction(context.session):
         trunk_obj.create()
         payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                          current_trunk=trunk_obj)
         registry.notify(
             constants.TRUNK, events.PRECOMMIT_CREATE, self,
             payload=payload)
     # AFTER_CREATE fires only once the transaction has committed
     registry.notify(
         constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
     return trunk_obj
Esempio n. 17
0
 def allocate(self, address_request):
     """Allocate an IP address for this subnet per address_request.

     :param address_request: an ipam_req request object; a
         SpecificAddressRequest asks for a particular IP, any other type
         triggers automatic address generation
     :returns: the allocated IP address as a string
     """
     # NOTE(salv-orlando): Creating a new db session might be a rather
     # dangerous thing to do, if executed from within another database
     # transaction. Therefore  the IPAM driver should never be
     # called from within a database transaction, which is also good
     # practice since in the general case these drivers may interact
     # with remote backends
     session = self._context.session
     all_pool_id = None
     auto_generated = False
     with db_api.autonested_transaction(session):
         # NOTE(salv-orlando): It would probably better to have a simpler
         # model for address requests and just check whether there is a
         # specific IP address specified in address_request
         if isinstance(address_request, ipam_req.SpecificAddressRequest):
             # This handles both specific and automatic address requests
             # Check availability of requested IP
             ip_address = str(address_request.address)
             self._verify_ip(session, ip_address)
         else:
             ip_address, all_pool_id = self._generate_ip(session)
             auto_generated = True
         self._allocate_specific_ip(session, ip_address, all_pool_id,
                                    auto_generated)
         # Create IP allocation request object
         # The only defined status at this stage is 'ALLOCATED'.
         # More states will be available in the future - e.g.: RECYCLABLE
         self.subnet_manager.create_allocation(session, ip_address)
         return ip_address
Esempio n. 18
0
    def create_ha_port_and_bind(self, plugin, context, router_id,
                                tenant_id, agent):
        """Creates and binds a new HA port for this agent."""
        ctxt = context.elevated()
        # partials wiring up creation of the HA port and, if needed, the
        # HA network it depends on
        creator = functools.partial(self._add_port_from_net,
                                    plugin, ctxt, router_id, tenant_id)
        dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
        dep_creator = functools.partial(plugin._create_ha_network,
                                        ctxt, tenant_id)
        dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
        dep_id_attr = 'network_id'
        try:
            port_binding = utils.create_object_with_dependency(
                creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0]
            with db_api.autonested_transaction(context.session):
                port_binding.l3_agent_id = agent['id']
        except db_exc.DBDuplicateEntry:
            # a concurrent request already scheduled this router; still
            # fall through and attempt the router binding below
            LOG.debug("Router %(router)s already scheduled for agent "
                      "%(agent)s", {'router': router_id, 'agent': agent['id']})
        except l3.RouterNotFound:
            # router deleted concurrently: nothing left to bind
            LOG.debug('Router %s has already been removed '
                      'by concurrent operation', router_id)
            return

        self.bind_router(context, router_id, agent)
Esempio n. 19
0
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk.

        :param context: neutron api request context
        :param trunk_id: id of the trunk to add subports to
        :param subports: list of subport dicts to add
        :returns: the updated trunk database object
        """
        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        subports = subports_validator.validate(context, basic_validation=True)
        added_subports = []

        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            rules.trunk_can_be_managed(context, trunk)
            for subport in subports:
                obj = trunk_objects.SubPort(
                               context=context,
                               trunk_id=trunk_id,
                               port_id=subport['port_id'],
                               segmentation_type=subport['segmentation_type'],
                               segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)

        # notify subscribers only after the transaction has committed
        registry.notify(
            constants.SUBPORTS, events.AFTER_CREATE, self,
            added_subports=added_subports)
        return trunk
Esempio n. 20
0
    def create_ha_port_and_bind(self, plugin, context, router_id,
                                tenant_id, agent, is_manual_scheduling=False):
        """Creates and binds a new HA port for this agent."""
        ctxt = context.elevated()
        router_db = plugin._get_router(ctxt, router_id)
        # partials wiring up creation of the HA port and, if needed, the
        # HA network it depends on
        creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                    plugin, ctxt, router_db, tenant_id)
        dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
        dep_creator = functools.partial(plugin._create_ha_network,
                                        ctxt, tenant_id)
        dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
        dep_id_attr = 'network_id'

        # This might fail in case of concurrent calls, which is good for us
        # as we can skip the rest of this function.
        binding = self.bind_router(
            plugin, context, router_id, agent['id'],
            is_manual_scheduling=is_manual_scheduling, is_ha=True)
        if not binding:
            return

        try:
            port_binding = utils.create_object_with_dependency(
                creator, dep_getter, dep_creator,
                dep_id_attr, dep_deleter)[0]
            with db_api.autonested_transaction(context.session):
                port_binding.l3_agent_id = agent['id']
        except db_exc.DBDuplicateEntry:
            # a concurrent request already scheduled this router
            LOG.debug("Router %(router)s already scheduled for agent "
                      "%(agent)s", {'router': router_id,
                                    'agent': agent['id']})
        except l3.RouterNotFound:
            # router deleted concurrently: nothing left to do
            LOG.debug('Router %s has already been removed '
                      'by concurrent operation', router_id)
Esempio n. 21
0
 def bind(self, context, agents, network_id):
     """Bind the network to the agents."""
     # customize the bind logic
     bound_agents = agents[:]
     for agent in agents:
         # saving agent_id to use it after rollback to avoid
         # DetachedInstanceError
         agent_id = agent.id
         binding = ndab_model.NetworkDhcpAgentBinding()
         binding.dhcp_agent_id = agent_id
         binding.network_id = network_id
         try:
             with db_api.autonested_transaction(context.session):
                 context.session.add(binding)
                 # try to actually write the changes and catch integrity
                 # DBDuplicateEntry
         except db_exc.DBDuplicateEntry:
             # it's totally ok, someone just did our job!
             bound_agents.remove(agent)
             LOG.info(_LI('Agent %s already present'), agent_id)
         # NOTE(review): this debug line runs even for agents dropped as
         # duplicates above — confirm that is intended
         LOG.debug('Network %(network_id)s is scheduled to be '
                   'hosted by DHCP agent %(agent_id)s',
                   {'network_id': network_id,
                    'agent_id': agent_id})
     super(DhcpFilter, self).bind(context, bound_agents, network_id)
Esempio n. 22
0
    def create(self):
        """Persist the subport, mapping DB errors to trunk exceptions.

        :raises TrunkNotFound: if the referenced trunk does not exist
        :raises PortNotFound: if the referenced port does not exist
        :raises DuplicateSubPort: if the segmentation details collide with
            an existing subport on the same trunk
        """
        with db_api.autonested_transaction(self.obj_context.session):
            try:
                super(SubPort, self).create()
            except o_db_exc.DBReferenceError as ex:
                if ex.key_table is None:
                    # NOTE(ivc): 'key_table' is provided by 'oslo.db' [1]
                    # only for a limited set of database backends (i.e.
                    # MySQL and PostgreSQL). Other database backends
                    # (including SQLite) would have 'key_table' set to None.
                    # We emulate the 'key_table' support for such database
                    # backends.
                    #
                    # [1] https://github.com/openstack/oslo.db/blob/3fadd5a
                    #     /oslo_db/sqlalchemy/exc_filters.py#L190-L203
                    if not Trunk.get_object(self.obj_context,
                                            id=self.trunk_id):
                        ex.key_table = Trunk.db_model.__tablename__

                if ex.key_table == Trunk.db_model.__tablename__:
                    raise t_exc.TrunkNotFound(trunk_id=self.trunk_id)

                raise n_exc.PortNotFound(port_id=self.port_id)
            except o_exc.NeutronDbObjectDuplicateEntry:
                raise t_exc.DuplicateSubPort(
                    segmentation_type=self.segmentation_type,
                    segmentation_id=self.segmentation_id,
                    trunk_id=self.trunk_id)
Esempio n. 23
0
 def update(self):
     """Persist changes, keeping the default-policy marker in sync first."""
     with db_api.autonested_transaction(self.obj_context.session):
         if 'is_default' in self.obj_what_changed():
             # flip the per-project default marker before saving the rest
             if self.is_default:
                 self.set_default()
             else:
                 self.unset_default()
         super(QosPolicy, self).update()
Esempio n. 24
0
 def update(self):
     """Persist changed fields, then refresh SG and QoS associations."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         super(Port, self).update()
         # reconcile relational attributes tracked outside the port row
         for field, attach in (
                 ('security_group_ids', self._attach_security_groups),
                 ('qos_policy_id', self._attach_qos_policy)):
             if field in changed:
                 attach(changed[field])
Esempio n. 25
0
 def update(self):
     """Persist changed fields, then refresh DNS and QoS associations."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         super(Network, self).update()
         # reconcile relational attributes tracked outside the network row
         for field, apply_change in (
                 ('dns_domain', self._set_dns_domain),
                 ('qos_policy_id', self._attach_qos_policy)):
             if field in changed:
                 apply_change(changed[field])
Esempio n. 26
0
 def update(self):
     """Save pending changes and propagate DNS/QoS side effects."""
     pending = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         super(Network, self).update()
         if 'dns_domain' in pending:
             # dns_domain lives in a side table keyed by the network
             self._set_dns_domain(pending['dns_domain'])
         if 'qos_policy_id' in pending:
             # QoS binding is stored as an association, not a column
             self._attach_qos_policy(pending['qos_policy_id'])
Esempio n. 27
0
 def update(self):
     """Save pending changes; toggle the default flag when it changed."""
     with db_api.autonested_transaction(self.obj_context.session):
         default_changed = 'is_default' in self.obj_what_changed()
         if default_changed:
             # route to the appropriate marker operation
             toggle = self.set_default if self.is_default else self.unset_default
             toggle()
         super(QosPolicy, self).update()
Esempio n. 28
0
def delete_mapping(openstack_id):
    """Delete the omni resource mapping for *openstack_id*, if one exists.

    Deletion is idempotent: a missing mapping is silently ignored.
    """
    LOG.debug('Deleting mapping for - %s', openstack_id)
    session = db_api.get_session()
    with db_api.autonested_transaction(session) as tx:
        record = (tx.session.query(omni_resources.OmniResources)
                  .filter_by(openstack_id=openstack_id)
                  .first())
        if record is not None:
            tx.session.delete(record)
Esempio n. 29
0
 def update(self):
     """Persist changed fields, re-attaching synthetic associations."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         super(Port, self).update()
         # Security groups and the QoS policy are stored in association
         # tables; re-attach them only when the caller changed them.
         if 'security_group_ids' in changed:
             self._attach_security_groups(changed['security_group_ids'])
         if 'qos_policy_id' in changed:
             self._attach_qos_policy(changed['qos_policy_id'])
Esempio n. 30
0
 def create(self):
     """Create the segment row, then attach any requested hosts."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         # Snapshot the synthetic 'hosts' value before super().create(),
         # which presumably resets synthetic fields — TODO confirm.
         pending_hosts = self.hosts
         if pending_hosts is None:
             pending_hosts = []
         super(NetworkSegment, self).create()
         if 'hosts' in changed:
             self._attach_hosts(pending_hosts)
Esempio n. 31
0
 def get_objects(cls, context, _pager=None, validate_filters=True,
                 **kwargs):
     """Fetch subnet pools and eagerly load each pool's prefixes."""
     with db_api.autonested_transaction(context.session):
         pools = super(SubnetPool, cls).get_objects(
             context, _pager, validate_filters, **kwargs)
         # Prefixes are synthetic; load them while the session is open.
         for pool in pools:
             pool.reload_prefixes()
         return pools
Esempio n. 32
0
 def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
     """Delete a bandwidth-limit rule and notify about the policy change."""
     # Keep a reference to the policy object so the resource update can be
     # pushed to the notification drivers after the transaction ends.
     with db_api.autonested_transaction(context.session):
         # Fetching the policy doubles as the access check.
         policy = self._get_policy_obj(context, policy_id)
         policy.get_rule_by_id(rule_id).delete()
         policy.reload_rules()
     self.notification_driver_manager.update_policy(context, policy)
Esempio n. 33
0
 def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
     """Delete a bandwidth-limit rule and notify about the policy change."""
     # Keep a reference to the policy object so the resource update can be
     # pushed to the notification drivers after the transaction ends.
     with db_api.autonested_transaction(context.session):
         # Fetching the policy doubles as the access check.
         policy = self._get_policy_obj(context, policy_id)
         policy.get_rule_by_id(rule_id).delete()
         policy.reload_rules()
     self.notification_driver_manager.update_policy(context, policy)
Esempio n. 34
0
 def create(self):
     """Create the segment row, then attach any requested hosts."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         # Snapshot the synthetic 'hosts' value before super().create(),
         # which presumably resets synthetic fields — TODO confirm.
         pending_hosts = self.hosts
         if pending_hosts is None:
             pending_hosts = []
         super(NetworkSegment, self).create()
         if 'hosts' in changed:
             self._attach_hosts(pending_hosts)
Esempio n. 35
0
    def create_ha_port_and_bind(self,
                                plugin,
                                context,
                                router_id,
                                tenant_id,
                                agent,
                                is_manual_scheduling=False):
        """Creates and binds a new HA port for this agent.

        Binds the router to the agent first; if binding fails (concurrent
        call already bound it) the function returns without creating a
        port. Otherwise an HA port is created, with its HA network created
        on demand, and the port binding is associated with the agent.
        """
        ctxt = context.elevated()
        router_db = plugin._get_router(ctxt, router_id)
        # Partials wiring the HA-port creation to its HA-network dependency:
        # the port creator, and getter/creator/deleter for the HA network
        # keyed by 'network_id'.
        creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                    plugin, ctxt, router_db, tenant_id)
        dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
        dep_creator = functools.partial(plugin._create_ha_network, ctxt,
                                        tenant_id)
        dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
        dep_id_attr = 'network_id'

        # This might fail in case of concurrent calls, which is good for us
        # as we can skip the rest of this function.
        binding = self.bind_router(plugin,
                                   context,
                                   router_id,
                                   agent['id'],
                                   is_manual_scheduling=is_manual_scheduling,
                                   is_ha=True)
        if not binding:
            return

        try:
            port_binding = utils.create_object_with_dependency(
                creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0]
            with db_api.autonested_transaction(context.session):
                port_binding.l3_agent_id = agent['id']
        except db_exc.DBDuplicateEntry:
            # NOTE(review): if the duplicate was raised by
            # create_object_with_dependency itself, 'port_binding' would be
            # unbound here — confirm the duplicate can only come from the
            # l3_agent_id assignment.
            LOG.debug(
                "Router %(router)s already scheduled for agent "
                "%(agent)s", {
                    'router': router_id,
                    'agent': agent['id']
                })
            port_id = port_binding.port_id
            # Below call will also delete entry from L3HARouterAgentPortBinding
            # and RouterPort tables
            plugin._core_plugin.delete_port(context,
                                            port_id,
                                            l3_port_check=False)
        except l3.RouterNotFound:
            LOG.debug(
                'Router %s has already been removed '
                'by concurrent operation', router_id)
            # we try to clear the HA network here in case the port we created
            # blocked the concurrent router delete operation from getting rid
            # of the HA network
            ha_net = plugin.get_ha_network(ctxt, tenant_id)
            if ha_net:
                plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id)
Esempio n. 36
0
 def get_objects(cls, context, _pager=None, validate_filters=True,
                 **kwargs):
     """Fetch subnet pools and eagerly load each pool's prefixes."""
     with db_api.autonested_transaction(context.session):
         pools = super(SubnetPool, cls).get_objects(
             context, _pager, validate_filters, **kwargs)
         # Prefixes are synthetic; load them while the session is open.
         for pool in pools:
             pool.reload_prefixes()
         return pools
Esempio n. 37
0
 def update_trunk(self, context, trunk_id, trunk):
     """Update information for the specified trunk."""
     requested = trunk['trunk']
     with db_api.autonested_transaction(context.session):
         trunk_obj = self._get_trunk(context, trunk_id)
         # Only non-identifying fields may change; identifiers are fixed.
         trunk_obj.update_nonidentifying_fields(requested,
                                                reset_changes=True)
         trunk_obj.update()
         return trunk_obj
Esempio n. 38
0
    def create_port(self, context, port):
        """Create a Neutron port and mirror it into MidoNet.

        The Neutron DB work and the MidoNet precommit run inside one
        transaction; the MidoNet postcommit runs after commit, and on
        failure the just-created port is deleted again (best effort).
        """
        LOG.debug("MidonetMixin.create_port called: port=%r", port)

        port_data = port['port']
        # REVISIT(yamamoto): this nested transaction is a workaround
        # for bug #1490917.
        with db_api.autonested_transaction(context.session):
            # Create a Neutron port
            new_port = super(MidonetMixin, self).create_port(context, port)

            # Do not create a gateway port if it has no IP address assigned as
            # MidoNet does not yet handle this case.
            if (new_port.get('device_owner') == n_const.DEVICE_OWNER_ROUTER_GW
                    and not new_port['fixed_ips']):
                msg = (_("No IPs assigned to the gateway port for"
                         " router %s") % port_data['device_id'])
                raise n_exc.BadRequest(resource='router', msg=msg)

            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])

            # Make sure that the port created is valid
            if "id" not in new_port:
                raise n_exc.BadRequest(resource='port',
                                       msg="Invalid port created")

            # Update fields
            port_data.update(new_port)

            # Bind security groups to the port
            self._ensure_default_security_group_on_port(context, port)
            sg_ids = self._get_security_groups_on_port(context, port)
            self._process_port_create_security_group(context, new_port, sg_ids)

            # Process port bindings
            self._process_portbindings_create_and_update(context, port_data,
                                                         new_port)

            self._process_port_create_extra_dhcp_opts(context, new_port,
                                                      dhcp_opts)
            # MidoNet precommit must happen inside the transaction so a
            # failure rolls back the Neutron port as well.
            self.client.create_port_precommit(context, new_port)

        try:
            self.client.create_port_postcommit(new_port)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to create a port %(new_port)s: %(err)s"),
                          {"new_port": new_port, "err": ex})
                # Roll back the Neutron port; swallow cleanup failures so
                # the original exception is the one re-raised.
                try:
                    self.delete_port(context, new_port['id'],
                                     l3_port_check=False)
                except Exception:
                    LOG.exception(_LE("Failed to delete port %s"),
                                  new_port['id'])

        LOG.debug("MidonetMixin.create_port exiting: port=%r", new_port)
        return new_port
Esempio n. 39
0
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk.

        Validates the requested subports, forces the trunk status to DOWN
        (unless it is in ERROR), creates the SubPort objects inside the
        transaction, and fires PRECOMMIT_CREATE inside / AFTER_CREATE
        outside the transaction.
        """
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)

            # Check for basic validation since the request body here is not
            # automatically validated by the API layer.
            subports = subports['sub_ports']
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports, trunk['port_id'])
            subports = subports_validator.validate(context,
                                                   basic_validation=True)
            added_subports = []

            rules.trunk_can_be_managed(context, trunk)
            # Deep copy so the payload can carry the pre-change state.
            original_trunk = copy.deepcopy(trunk)
            # NOTE(status_police): the trunk status should transition to
            # DOWN (and finally in ACTIVE or ERROR), only if it is not in
            # ERROR status already. A user should attempt to resolve the ERROR
            # condition before adding more subports to the trunk. Should a
            # trunk be in DOWN or BUILD state (e.g. when dealing with
            # multiple concurrent requests), the status is still forced to
            # DOWN and thus can potentially overwrite an interleaving state
            # change to ACTIVE. Eventually the driver should bring the status
            # back to ACTIVE or ERROR.
            if trunk.status == constants.ERROR_STATUS:
                raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
            else:
                trunk.update(status=constants.DOWN_STATUS)

            for subport in subports:
                obj = trunk_objects.SubPort(
                    context=context,
                    trunk_id=trunk_id,
                    port_id=subport['port_id'],
                    segmentation_type=subport['segmentation_type'],
                    segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)
            payload = callbacks.TrunkPayload(context,
                                             trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=added_subports)
            # PRECOMMIT fires inside the transaction so subscribers can
            # abort it; AFTER_CREATE below fires once it has committed.
            if added_subports:
                registry.notify(constants.SUBPORTS,
                                events.PRECOMMIT_CREATE,
                                self,
                                payload=payload)
        if added_subports:
            registry.notify(constants.SUBPORTS,
                            events.AFTER_CREATE,
                            self,
                            payload=payload)
        return trunk
Esempio n. 40
0
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk.

        Validates the request, deletes the matching SubPort objects, forces
        the trunk status to PENDING, and fires PRECOMMIT_DELETE inside /
        AFTER_DELETE outside the transaction.
        """
        subports = subports['sub_ports']
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            # Deep copy so the payload can carry the pre-change state.
            original_trunk = copy.deepcopy(trunk)
            rules.trunk_can_be_managed(context, trunk)

            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(context,
                                                   basic_validation=True,
                                                   trunk_validation=False)

            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []

            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)

                # Requesting removal of a subport not on the trunk is an
                # error; fail the whole operation.
                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)

            # Rebuild the in-memory subport list from what survived.
            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            # NOTE(status_police): the trunk status should transition to
            # PENDING irrespective of the status in which it is in to allow
            # the user to resolve potential conflicts due to prior add_subports
            # operations.
            # Should a trunk be in PENDING or BUILD state (e.g. when dealing
            # with multiple concurrent requests), the status is still forced
            # to PENDING. See add_subports() for more details.
            trunk.status = constants.PENDING_STATUS
            trunk.update()
            payload = callbacks.TrunkPayload(context,
                                             trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=removed_subports)
            # PRECOMMIT fires inside the transaction so subscribers can
            # abort it; AFTER_DELETE below fires once it has committed.
            if removed_subports:
                registry.notify(constants.SUBPORTS,
                                events.PRECOMMIT_DELETE,
                                self,
                                payload=payload)
        if removed_subports:
            registry.notify(constants.SUBPORTS,
                            events.AFTER_DELETE,
                            self,
                            payload=payload)
        return trunk
Esempio n. 41
0
    def update(self):
        """Validate changed persistent fields and write them to the DB row."""
        changes = self._validate_changed_fields(
            self._get_changed_persistent_fields())

        with db_api.autonested_transaction(self.obj_context.session):
            db_row = obj_db_api.update_object(
                self.obj_context, self.db_model,
                self.modify_fields_to_db(changes),
                **self.modify_fields_to_db(self._get_composite_keys()))
            # Refresh this object from the row so DB-side values are
            # reflected and change tracking is reset.
            self.from_db_object(db_row)
Esempio n. 42
0
def delete_completed_rows(context):
    """Journal maintenance operation for deleting completed rows."""
    rows_retention = cfg.CONF.ml2_odl.completed_rows_retention
    # A retention of zero or less means completed rows are kept forever.
    if rows_retention <= 0:
        return

    LOG.debug("Deleting completed rows")
    with db_api.autonested_transaction(context.session):
        db.delete_rows_by_state_and_time(
            context.session,
            odl_const.COMPLETED,
            timedelta(seconds=rows_retention))
Esempio n. 43
0
 def create(self):
     """Create the pool row plus one prefix row per configured prefix."""
     synthetic = self._get_changed_synthetic_fields()
     with db_api.autonested_transaction(self.obj_context.session):
         super(SubnetPool, self).create()
         # Prefixes are synthetic; persist them as separate rows.
         if 'prefixes' in synthetic:
             for cidr in self.prefixes:
                 SubnetPoolPrefix(self.obj_context,
                                  subnetpool_id=self.id,
                                  cidr=cidr).create()
         self.reload_prefixes()
Esempio n. 44
0
def get_rules(context, qos_policy_id):
    """Return every QoS rule, of any valid type, bound to the policy."""
    collected = []
    with db_api.autonested_transaction(context.session):
        for rule_type in qos_consts.VALID_RULE_TYPES:
            # Rule classes in this module follow the Qos<Type>Rule naming
            # convention, so resolve them by name.
            cls_name = 'Qos%sRule' % utils.camelize(rule_type)
            rule_cls = getattr(sys.modules[__name__], cls_name)
            collected.extend(
                rule_cls.get_objects(context, qos_policy_id=qos_policy_id))
    return collected
Esempio n. 45
0
 def create(self):
     """Create the reservation and one row per resource delta."""
     # Capture the synthetic deltas before super().create(), which
     # presumably resets synthetic fields — TODO confirm.
     pending_deltas = self.resource_deltas
     with db_api.autonested_transaction(self.obj_context.session):
         super(Reservation, self).create()
         if pending_deltas:
             for delta in pending_deltas:
                 delta.reservation_id = self.id
                 delta.create()
                 self.resource_deltas.append(delta)
             # The field now mirrors the DB; clear its change marker.
             self.obj_reset_changes(['resource_deltas'])
Esempio n. 46
0
 def create(self):
     """Create the network row, then attach synthetic associations."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         # Snapshot synthetic values before super().create(), which
         # presumably resets synthetic fields — TODO confirm.
         pending_domain = self.dns_domain
         pending_policy = self.qos_policy_id
         super(Network, self).create()
         if 'dns_domain' in changed:
             self._set_dns_domain(pending_domain)
         if 'qos_policy_id' in changed:
             self._attach_qos_policy(pending_policy)
Esempio n. 47
0
 def _create_port_with_mac(self, context, network_id, port_data,
                           mac_address):
     """Add a port row with the given MAC; raise MacAddressInUse on dup."""
     try:
         # since this method could either be used within or outside the
         # transaction, use convenience method to avoid passing a flag
         with db_api.autonested_transaction(context.session):
             port_row = models_v2.Port(mac_address=mac_address, **port_data)
             context.session.add(port_row)
             return port_row
     except db_exc.DBDuplicateEntry:
         # Translate the DB constraint violation into the API-level error.
         raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address)
Esempio n. 48
0
 def get_policy_bandwidth_limit_rule(self, context, rule_id,
                                     policy_id, fields=None):
     """Return one bandwidth-limit rule after checking policy access."""
     with db_api.autonested_transaction(context.session):
         # Fetching the policy doubles as the access check.
         self._get_policy_obj(context, policy_id)
         rule = rule_object.QosBandwidthLimitRule.get_by_id(
             context, rule_id)
     if not rule:
         raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
     return rule
Esempio n. 49
0
 def create(self):
     """Create the port row, then attach synthetic associations."""
     changed = self.obj_get_changes()
     with db_api.autonested_transaction(self.obj_context.session):
         # Snapshot synthetic values before super().create(), which
         # presumably resets synthetic fields — TODO confirm.
         pending_sgs = self.security_group_ids
         if pending_sgs is None:
             pending_sgs = set()
         pending_policy = self.qos_policy_id
         super(Port, self).create()
         if 'security_group_ids' in changed:
             self._attach_security_groups(pending_sgs)
         if 'qos_policy_id' in changed:
             self._attach_qos_policy(pending_policy)
Esempio n. 50
0
    def create(self):
        """Persist the changed persistent fields as a new DB row.

        :raises NeutronDbObjectDuplicateEntry: when the insert violates a
            unique constraint.
        """
        fields = self._get_changed_persistent_fields()
        with db_api.autonested_transaction(self._context.session):
            try:
                db_obj = obj_db_api.create_object(
                    self._context, self.db_model,
                    self.modify_fields_to_db(fields))
            # Name the exception 'dup_err' instead of 'db_exc', which
            # shadowed the conventional oslo.db exceptions module alias.
            except obj_exc.DBDuplicateEntry as dup_err:
                raise NeutronDbObjectDuplicateEntry(
                    object_class=self.__class__, db_exception=dup_err)

            # Refresh from the row so DB defaults/server-side values land.
            self.from_db_object(db_obj)
Esempio n. 51
0
    def delete(self):
        """Delete the policy unless it is still bound to any resource."""
        with db_api.autonested_transaction(self.obj_context.session):
            # Refuse deletion while any binding row still references us.
            for object_type, model in self.binding_models.items():
                binding = obj_db_api.get_object(
                    self.obj_context, model, policy_id=self.id)
                if binding:
                    raise exceptions.QosPolicyInUse(
                        policy_id=self.id,
                        object_type=object_type,
                        object_id=binding['%s_id' % object_type])

            super(QosPolicy, self).delete()
Esempio n. 52
0
 def get_policy_bandwidth_limit_rules(self, context, policy_id,
                                      filters=None, fields=None,
                                      sorts=None, limit=None,
                                      marker=None, page_reverse=False):
     """List a policy's bandwidth-limit rules after checking access.

     :param filters: optional extra filters; the policy id filter is
         always enforced on top of them.
     """
     # make sure we have access to the policy when fetching rules
     with db_api.autonested_transaction(context.session):
         # first, validate that we have access to the policy
         self._get_policy_obj(context, policy_id)
         # Copy instead of mutating: the original code wrote the policy-id
         # key into the caller-supplied 'filters' dict in place.
         rule_filters = dict(filters) if filters else dict()
         rule_filters[qos_consts.QOS_POLICY_ID] = policy_id
         return rule_object.QosBandwidthLimitRule.get_objects(context,
                                                              **rule_filters)
Esempio n. 53
0
def delete_rows_by_state_and_time(session, state, time_delta):
    """Delete journal rows in *state* last retried more than *time_delta* ago.

    NOTE(mpeterson): rows are removed one at a time because InnoDB ignores
    the WHERE clause when issuing the LOCK for a DELETE; executing each
    operation independently minimizes exposure to deadlocks.
    """
    with db_api.autonested_transaction(session):
        # Use the database server's clock, not the local one.
        now = session.execute(func.now()).scalar()
        cutoff = now - time_delta
        stale_rows = session.query(models.OpenDaylightJournal).filter(
            models.OpenDaylightJournal.state == state,
            models.OpenDaylightJournal.last_retried < cutoff).all()
        for row in stale_rows:
            delete_row(session, row, flush=False)
        # Deletions were not flushed individually; drop cached state.
        session.expire_all()
Esempio n. 54
0
    def get_object(cls, context, **kwargs):
        """Fetch a policy regardless of tenant, then enforce access."""
        # We want to get the policy regardless of its tenant id. We'll make
        # sure the tenant has permission to access the policy later on.
        admin_context = context.elevated()
        with db_api.autonested_transaction(admin_context.session):
            policy = super(QosPolicy, cls).get_object(admin_context, **kwargs)
            # Hide policies the requesting tenant may not access.
            if not policy or not cls.is_accessible(context, policy):
                return
            policy.reload_rules()
            return policy
Esempio n. 55
0
 def create_policy_dscp_marking_rule(self, context, policy_id,
                                     dscp_marking_rule):
     """Create a DSCP marking rule under a policy and notify drivers."""
     with db_api.autonested_transaction(context.session):
         # Fetching the policy doubles as the access check.
         policy = self._get_policy_obj(context, policy_id)
         rule = rule_object.QosDscpMarkingRule(
             context,
             qos_policy_id=policy_id,
             **dscp_marking_rule['dscp_marking_rule'])
         rule.create()
         policy.reload_rules()
     # Push the resource update only after the transaction ends.
     self.notification_driver_manager.update_policy(context, policy)
     return rule
Esempio n. 56
0
 def update(self):
     """Persist pool changes; rewrite prefix rows if prefixes changed."""
     with db_api.autonested_transaction(self.obj_context.session):
         synthetic = self._get_changed_synthetic_fields()
         super(SubnetPool, self).update()
         # Prefixes are replaced wholesale rather than diffed.
         if synthetic and 'prefixes' in synthetic:
             SubnetPoolPrefix.delete_objects(self.obj_context,
                                             subnetpool_id=self.id)
             for cidr in self.prefixes:
                 SubnetPoolPrefix(self.obj_context,
                                  subnetpool_id=self.id,
                                  cidr=cidr).create()
Esempio n. 57
0
File: plugin.py Project: vj17/hdn
 def create_port(self, context, port):
     """Create a port in PENDING_CREATE and queue it for HDN operators."""
     # The port stays PENDING_CREATE until HDN operators act on it.
     port['port']['status'] = constants.STATUS_PENDING_CREATE
     with db_api.autonested_transaction(context.session):
         new_port = super(HdnNeutronPlugin, self).create_port(context, port)
     registry.notify(resources.PORT, events.AFTER_CREATE, self,
                     tenant_id=context.tenant_id,
                     resource_id=new_port['id'])
     # Notify HDN operators
     hdnlib.notify_port_create(new_port)
     LOG.debug("Queued request to create port: %s", new_port['id'])
     return new_port
Esempio n. 58
0
 def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id,
                                        bandwidth_limit_rule):
     """Update a bandwidth-limit rule and push a policy notification."""
     # Keep a reference to the policy object so the resource update can be
     # pushed to the notification drivers after the transaction ends.
     with db_api.autonested_transaction(context.session):
         # Fetching the policy doubles as the access check.
         policy = self._get_policy_obj(context, policy_id)
         rule = rule_object.QosBandwidthLimitRule(
             context, **bandwidth_limit_rule['bandwidth_limit_rule'])
         rule.id = rule_id
         rule.update()
         policy.reload_rules()
     self.notification_driver_manager.update_policy(context, policy)
     return rule
Esempio n. 59
0
 def create(self):
     """Create the group; register it as the project default when flagged."""
     # save is_default before super() resets it to False
     make_default = self.is_default
     with db_api.autonested_transaction(self.obj_context.session):
         super(SecurityGroup, self).create()
         if make_default:
             DefaultSecurityGroup(self.obj_context,
                                  project_id=self.project_id,
                                  security_group_id=self.id).create()
             # Restore the synthetic flag and clear its change marker.
             self.is_default = True
             self.obj_reset_changes(['is_default'])
Esempio n. 60
0
 def get_objects(cls, context, **kwargs):
     """List policies regardless of tenant, filtering by accessibility."""
     # We want to get the policy regardless of its tenant id. We'll make
     # sure the tenant has permission to access the policy later on.
     admin_context = context.elevated()
     with db_api.autonested_transaction(admin_context.session):
         accessible = []
         for obj in super(QosPolicy, cls).get_objects(admin_context,
                                                      **kwargs):
             if cls.is_accessible(context, obj):
                 obj.reload_rules()
                 accessible.append(obj)
         return accessible