def delete_trunk(self, context, trunk_id): """Delete the trunk port.""" LOG.debug("Deleting trunk_id %s", trunk_id) deleted_from_db = False with db_context_writer.using(context): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) if not trunk_port_validator.is_bound(context): trunk.delete() deleted_from_db = True payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=trunk) registry.notify(trunk_const.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: raise trunk_exc.TrunkInUse(trunk_id=trunk_id) if deleted_from_db: registry.notify(trunk_const.TRUNK, events.AFTER_DELETE, self, payload=payload)
def update_trunk(self, context, trunk_id, trunk): """Update information for the specified trunk.""" trunk_data = trunk['trunk'] with db_api.context_manager.writer.using(context): trunk_obj = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk_obj) # NOTE(status_police): a trunk status should not change during an # update_trunk(), even in face of PRECOMMIT failures. This is # because only name and admin_state_up are being affected, and # these are DB properties only. trunk_obj.update_fields(trunk_data, reset_changes=True) trunk_obj.update() payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=original_trunk, current_trunk=trunk_obj) registry.notify(constants.TRUNK, events.PRECOMMIT_UPDATE, self, payload=payload) registry.notify(constants.TRUNK, events.AFTER_UPDATE, self, payload=payload) return trunk_obj
def delete_trunk(self, context, trunk_id): """Delete the specified trunk.""" with db_api.CONTEXT_WRITER.using(context): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) if trunk_port_validator.can_be_trunked_or_untrunked(context): # NOTE(status_police): when a trunk is deleted, the logical # object disappears from the datastore, therefore there is no # status transition involved. If PRECOMMIT failures occur, # the trunk remains in the status where it was. try: trunk.delete() except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning( 'Trunk driver raised exception when ' 'deleting trunk port %s: %s', trunk_id, str(e)) payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=trunk) registry.notify(resources.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: LOG.info( 'Trunk driver does not consider trunk %s ' 'untrunkable', trunk_id) raise trunk_exc.TrunkInUse(trunk_id=trunk_id) registry.notify(resources.TRUNK, events.AFTER_DELETE, self, payload=payload)
def update_trunk(self, context, trunk_id, trunk): """Update information for the specified trunk.""" trunk_data = trunk['trunk'] with db_api.autonested_transaction(context.session): trunk_obj = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk_obj) # NOTE(status_police): a trunk status should not change during an # update_trunk(), even in face of PRECOMMIT failures. This is # because only name and admin_state_up are being affected, and # these are DB properties only. trunk_obj.update_fields(trunk_data, reset_changes=True) trunk_obj.update() payload = events.DBEventPayload(context, resource_id=trunk_id, states=(original_trunk, ), desired_state=trunk_obj, request_body=trunk_data) registry.publish(resources.TRUNK, events.PRECOMMIT_UPDATE, self, payload=payload) registry.notify(resources.TRUNK, events.AFTER_UPDATE, self, payload=callbacks.TrunkPayload( context, trunk_id, original_trunk=original_trunk, current_trunk=trunk_obj)) return trunk_obj
def create_trunk(self, context, trunk): """Create a trunk.""" trunk = self.validate(context, trunk['trunk']) sub_ports = [ trunk_objects.SubPort(context=context, port_id=p['port_id'], segmentation_id=p['segmentation_id'], segmentation_type=p['segmentation_type']) for p in trunk['sub_ports'] ] admin_state_up = trunk.get('admin_state_up', True) trunk_obj = trunk_objects.Trunk(context=context, admin_state_up=admin_state_up, id=uuidutils.generate_uuid(), name=trunk.get('name', ""), tenant_id=trunk['tenant_id'], port_id=trunk['port_id'], sub_ports=sub_ports) with db_api.autonested_transaction(context.session): trunk_obj.create() payload = callbacks.TrunkPayload(context, trunk_obj.id, current_trunk=trunk_obj) registry.notify(constants.TRUNK, events.PRECOMMIT_CREATE, self, payload=payload) registry.notify(constants.TRUNK, events.AFTER_CREATE, self, payload=payload) return trunk_obj
def create_trunk(self, context, trunk): """Create a trunk.""" trunk = self.validate(context, trunk['trunk']) sub_ports = [trunk_objects.SubPort( context=context, port_id=p['port_id'], segmentation_id=p['segmentation_id'], segmentation_type=p['segmentation_type']) for p in trunk['sub_ports']] admin_state_up = trunk.get('admin_state_up', True) # NOTE(status_police): a trunk is created in PENDING status. Depending # on the nature of the create request, a driver may set the status # immediately to ACTIVE if no physical provisioning is required. # Otherwise a transition to BUILD (or ERROR) should be expected # depending on how the driver reacts. PRECOMMIT failures prevent the # trunk from being created altogether. trunk_obj = trunk_objects.Trunk(context=context, admin_state_up=admin_state_up, id=uuidutils.generate_uuid(), name=trunk.get('name', ""), tenant_id=trunk['tenant_id'], port_id=trunk['port_id'], status=constants.PENDING_STATUS, sub_ports=sub_ports) with db_api.autonested_transaction(context.session): trunk_obj.create() payload = callbacks.TrunkPayload(context, trunk_obj.id, current_trunk=trunk_obj) registry.notify( constants.TRUNK, events.PRECOMMIT_CREATE, self, payload=payload) registry.notify( constants.TRUNK, events.AFTER_CREATE, self, payload=payload) return trunk_obj
def delete_trunk(self, context, trunk_id): """Delete the specified trunk.""" with db_api.autonested_transaction(context.session): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) if trunk_port_validator.can_be_trunked_or_untrunked(context): # NOTE(status_police): when a trunk is deleted, the logical # object disappears from the datastore, therefore there is no # status transition involved. If PRECOMMIT failures occur, # the trunk remains in the status where it was. trunk.delete() payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=trunk) registry.notify(resources.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: raise trunk_exc.TrunkInUse(trunk_id=trunk_id) registry.notify(resources.TRUNK, events.AFTER_DELETE, self, payload=payload)
def test_unset_subport(self):
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    device_owner = 'trunk:subport'
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    # Parent (trunk) port already bound to host1 on behalf of a VM
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id=network_id,
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    # After the subport is removed the tenant has no resources left
    mechanism_arista.db_lib.tenant_provisioned.return_value = False
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    resource = 'SubPort'
    event = 'AFTER_DELETE'
    trigger = 'AristaDriver'
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    payload = callbacks.TrunkPayload(None, trunk_id,
                                     subports=[trunk_models.SubPort()])
    sp = models_v2.Port(id=port_id, device_owner='trunk:subport',
                        network_id=network_id)
    self.drv.ndb.get_port.return_value = sp

    # Deliver the AFTER_DELETE SubPort event to the driver callback
    self.drv.unset_subport(resource, event, trigger, payload=payload)

    # The subport must be unplugged and the now-empty tenant removed
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.unplug_port_from_network(vm_id, device_owner, host_id,
                                           port_id, network_id, tenant_id,
                                           [], vnic_type,
                                           switch_bindings=profile,
                                           trunk_details=None),
        mock.call.remove_security_group([], profile),
        mock.call.tenant_provisioned(tenant_id),
        mock.call.delete_tenant(tenant_id),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)
def _test_trunk_create_notify(self, event):
    with self.port() as parent_port:
        callback = register_mock_callback(constants.TRUNK, event)
        trunk = self._create_test_trunk(parent_port)
        trunk_obj = self._get_trunk_obj(trunk['id'])
        payload = callbacks.TrunkPayload(self.context, trunk['id'],
                                         current_trunk=trunk_obj)
        callback.assert_called_once_with(
            constants.TRUNK, event, self.trunk_plugin, payload=payload)
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from trunk."""
    subports = subports['sub_ports']
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        # the subports are being removed, therefore we do not need to
        # enforce any specific trunk rules, other than basic validation
        # of the request body.
        subports = subports_validator.validate(
            context, basic_validation=True, trunk_validation=False)
        current_subports = {p.port_id: p for p in trunk.sub_ports}
        removed_subports = []
        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'], None)
            if not subport_obj:
                raise trunk_exc.SubPortNotFound(
                    trunk_id=trunk_id, port_id=subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)
        del trunk.sub_ports[:]
        trunk.sub_ports.extend(current_subports.values())
        # NOTE(status_police): the trunk status should transition to
        # PENDING irrespective of the status in which it is in to allow
        # the user to resolve potential conflicts due to prior
        # add_subports operations.
        # Should a trunk be in PENDING or BUILD state (e.g. when dealing
        # with multiple concurrent requests), the status is still forced
        # to PENDING. See add_subports() for more details.
        trunk.status = constants.PENDING_STATUS
        trunk.update()
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=removed_subports)
        if removed_subports:
            registry.notify(constants.SUBPORTS,
                            events.PRECOMMIT_DELETE, self,
                            payload=payload)
    if removed_subports:
        registry.notify(constants.SUBPORTS,
                        events.AFTER_DELETE, self, payload=payload)
    return trunk
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports = subports['sub_ports']
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports, trunk['port_id'])
        subports = subports_validator.validate(context,
                                               basic_validation=True)
        added_subports = []
        rules.trunk_can_be_managed(context, trunk)
        original_trunk = copy.deepcopy(trunk)
        # NOTE(status_police): the trunk status should transition to
        # DOWN (and finally in ACTIVE or ERROR), only if it is not in
        # ERROR status already. A user should attempt to resolve the
        # ERROR condition before adding more subports to the trunk.
        # Should a trunk be in DOWN or BUILD state (e.g. when dealing
        # with multiple concurrent requests), the status is still forced
        # to DOWN and thus can potentially overwrite an interleaving
        # state change to ACTIVE. Eventually the driver should bring the
        # status back to ACTIVE or ERROR.
        if trunk.status == constants.ERROR_STATUS:
            raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
        else:
            trunk.update(status=constants.DOWN_STATUS)
        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(resources.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(resources.SUBPORTS, events.AFTER_CREATE, self,
                        payload=payload)
    return trunk
def _fake_trunk_event_payload(self):
    # Build a TrunkPayload whose current/original trunk attributes are
    # mocks resolving to FAKE_TRUNK, both reported in DOWN status.
    payload = callbacks.TrunkPayload(
        mock.Mock(), 'fake_id',
        mock.Mock(return_value=FAKE_TRUNK),
        mock.Mock(return_value=FAKE_TRUNK),
        mock.Mock(return_value=FAKE_TRUNK['sub_ports']))
    payload.current_trunk.status = trunk_consts.DOWN_STATUS
    payload.current_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK)
    payload.original_trunk.status = trunk_consts.DOWN_STATUS
    payload.original_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK)
    return payload
def test_process_event(self):
    test_obj = backend.ServerSideRpcBackend()
    test_obj._stub = mock_stub = mock.Mock()
    trunk_plugin = mock.Mock()

    # AFTER_CREATE and AFTER_DELETE trunk events should be relayed to
    # the RPC stub as trunk_created/trunk_deleted calls, in order.
    test_obj.process_event(
        trunk_consts.TRUNK, events.AFTER_CREATE, trunk_plugin,
        callbacks.TrunkPayload("context", "id",
                               current_trunk="current_trunk"))
    test_obj.process_event(
        trunk_consts.TRUNK, events.AFTER_DELETE, trunk_plugin,
        callbacks.TrunkPayload("context", "id",
                               original_trunk="original_trunk"))

    calls = [
        mock.call.trunk_created("context", "current_trunk"),
        mock.call.trunk_deleted("context", "original_trunk")
    ]
    mock_stub.assert_has_calls(calls, any_order=False)
def _test_trunk_update_notify(self, event):
    with self.port() as parent_port:
        callback = register_mock_callback(constants.TRUNK, event)
        trunk = self._create_test_trunk(parent_port)
        orig_trunk_obj = self._get_trunk_obj(trunk['id'])
        trunk_req = {'trunk': {'name': 'foo'}}
        self.trunk_plugin.update_trunk(self.context, trunk['id'],
                                       trunk_req)
        trunk_obj = self._get_trunk_obj(trunk['id'])
        payload = callbacks.TrunkPayload(self.context, trunk['id'],
                                         original_trunk=orig_trunk_obj,
                                         current_trunk=trunk_obj)
        callback.assert_called_once_with(
            constants.TRUNK, event, self.trunk_plugin, payload=payload)
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from the trunk."""
    LOG.debug("Removing subports %s from trunk %s", subports, trunk_id)
    trunk = self._get_trunk(context, trunk_id)
    original_trunk = copy.deepcopy(trunk)
    subports = subports['sub_ports']
    subports = self.validate_subports(context, subports, trunk,
                                      basic_validation=True,
                                      trunk_validation=False)
    removed_subports = []
    rules.trunk_can_be_managed(context, trunk)
    # The trunk should not be in the ERROR_STATUS
    if trunk.status == trunk_const.ERROR_STATUS:
        raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
    else:
        # The trunk will transition to DOWN and subsequently to ACTIVE
        # when a subport is removed.
        trunk.update(status=trunk_const.DOWN_STATUS)
    current_subports = {p.port_id: p for p in trunk.sub_ports}
    # Ensure that all sub-ports to be removed are actually present
    for subport in subports:
        if subport['port_id'] not in current_subports:
            raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                            port_id=subport['port_id'])
    with neutron_db_api.context_manager.writer.using(context):
        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)
        if removed_subports:
            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=removed_subports)
            registry.notify(trunk_const.SUBPORTS,
                            events.PRECOMMIT_DELETE, self,
                            payload=payload)
            self.send_subport_update_to_etcd(context, trunk)
    if removed_subports:
        registry.notify(trunk_const.SUBPORTS,
                        events.AFTER_DELETE, self, payload=payload)
    return trunk
def test__trigger_trunk_status_change_vif_type_changed_unbound(self):
    callback = register_mock_callback(constants.TRUNK,
                                      events.AFTER_UPDATE)
    with self.port() as parent:
        parent[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_UNBOUND
        original_port = {portbindings.VIF_TYPE: 'fakeviftype'}
        original_trunk, current_trunk = (
            self._test__trigger_trunk_status_change(
                parent, original_port,
                constants.ACTIVE_STATUS, constants.DOWN_STATUS))
        payload = callbacks.TrunkPayload(self.context,
                                         original_trunk['id'],
                                         original_trunk=original_trunk,
                                         current_trunk=current_trunk)
        callback.assert_called_once_with(
            constants.TRUNK, events.AFTER_UPDATE, self.trunk_plugin,
            payload=payload)
def _test_remove_subports_notify(self, event):
    with self.port() as parent_port, self.port() as child_port:
        subport = create_subport_dict(child_port['port']['id'])
        trunk = self._create_test_trunk(parent_port, [subport])
        orig_trunk_obj = self._get_trunk_obj(trunk['id'])
        callback = register_mock_callback(constants.SUBPORTS, event)
        subport_obj = self._get_subport_obj(subport['port_id'])
        self.trunk_plugin.remove_subports(
            self.context, trunk['id'], {'sub_ports': [subport]})
        trunk_obj = self._get_trunk_obj(trunk['id'])
        payload = callbacks.TrunkPayload(self.context, trunk['id'],
                                         current_trunk=trunk_obj,
                                         original_trunk=orig_trunk_obj,
                                         subports=[subport_obj])
        callback.assert_called_once_with(
            constants.SUBPORTS, event, self.trunk_plugin,
            payload=payload)
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to a trunk."""
    LOG.debug("Adding subports %s to trunk %s", subports, trunk_id)
    trunk = self._get_trunk(context, trunk_id)
    subports = subports['sub_ports']
    subports = self.validate_subports(context, subports, trunk,
                                      basic_validation=True)
    added_subports = []
    rules.trunk_can_be_managed(context, trunk)
    original_trunk = copy.deepcopy(trunk)
    # The trunk should not be in the ERROR_STATUS
    if trunk.status == trunk_const.ERROR_STATUS:
        raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
    else:
        # The trunk will transition to DOWN and subsequently to ACTIVE
        # when a subport is added.
        trunk.update(status=trunk_const.DOWN_STATUS)
    with db_context_writer.using(context):
        for subport in subports:
            subport_obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            subport_obj.create()
            trunk['sub_ports'].append(subport_obj)
            added_subports.append(subport_obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(trunk_const.SUBPORTS,
                            events.PRECOMMIT_CREATE, self,
                            payload=payload)
            self.send_subport_update_to_etcd(context, trunk)
    if added_subports:
        registry.notify(trunk_const.SUBPORTS,
                        events.AFTER_CREATE, self, payload=payload)
    return trunk
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from trunk."""
    subports = subports['sub_ports']
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports)
        # the subports are being removed, therefore we do not need to
        # enforce any specific trunk rules, other than basic validation
        # of the request body.
        subports = subports_validator.validate(
            context, basic_validation=True, trunk_validation=False)
        current_subports = {p.port_id: p for p in trunk.sub_ports}
        removed_subports = []
        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'], None)
            if not subport_obj:
                raise trunk_exc.SubPortNotFound(
                    trunk_id=trunk_id, port_id=subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)
        del trunk.sub_ports[:]
        trunk.sub_ports.extend(current_subports.values())
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=removed_subports)
        if removed_subports:
            registry.notify(constants.SUBPORTS,
                            events.PRECOMMIT_DELETE, self,
                            payload=payload)
    if removed_subports:
        registry.notify(constants.SUBPORTS,
                        events.AFTER_DELETE, self, payload=payload)
    return trunk
def update_trunk(self, context, trunk_id, trunk): """Update information for the specified trunk.""" trunk_data = trunk['trunk'] with db_api.autonested_transaction(context.session): trunk_obj = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk_obj) trunk_obj.update_fields(trunk_data, reset_changes=True) trunk_obj.update() payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=original_trunk, current_trunk=trunk_obj) registry.notify(constants.TRUNK, events.PRECOMMIT_UPDATE, self, payload=payload) registry.notify(constants.TRUNK, events.AFTER_UPDATE, self, payload=payload) return trunk_obj
def delete_trunk(self, context, trunk_id): """Delete the specified trunk.""" with db_api.autonested_transaction(context.session): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) if not trunk_port_validator.is_bound(context): trunk.delete() payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=trunk) registry.notify(constants.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: raise trunk_exc.TrunkInUse(trunk_id=trunk_id) registry.notify(constants.TRUNK, events.AFTER_DELETE, self, payload=payload)
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    # Check for basic validation since the request body here is not
    # automatically validated by the API layer.
    subports = subports['sub_ports']
    subports_validator = rules.SubPortsValidator(
        self._segmentation_types, subports)
    subports = subports_validator.validate(context, basic_validation=True)
    added_subports = []
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)
        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(constants.SUBPORTS, events.AFTER_CREATE, self,
                        payload=payload)
    return trunk
def update_trunk(self, context, trunk_id, trunk): """Update the trunk object.""" LOG.debug("Updating trunk %s trunk_id %s", trunk, trunk_id) trunk_data = trunk['trunk'] with db_context_writer.using(context): trunk_obj = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk_obj) trunk_obj.update_fields(trunk_data, reset_changes=True) trunk_obj.update() payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=original_trunk, current_trunk=trunk_obj) registry.notify(trunk_const.TRUNK, events.PRECOMMIT_UPDATE, self, payload=payload) registry.notify(trunk_const.TRUNK, events.AFTER_UPDATE, self, payload=payload) return trunk_obj
def create_trunk(self, context, trunk): """Create a trunk object.""" LOG.debug("Creating trunk %s", trunk) trunk = self.validate_trunk(context, trunk['trunk']) sub_ports = [ trunk_objects.SubPort(context=context, port_id=p['port_id'], segmentation_id=p['segmentation_id'], segmentation_type=p['segmentation_type']) for p in trunk['sub_ports'] ] trunk_obj = trunk_objects.Trunk( context=context, admin_state_up=trunk.get('admin_state_up', True), id=uuidutils.generate_uuid(), name=trunk.get('name', ""), description=trunk.get('description', ""), project_id=trunk['tenant_id'], port_id=trunk['port_id'], # Trunk will turn active only after it has been bound on a host status=trunk_const.DOWN_STATUS, sub_ports=sub_ports) with db_context_writer.using(context): trunk_obj.create() payload = callbacks.TrunkPayload(context, trunk_obj.id, current_trunk=trunk_obj) registry.notify(trunk_const.TRUNK, events.PRECOMMIT_CREATE, self, payload=payload) registry.notify(trunk_const.TRUNK, events.AFTER_CREATE, self, payload=payload) return trunk_obj
def remove_subports(self, context, trunk_id, subports):
    """Remove one or more subports from the trunk.

    param: subports:
        {u'sub_ports': [{u'port_id': u'fa006724-dbca-4e7f-bb6b-ec70162eb681'}]}
    """
    LOG.debug("Removing subports %s from trunk %s", subports, trunk_id)
    trunk = self._get_trunk(context, trunk_id)
    original_trunk = copy.deepcopy(trunk)
    # key-value data corresponding to original trunk
    original_trunk_data = self._get_trunk_data(trunk)
    # ID's of subports to remove
    subports_to_remove = [pid['port_id'] for pid in subports['sub_ports']]
    LOG.debug('trunk subports to remove: %s', subports_to_remove)
    subports = subports['sub_ports']
    subports = self.validate_subports(context, subports, trunk,
                                      basic_validation=True,
                                      trunk_validation=False)
    removed_subports = []
    rules.trunk_can_be_managed(context, trunk)
    # The trunk should not be in the ERROR_STATUS
    if trunk.status == trunk_const.ERROR_STATUS:
        raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
    else:
        # The trunk will transition to DOWN and subsequently to ACTIVE
        # when a subport is removed.
        trunk.update(status=trunk_const.DOWN_STATUS)
    current_subports = {p.port_id: p for p in trunk.sub_ports}
    # Ensure that all sub-ports to be removed are actually present
    for subport in subports:
        if subport['port_id'] not in current_subports:
            raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                            port_id=subport['port_id'])
    with db_context_writer.using(context):
        for subport in subports:
            subport_obj = current_subports.pop(subport['port_id'])
            subport_obj.delete()
            removed_subports.append(subport_obj)
        if removed_subports:
            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=removed_subports)
            registry.notify(trunk_const.SUBPORTS,
                            events.PRECOMMIT_DELETE, self,
                            payload=payload)
            self.send_subport_update_to_etcd(context, trunk)
        # Subport data to remove
        subports = [subport
                    for subport in original_trunk_data['sub_ports']
                    if subport['port_id'] in subports_to_remove]
        original_trunk_data['sub_ports'] = subports
        trunk_data = self.add_data_to_subports(context,
                                               original_trunk_data)
        # Remove all remote-group subport keys from etcd
        LOG.debug('trunk data with subports to remove: %s', trunk_data)
        for subport_data in trunk_data['sub_ports']:
            self._write_remote_group_journal(context, subport_data,
                                             remove_key=True)
    if removed_subports:
        registry.notify(trunk_const.SUBPORTS,
                        events.AFTER_DELETE, self, payload=payload)
    return trunk
def test_set_subport(self):
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    sg = ['security-groups']
    orig_sg = None
    # Parent (trunk) port already bound to host1 on behalf of a VM
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id='net-trunk',
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
    mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    mechanism_arista.db_lib.is_network_provisioned.return_value = True
    resource = 'SubPort'
    event = 'AFTER_CREATE'
    trigger = 'AristaDriver'
    sp = dict(models_v2.Port(id=port_id, device_owner='trunk:subport',
                             network_id=network_id, name='subport'))
    sp['security_groups'] = ['security-groups']
    # Subport being added to the trunk
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    payload = callbacks.TrunkPayload(None, trunk_id, subports=[subport])
    segments = [{'segmentation_id': 12,
                 'physical_network': 'default',
                 'id': 'segment_id',
                 'network_type': 'vlan'}]
    bindings = []
    self.drv.ndb.get_port.return_value = sp
    self.drv.ndb.get_network_id_from_port_id.return_value = network_id
    mechanism_arista.db_lib.get_network_segments_by_port_id.return_value = \
        segments

    # Deliver the AFTER_CREATE SubPort event to the driver callback
    self.drv.set_subport(resource, event, trigger, payload=payload)

    # The subport must be plugged into its network on the parent's host
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.get_network_segments_by_port_id('p1'),
        mock.call.plug_port_into_network(vm_id, host_id, port_id,
                                         network_id, tenant_id,
                                         'subport', 'trunk:subport',
                                         sg, orig_sg, vnic_type,
                                         segments=segments,
                                         switch_bindings=bindings),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)