def test_update_subport_bindings_error(self):
    with self.port() as _parent_port:
        parent_port = _parent_port

    trunk = self._create_test_trunk(parent_port)
    port_data = {portbindings.HOST_ID: 'trunk_host_id'}
    self.core_plugin.update_port(
        self.context, parent_port['port']['id'], {'port': port_data})
    subports = []
    for vid in range(0, 3):
        with self.port() as new_port:
            new_port[portbindings.HOST_ID] = 'trunk_host_id'
            obj = trunk_obj.SubPort(
                context=self.context,
                trunk_id=trunk['id'],
                port_id=new_port['port']['id'],
                segmentation_type='vlan',
                segmentation_id=vid)
            subports.append(obj)

    test_obj = server.TrunkSkeleton()
    test_obj._trunk_plugin = self.trunk_plugin
    test_obj._core_plugin = self.core_plugin
    self.mock_update_port.return_value = {
        portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED}
    updated_subports = test_obj.update_subport_bindings(
        self.context, subports=subports)
    trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id'])

    self.assertEqual(trunk.status, constants.TRUNK_ERROR_STATUS)
    self.assertEqual([], updated_subports[trunk.id])
def manage_trunk(self, port): LOG.debug("Managing trunk for port: %s", port) if getattr(port, 'trunk_details', None): trunk_id = port.trunk_details['trunk_id'] master_id = port.trunk_details['master_port_id'] # Track the subports in the trunk. Since we don't get a # notification on unbind - we have no way to clean up # the patch ports for the subports. We trigger the # clean up when our scan finds that the parent is not # present due to unplugging of the VM. self.managed_trunks.setdefault(trunk_id, set()) self.managed_trunks[master_id] = trunk_id # Attach subports if port.vif_id == master_id: subports = [ trunk_objects.SubPort( context=self.context, trunk_id=trunk_id, port_id=x['port_id'], segmentation_type=x['segmentation_type'], segmentation_id=x['segmentation_id']) for x in port.trunk_details['subports']] self.handle_subports( self.context, None, subports, events.CREATED, trunk_id=trunk_id) self.trunk_rpc.update_trunk_status(self.context, trunk_id, constants.TRUNK_ACTIVE_STATUS)
def test_update_subport_bindings(self):
    with self.port() as _parent_port:
        parent_port = _parent_port

    trunk = self._create_test_trunk(parent_port)
    port_data = {portbindings.HOST_ID: 'trunk_host_id'}
    self.core_plugin.update_port(
        self.context, parent_port['port']['id'], {'port': port_data})
    subports = []
    mock_return_vals = []
    for vid in range(0, 3):
        with self.port() as new_port:
            new_port[portbindings.HOST_ID] = 'trunk_host_id'
            mock_return_vals.append(new_port)
            obj = trunk_obj.SubPort(
                context=self.context,
                trunk_id=trunk['id'],
                port_id=new_port['port']['id'],
                segmentation_type='vlan',
                segmentation_id=vid)
            subports.append(obj)

    self.mock_update_port.side_effect = mock_return_vals
    test_obj = server.TrunkSkeleton()
    test_obj._trunk_plugin = self.trunk_plugin
    test_obj._core_plugin = self.core_plugin
    updated_subports = test_obj.update_subport_bindings(
        self.context, subports=subports)
    trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id'])

    self.assertEqual(trunk.status, constants.TRUNK_BUILD_STATUS)
    self.assertIn(trunk.id, updated_subports)
    for port in updated_subports[trunk['id']]:
        self.assertEqual('trunk_host_id', port[portbindings.HOST_ID])
def test_update_subport_bindings_noretryerror(self):
    with self.port() as _parent_port:
        parent_port = _parent_port

    trunk = self._create_test_trunk(parent_port)
    port_data = {portbindings.HOST_ID: 'trunk_host_id'}
    self.core_plugin.update_port(
        self.context, parent_port['port']['id'], {'port': port_data})
    subports = []
    for vid in range(0, 3):
        with self.port() as new_port:
            new_port[portbindings.HOST_ID] = 'trunk_host_id'
            obj = trunk_obj.SubPort(
                context=self.context,
                trunk_id=trunk['id'],
                port_id=new_port['port']['id'],
                segmentation_type='vlan',
                segmentation_id=vid)
            subports.append(obj)

    test_obj = server.TrunkSkeleton()
    test_obj._trunk_plugin = self.trunk_plugin
    test_obj._core_plugin = self.core_plugin
    self.mock_update_port.return_value = {
        portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED}
    mock_trunk_obj = mock.Mock(port_id=parent_port['port']['id'])
    mock_trunk_obj.update.side_effect = KeyError
    with mock.patch.object(trunk_obj.Trunk, 'get_object',
                           return_value=mock_trunk_obj):
        self.assertRaises(KeyError,
                          test_obj.update_subport_bindings,
                          self.context, subports=subports)
        self.assertEqual(1, mock_trunk_obj.update.call_count)
def setUp(self):
    super(TestOVSDBHandler, self).setUp()
    self.ovsdb_handler = ovsdb_handler.OVSDBHandler(mock.sentinel.manager)
    mock.patch.object(self.ovsdb_handler, 'trunk_rpc').start()
    mock.patch.object(self.ovsdb_handler, 'trunk_manager').start()
    self.trunk_manager = self.ovsdb_handler.trunk_manager
    self.trunk_id = uuidutils.generate_uuid()
    self.fake_subports = [
        trunk_obj.SubPort(
            id=uuidutils.generate_uuid(),
            port_id=uuidutils.generate_uuid(),
            segmentation_id=1)]
    self.fake_port = {
        'name': 'foo',
        'external_ids': {
            'trunk_id': 'trunk_id',
            'subport_ids': jsonutils.dumps(
                [s.id for s in self.fake_subports]),
        }
    }
    self.subport_bindings = {
        'trunk_id': [
            {'id': subport.port_id,
             'mac_address': 'mac'} for subport in self.fake_subports]}
def create_trunk(self, context, trunk):
    """Create a trunk."""
    trunk = self.validate(context, trunk['trunk'])
    sub_ports = [trunk_objects.SubPort(
                     context=context,
                     port_id=p['port_id'],
                     segmentation_id=p['segmentation_id'],
                     segmentation_type=p['segmentation_type'])
                 for p in trunk['sub_ports']]
    admin_state_up = trunk.get('admin_state_up', True)
    trunk_obj = trunk_objects.Trunk(context=context,
                                    admin_state_up=admin_state_up,
                                    id=uuidutils.generate_uuid(),
                                    name=trunk.get('name', ""),
                                    tenant_id=trunk['tenant_id'],
                                    port_id=trunk['port_id'],
                                    sub_ports=sub_ports)
    with db_api.autonested_transaction(context.session):
        trunk_obj.create()
        payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                         current_trunk=trunk_obj)
        registry.notify(constants.TRUNK, events.PRECOMMIT_CREATE, self,
                        payload=payload)
    registry.notify(constants.TRUNK, events.AFTER_CREATE, self,
                    payload=payload)
    return trunk_obj
def create_trunk(self, context, trunk):
    """Create a trunk."""
    trunk = self.validate(context, trunk['trunk'])
    sub_ports = [trunk_objects.SubPort(
                     context=context,
                     port_id=p['port_id'],
                     segmentation_id=p['segmentation_id'],
                     segmentation_type=p['segmentation_type'])
                 for p in trunk['sub_ports']]
    admin_state_up = trunk.get('admin_state_up', True)
    # NOTE(status_police): a trunk is created in PENDING status. Depending
    # on the nature of the create request, a driver may set the status
    # immediately to ACTIVE if no physical provisioning is required.
    # Otherwise a transition to BUILD (or ERROR) should be expected
    # depending on how the driver reacts. PRECOMMIT failures prevent the
    # trunk from being created altogether.
    trunk_obj = trunk_objects.Trunk(context=context,
                                    admin_state_up=admin_state_up,
                                    id=uuidutils.generate_uuid(),
                                    name=trunk.get('name', ""),
                                    tenant_id=trunk['tenant_id'],
                                    port_id=trunk['port_id'],
                                    status=constants.PENDING_STATUS,
                                    sub_ports=sub_ports)
    with db_api.autonested_transaction(context.session):
        trunk_obj.create()
        payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                         current_trunk=trunk_obj)
        registry.notify(constants.TRUNK, events.PRECOMMIT_CREATE, self,
                        payload=payload)
    registry.notify(constants.TRUNK, events.AFTER_CREATE, self,
                    payload=payload)
    return trunk_obj
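# A minimal usage sketch for the create_trunk() variant above. The names
# `plugin`, `ctx`, `parent_port_id` and `project_id` are placeholders, not
# names from the snippet itself; the exact keys accepted by validate() may
# differ. It only illustrates that the returned trunk starts in PENDING
# status and that a driver is expected to move it to ACTIVE, BUILD or
# ERROR afterwards.
request = {'trunk': {'port_id': parent_port_id,
                     'tenant_id': project_id,
                     'sub_ports': []}}
new_trunk = plugin.create_trunk(ctx, request)
assert new_trunk.status == constants.PENDING_STATUS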
def test_update_subport_bindings(self):
    with self.port() as _parent_port:
        parent_port = _parent_port

    trunk = self._create_test_trunk(parent_port)
    port_data = {portbindings.HOST_ID: 'trunk_host_id'}
    self.core_plugin.update_port(
        self.context, parent_port['port']['id'], {'port': port_data})
    subports = []
    for vid in range(0, 3):
        with self.port() as new_port:
            obj = trunk_obj.SubPort(
                context=self.context,
                trunk_id=trunk['id'],
                port_id=new_port['port']['id'],
                segmentation_type='vlan',
                segmentation_id=vid)
            subports.append(obj)

    test_obj = server.TrunkSkeleton()
    test_obj._trunk_plugin = self.trunk_plugin
    test_obj._core_plugin = self.core_plugin
    updated_subports = test_obj.update_subport_bindings(
        self.context, subports=subports)
    self.assertIn(trunk['id'], updated_subports)
    for port in updated_subports[trunk['id']]:
        self.assertEqual('trunk_host_id', port[portbindings.HOST_ID])
def _create_ml2_ovs_test_resources(self, vif_details_list):
    self.subport_profiles = {}
    ctx = n_context.get_admin_context()
    for sid in range(1, 6):
        net_arg = {pnet.NETWORK_TYPE: 'vxlan',
                   pnet.SEGMENTATION_ID: sid}
        network_id = self._make_network(
            self.fmt, 'net%d' % sid, True,
            arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,),
            **net_arg)['network']['id']

        for vif_details in vif_details_list:
            port = self._make_port(self.fmt, network_id)['port']
            port_o = port_obj.PortBinding.get_object(
                ctx, port_id=port['id'], host='')
            port_o.vif_type = 'ovs'
            port_o.vif_details = vif_details
            port_o.update()

        for i in range(1, 4):
            port = self._make_port(self.fmt, network_id)['port']
            subport1 = self._make_port(self.fmt, network_id)['port']
            subport2 = self._make_port(self.fmt, network_id)['port']
            trunk_id = uuidutils.generate_uuid()
            subports = [trunk_obj.SubPort(
                            ctx, port_id=subport['id'], trunk_id=trunk_id,
                            segmentation_type="vlan",
                            segmentation_id=i * 10 + j)
                        for j, subport in enumerate((subport1, subport2))]
            trunk = trunk_obj.Trunk(ctx, id=trunk_id, port_id=port['id'],
                                    project_id='foo', sub_ports=subports)
            trunk.create()
            subport_pb = port_obj.PortBinding.get_object(
                ctx, port_id=subport1['id'], host='')
            self.assertFalse(subport_pb.profile)
            self.subport_profiles[subport1['id']] = {
                "parent_name": port['id'], "tag": i * 10}
            self.subport_profiles[subport2['id']] = {
                "parent_name": port['id'], "tag": i * 10 + 1}
            # set something to the last subport port binding
            subport_pb = port_obj.PortBinding.get_object(
                ctx, port_id=subport2['id'], host='')
            # need to generate new id
            subport_pb.profile = subport_pb.profile.copy()
            subport_pb.profile['foo'] = 'bar'
            subport_pb.update()
            self.subport_profiles[subport2['id']]["foo"] = "bar"
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)

        # Check for basic validation since the request body here is not
        # automatically validated by the API layer.
        subports = subports['sub_ports']
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, subports, trunk['port_id'])
        subports = subports_validator.validate(
            context, basic_validation=True)
        added_subports = []

        rules.trunk_can_be_managed(context, trunk)
        original_trunk = copy.deepcopy(trunk)
        # NOTE(status_police): the trunk status should transition to
        # DOWN (and finally in ACTIVE or ERROR), only if it is not in
        # ERROR status already. A user should attempt to resolve the ERROR
        # condition before adding more subports to the trunk. Should a
        # trunk be in DOWN or BUILD state (e.g. when dealing with
        # multiple concurrent requests), the status is still forced to
        # DOWN and thus can potentially overwrite an interleaving state
        # change to ACTIVE. Eventually the driver should bring the status
        # back to ACTIVE or ERROR.
        if trunk.status == constants.ERROR_STATUS:
            raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
        else:
            trunk.update(status=constants.DOWN_STATUS)

        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(resources.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(resources.SUBPORTS, events.AFTER_CREATE, self,
                        payload=payload)
    return trunk
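# A hedged usage sketch for the add_subports() variant above. The names
# `plugin`, `ctx`, `trunk_id` and `port_id` are placeholders: adding a
# subport to a healthy trunk forces its status to DOWN until a driver
# wires the subport and restores ACTIVE, while a trunk already in ERROR
# rejects the request with TrunkInErrorState.
body = {'sub_ports': [{'port_id': port_id,
                       'segmentation_type': 'vlan',
                       'segmentation_id': 101}]}
updated = plugin.add_subports(ctx, trunk_id, body)
assert updated.status == constants.DOWN_STATUS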
def test_put_trunk_subport(self):
    self.tapi.put_trunk(self.trunk.port_id, self.trunk)
    new = trunk.SubPort(id=uuidutils.generate_uuid(),
                        port_id=uuidutils.generate_uuid(),
                        segmentation_type='vlan',
                        trunk_id=self.trunk.id,
                        segmentation_id=1010)
    self.tapi.put_trunk_subport(self.trunk.id, new)
    subs = self.tapi.get_trunk('ctx', self.trunk.port_id).sub_ports
    self.assertEqual(21, len(subs))
    self.assertEqual(new, subs[-1])
def _fill_trunk_dict(self, num=3):
    ports = self.create_test_ports(amount=num)
    self.trunk_dict['port_id'] = ports[0]['id']
    self.trunk_dict['sub_ports'] = [
        trunk_obj.SubPort(id=uuidutils.generate_uuid(),
                          port_id=ports[i]['id'],
                          mac_address=ports[i]['mac_address'],
                          segmentation_id=i,
                          trunk_id=self.trunk_dict['id'])
        for i in range(1, num)]
    return ports
def _gen_trunk(self):
    trunk_obj = trunk.Trunk(id=uuidutils.generate_uuid(),
                            port_id=uuidutils.generate_uuid(),
                            project_id=uuidutils.generate_uuid())
    subports = [trunk.SubPort(id=uuidutils.generate_uuid(),
                              port_id=uuidutils.generate_uuid(),
                              segmentation_type='vlan',
                              trunk_id=trunk_obj.id,
                              segmentation_id=i)
                for i in range(20, 40)]
    trunk_obj.sub_ports = subports
    return trunk_obj
def test_trunk_creation_with_subports(self):
    ports = self.create_test_ports(amount=3)
    self.trunk_dict['port_id'] = ports[0]['id']
    self.trunk_dict['sub_ports'] = [
        trunk_obj.SubPort(id=uuidutils.generate_uuid(),
                          port_id=ports[i]['id'],
                          mac_address=ports[i]['mac_address'],
                          segmentation_id=i,
                          trunk_id=self.trunk_dict['id'])
        for i in range(1, 3)]
    self._test_trunk_creation_helper(ports[:1])
def _test_create_trunk_with_subports(self, port_id, vids):
    tenant_id = uuidutils.generate_uuid()
    sub_ports = []
    for vid in vids:
        port = self._create_port(network_id=self._network['id'])
        sub_ports.append(t_obj.SubPort(self.context,
                                       port_id=port['id'],
                                       segmentation_type='vlan',
                                       segmentation_id=vid))
    trunk = t_obj.Trunk(self.context, port_id=port_id,
                        sub_ports=sub_ports, tenant_id=tenant_id)
    trunk.create()
    self.assertEqual(sub_ports, trunk.sub_ports)
    return trunk
def setUp(self):
    super(TrunkAPITestCase, self).setUp()
    self.stub = mock.create_autospec(driver.trunk_rpc.TrunkStub())
    self.tapi = driver._TrunkAPI(self.stub)
    self.trunk = trunk.Trunk(id=uuidutils.generate_uuid(),
                             port_id=uuidutils.generate_uuid(),
                             tenant_id=uuidutils.generate_uuid())
    self.subports = [trunk.SubPort(id=uuidutils.generate_uuid(),
                                   port_id=uuidutils.generate_uuid(),
                                   segmentation_type='vlan',
                                   trunk_id=self.trunk.id,
                                   segmentation_id=i)
                     for i in range(20)]
    self.trunk.sub_ports = self.subports
    self.stub.get_trunk_details.return_value = self.trunk
def setUp(self):
    super(LinuxBridgeTrunkDriverTestCase, self).setUp()
    self.plumber = mock.create_autospec(trunk_plumber.Plumber())
    self.stub = mock.create_autospec(driver.trunk_rpc.TrunkStub())
    self.tapi = mock.create_autospec(driver._TrunkAPI(self.stub))
    self.lbd = driver.LinuxBridgeTrunkDriver(self.plumber, self.tapi)
    self.trunk = trunk.Trunk(id=uuidutils.generate_uuid(),
                             port_id=uuidutils.generate_uuid(),
                             tenant_id=uuidutils.generate_uuid())
    self.subports = [trunk.SubPort(id=uuidutils.generate_uuid(),
                                   port_id=uuidutils.generate_uuid(),
                                   segmentation_type='vlan',
                                   trunk_id=self.trunk.id,
                                   segmentation_id=i)
                     for i in range(20)]
    self.trunk.sub_ports = self.subports
def test_trunk_handler(self):
    port = mock.Mock()
    port.vif_id = uuidutils.generate_uuid()
    trunk_id = uuidutils.generate_uuid()
    subports = [trunk_obj.SubPort(port_id=uuidutils.generate_uuid(),
                                  trunk_id=trunk_id,
                                  segmentation_type='foo',
                                  segmentation_id=i)
                for i in range(2)]
    trunk_details = {}
    trunk_details['trunk_id'] = trunk_id
    trunk_details['master_port_id'] = port.vif_id
    trunk_details['subports'] = subports
    port.trunk_details = trunk_details

    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.CREATED)
    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.DELETED)
    self.assertFalse(self.agent.bridge_manager.add_patch_ports.called)
    self.assertFalse(self.agent.bridge_manager.delete_patch_ports.called)

    def binding_call(context, subports):
        return {trunk_id: [{'id': x.port_id,
                            'mac_address': '%s' % i}
                           for i, x in enumerate(subports)]}

    self.agent.bridge_manager.trunk_rpc.update_subport_bindings = (
        binding_call)
    self.agent.bridge_manager.manage_trunk(port)
    self.agent.bridge_manager.unmanage_trunk(port.vif_id)
    self.agent.bridge_manager.add_patch_ports.assert_called_with(
        [subports[0].port_id, subports[1].port_id],
        attached_macs={subports[0].port_id: '0',
                       subports[1].port_id: '1'})
    call_args = self.agent.bridge_manager.delete_patch_ports.call_args
    self.assertEqual(set(call_args[0][0]),
                     set([subports[0].port_id, subports[1].port_id]))
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to a trunk."""
    LOG.debug("Adding subports %s to trunk %s", subports, trunk_id)
    trunk = self._get_trunk(context, trunk_id)
    subports = subports['sub_ports']
    subports = self.validate_subports(context, subports, trunk,
                                      basic_validation=True)
    added_subports = []
    rules.trunk_can_be_managed(context, trunk)
    original_trunk = copy.deepcopy(trunk)
    # The trunk should not be in the ERROR_STATUS
    if trunk.status == trunk_const.ERROR_STATUS:
        raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
    else:
        # The trunk will transition to DOWN and subsequently to ACTIVE
        # when a subport is added.
        trunk.update(status=trunk_const.DOWN_STATUS)
    with db_context_writer.using(context):
        for subport in subports:
            subport_obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            subport_obj.create()
            trunk['sub_ports'].append(subport_obj)
            added_subports.append(subport_obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(trunk_const.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
            self.send_subport_update_to_etcd(context, trunk)
    if added_subports:
        registry.notify(trunk_const.SUBPORTS, events.AFTER_CREATE,
                        self, payload=payload)
    return trunk
def setUp(self):
    super(OvsTrunkSkeletonTest, self).setUp()
    trunk_manager_cls_mock = mock.patch(TRUNK_MANAGER).start()
    self.trunk_manager = trunk_manager_cls_mock.return_value
    handler = ovsdb_handler.OVSDBHandler(self.trunk_manager)
    mock.patch.object(handler, 'trunk_rpc').start()
    mock.patch.object(handler, '_set_trunk_metadata').start()
    mock.patch.object(handler, 'manages_this_trunk',
                      return_value=True).start()
    self.skeleton = driver.OVSTrunkSkeleton(handler)
    self.trunk_id = uuidutils.generate_uuid()
    self.subports = [trunk_obj.SubPort(port_id=uuidutils.generate_uuid(),
                                       trunk_id=self.trunk_id,
                                       segmentation_type='foo',
                                       segmentation_id=i)
                     for i in range(2)]
def create_trunk(self, context, trunk):
    """Create a trunk."""
    trunk = self.validate(context, trunk['trunk'])
    sub_ports = [trunk_objects.SubPort(
                     context=context,
                     port_id=p['port_id'],
                     segmentation_id=p['segmentation_id'],
                     segmentation_type=p['segmentation_type'])
                 for p in trunk['sub_ports']]
    admin_state_up = trunk.get('admin_state_up', True)
    # NOTE(status_police): a trunk is created in DOWN status. Depending
    # on the nature of the create request, a driver may set the status
    # immediately to ACTIVE if no physical provisioning is required.
    # Otherwise a transition to BUILD (or ERROR) should be expected
    # depending on how the driver reacts. PRECOMMIT failures prevent the
    # trunk from being created altogether.
    trunk_description = trunk.get('description', "")
    trunk_obj = trunk_objects.Trunk(context=context,
                                    admin_state_up=admin_state_up,
                                    id=uuidutils.generate_uuid(),
                                    name=trunk.get('name', ""),
                                    description=trunk_description,
                                    project_id=trunk['project_id'],
                                    port_id=trunk['port_id'],
                                    status=constants.TRUNK_DOWN_STATUS,
                                    sub_ports=sub_ports)
    with db_api.CONTEXT_WRITER.using(context):
        trunk_obj.create()
        payload = events.DBEventPayload(context, resource_id=trunk_obj.id,
                                        desired_state=trunk_obj)
        registry.publish(resources.TRUNK, events.PRECOMMIT_CREATE, self,
                         payload=payload)
    payload = events.DBEventPayload(context, resource_id=trunk_obj.id,
                                    states=(trunk_obj,))
    registry.publish(resources.TRUNK, events.AFTER_CREATE, self,
                     payload=payload)
    return trunk_obj
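# A hedged sketch mirroring the newer create_trunk() variant above, with
# placeholder names `plugin`, `ctx`, `parent_port_id` and `project_id`:
# here the trunk is created in TRUNK_DOWN_STATUS and the callbacks are
# dispatched through registry.publish() with DBEventPayload rather than
# registry.notify() with TrunkPayload.
request = {'trunk': {'port_id': parent_port_id,
                     'project_id': project_id,
                     'description': 'demo trunk',
                     'sub_ports': []}}
new_trunk = plugin.create_trunk(ctx, request)
assert new_trunk.status == constants.TRUNK_DOWN_STATUS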
def test_update_subport_bindings_during_migration(self):
    with self.port() as _parent_port:
        parent_port = _parent_port

    trunk = self._create_test_trunk(parent_port)
    subports = []
    for vid in range(0, 3):
        with self.port() as new_port:
            obj = trunk_obj.SubPort(
                context=self.context,
                trunk_id=trunk['id'],
                port_id=new_port['port']['id'],
                segmentation_type='vlan',
                segmentation_id=vid)
            subports.append(obj)

    expected_calls = [
        mock.call(
            mock.ANY, subport['port_id'],
            {'port': {portbindings.HOST_ID: 'new_trunk_host_id',
                      'device_owner': constants.TRUNK_SUBPORT_OWNER}})
        for subport in subports]

    test_obj = server.TrunkSkeleton()
    test_obj._trunk_plugin = self.trunk_plugin
    test_obj._core_plugin = self.core_plugin
    port_data = {
        portbindings.HOST_ID: 'trunk_host_id',
        portbindings.PROFILE: {'migrating_to': 'new_trunk_host_id'}}
    with mock.patch.object(
            self.core_plugin, "get_port",
            return_value=port_data), \
            mock.patch.object(test_obj, "_safe_update_trunk"):
        test_obj.update_subport_bindings(self.context, subports=subports)

    for expected_call in expected_calls:
        self.assertIn(expected_call, self.mock_update_port.mock_calls)
def add_subports(self, context, trunk_id, subports):
    """Add one or more subports to trunk."""
    # Check for basic validation since the request body here is not
    # automatically validated by the API layer.
    subports = subports['sub_ports']
    subports_validator = rules.SubPortsValidator(
        self._segmentation_types, subports)
    subports = subports_validator.validate(context, basic_validation=True)
    added_subports = []

    with db_api.autonested_transaction(context.session):
        trunk = self._get_trunk(context, trunk_id)
        original_trunk = copy.deepcopy(trunk)
        rules.trunk_can_be_managed(context, trunk)
        for subport in subports:
            obj = trunk_objects.SubPort(
                context=context,
                trunk_id=trunk_id,
                port_id=subport['port_id'],
                segmentation_type=subport['segmentation_type'],
                segmentation_id=subport['segmentation_id'])
            obj.create()
            trunk['sub_ports'].append(obj)
            added_subports.append(obj)
        payload = callbacks.TrunkPayload(context, trunk_id,
                                         current_trunk=trunk,
                                         original_trunk=original_trunk,
                                         subports=added_subports)
        if added_subports:
            registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE,
                            self, payload=payload)
    if added_subports:
        registry.notify(constants.SUBPORTS, events.AFTER_CREATE, self,
                        payload=payload)
    return trunk
def test_trunk_handler(self):
    trunk_id = uuidutils.generate_uuid()
    subports = [trunk_obj.SubPort(port_id=uuidutils.generate_uuid(),
                                  trunk_id=trunk_id,
                                  segmentation_type='foo',
                                  segmentation_id=i)
                for i in range(2)]

    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.CREATED)
    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.DELETED)
    self.assertFalse(self.agent.bridge_manager.add_patch_ports.called)
    self.assertFalse(self.agent.bridge_manager.delete_patch_ports.called)

    self.agent.bridge_manager.managed_trunks[trunk_id] = 'master_port'
    self.agent.bridge_manager.managed_trunks['master_port'] = trunk_id

    def binding_call(context, subports):
        return {trunk_id: [{'id': x.port_id,
                            'mac_address': '%s' % i}
                           for i, x in enumerate(subports)]}

    self.agent.bridge_manager.trunk_rpc.update_subport_bindings = (
        binding_call)
    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.CREATED)
    self.agent.bridge_manager.handle_subports(None, None, subports,
                                              events.DELETED)
    self.agent.bridge_manager.add_patch_ports.assert_called_with(
        [subports[0].port_id, subports[1].port_id],
        attached_macs={subports[0].port_id: '0',
                       subports[1].port_id: '1'})
    self.agent.bridge_manager.delete_patch_ports.assert_called_with(
        [subports[0].port_id, subports[1].port_id])
def manage_trunk(self, port): LOG.debug("Managing trunk for port: %s" % port) if getattr(port, 'trunk_details', None): trunk_id = port.trunk_details['trunk_id'] master_id = port.trunk_details['master_port_id'] self.managed_trunks[trunk_id] = master_id self.managed_trunks[master_id] = trunk_id # Attach subports if port.vif_id == master_id: subports = [ trunk_objects.SubPort( context=self.context, trunk_id=trunk_id, port_id=x['port_id'], segmentation_type=x['segmentation_type'], segmentation_id=x['segmentation_id']) for x in port.trunk_details['subports'] ] self.handle_subports(subports, events.CREATED, trunk_id=trunk_id) self.trunk_rpc.update_trunk_status(self.context, trunk_id, constants.ACTIVE_STATUS)
def create_trunk(self, context, trunk):
    """Create a trunk object."""
    LOG.debug("Creating trunk %s", trunk)
    trunk = self.validate_trunk(context, trunk['trunk'])
    sub_ports = [trunk_objects.SubPort(
                     context=context,
                     port_id=p['port_id'],
                     segmentation_id=p['segmentation_id'],
                     segmentation_type=p['segmentation_type'])
                 for p in trunk['sub_ports']]
    trunk_obj = trunk_objects.Trunk(
        context=context,
        admin_state_up=trunk.get('admin_state_up', True),
        id=uuidutils.generate_uuid(),
        name=trunk.get('name', ""),
        description=trunk.get('description', ""),
        project_id=trunk['tenant_id'],
        port_id=trunk['port_id'],
        # Trunk will turn active only after it has been bound on a host
        status=trunk_const.DOWN_STATUS,
        sub_ports=sub_ports)
    with db_context_writer.using(context):
        trunk_obj.create()
        payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                         current_trunk=trunk_obj)
        registry.notify(trunk_const.TRUNK, events.PRECOMMIT_CREATE, self,
                        payload=payload)
    registry.notify(trunk_const.TRUNK, events.AFTER_CREATE, self,
                    payload=payload)
    return trunk_obj