def bind_port_to_host(port_id, host, network_ctx):
    """Persist one PortBindingLevel row per network segment for a port.

    For every segment of ``network_ctx`` a fake binding level is built
    (driver name hard-coded to ``'vendor-1'``) and written to the DB
    through the writer session, flushing once at the end.
    """
    writer = db_api.get_writer_session()
    for lvl, seg in enumerate(network_ctx.network_segments):
        fake_level = FakePortBindingLevel(port_id, lvl, 'vendor-1',
                                          seg['id'], host)
        # Copy the fake object's attributes straight into the ORM model.
        writer.add(ml2_models.PortBindingLevel(**fake_level.__dict__))
    writer.flush()
def _bind_port_level(self, context, level, segments_to_bind):
    """Attempt to bind a port at one binding level.

    Tries each registered mechanism driver in order against
    ``segments_to_bind``.  A driver that binds a segment may request a
    further level of binding (hierarchical port binding), in which case
    this method recurses with ``level + 1``.

    :param context: PortContext carrying the port, host and the binding
        state that drivers mutate.
    :param level: current binding depth (0-based); bounded by
        MAX_BINDING_LEVELS to stop runaway recursion.
    :param segments_to_bind: network segments offered to drivers at this
        level.
    :returns: True if the port was completely bound, False otherwise.
    """
    binding = context._binding
    port_id = context.current['id']
    LOG.debug("Attempting to bind port %(port)s on host %(host)s "
              "at level %(level)s using segments %(segments)s",
              {'port': port_id,
               'host': context.host,
               'level': level,
               'segments': segments_to_bind})
    # Hard stop on recursion depth: a mis-behaving driver set must not
    # be able to push binding levels forever.
    if level == MAX_BINDING_LEVELS:
        LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
                      "port %(port)s on host %(host)s"),
                  {'port': context.current['id'],
                   'host': context.host})
        return False
    for driver in self.ordered_mech_drivers:
        # Skip drivers that would create a binding loop (same driver
        # appearing twice in the accumulated binding levels).
        if not self._check_driver_to_bind(driver, segments_to_bind,
                                          context._binding_levels):
            continue
        try:
            # Reset per-attempt state on the context before handing it
            # to the driver.
            context._prepare_to_bind(segments_to_bind)
            driver.obj.bind_port(context)
            # A successful driver records the segment it bound here.
            segment = context._new_bound_segment
            if segment:
                context._push_binding_level(
                    models.PortBindingLevel(port_id=port_id,
                                            host=context.host,
                                            level=level,
                                            driver=driver.name,
                                            segment_id=segment))
                next_segments = context._next_segments_to_bind
                if next_segments:
                    # Continue binding another level.
                    if self._bind_port_level(context, level + 1,
                                             next_segments):
                        return True
                    # Deeper level failed; undo this level's record so
                    # the next driver starts from a clean stack.
                    context._pop_binding_level()
                else:
                    # Binding complete.
                    LOG.debug("Bound port: %(port)s, "
                              "host: %(host)s, "
                              "vif_type: %(vif_type)s, "
                              "vif_details: %(vif_details)s, "
                              "binding_levels: %(binding_levels)s",
                              {'port': port_id,
                               'host': context.host,
                               'vif_type': binding.vif_type,
                               'vif_details': binding.vif_details,
                               'binding_levels': context.binding_levels})
                    return True
        except Exception:
            # A broken driver must not abort the whole binding attempt;
            # log it and let the next driver try.
            LOG.exception(_LE("Mechanism driver %s failed in "
                              "bind_port"), driver.name)
    LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
              {'port': context.current['id'],
               'host': binding.host})
    # Explicit False (was an implicit None) to match the boolean
    # contract of the other return paths above.
    return False
def create_port(tenant_id, net_id, device_id, port_id, network_ctx,
                device_owner='compute', host='ubuntu1', dynamic_segment=None):
    """Create a port through the Neutron DB backend and persist its bindings.

    Builds a port context, creates the port via the NeutronNets helper,
    then writes every accumulated binding level to the DB.

    :returns: the PortContext that was created.
    """
    session = db_api.get_writer_session()
    nets_db = db_lib.NeutronNets()
    nets_db.set_ipam_backend()
    port_ctx = get_port_context(tenant_id, net_id, device_id, network_ctx,
                                port_id=port_id,
                                device_owner=device_owner,
                                host=host, session=session,
                                dynamic_segment=dynamic_segment)
    nets_db.create_port(port_ctx, {'port': port_ctx.current})
    # Mirror the context's binding levels into ml2_port_binding_levels.
    for lvl in port_ctx._binding_levels:
        session.add(ml2_models.PortBindingLevel(**lvl.__dict__))
    session.flush()
    return port_ctx
def add_binding_bound(context, port_id, segment_id, host, interface_name):
    """Record a completed midonet port binding in the DB session.

    Adds one level-0 PortBindingLevel row plus an ACTIVE PortBinding row;
    the binding profile carries ``interface_name`` when one is given.
    """
    session = context.session
    session.add(
        ml2_models.PortBindingLevel(port_id=port_id,
                                    host=host,
                                    level=0,
                                    driver='midonet',
                                    segment_id=segment_id))
    # Only include the interface name in the profile when supplied.
    profile = {} if interface_name is None else {
        'interface_name': interface_name}
    session.add(
        ml2_models.PortBinding(port_id=port_id,
                               host=host,
                               vif_type='midonet',
                               vnic_type='normal',
                               profile=jsonutils.dumps(profile),
                               vif_details=jsonutils.dumps(
                                   {'port_filter': True}),
                               status='ACTIVE'))
def test_trunk_port(self):
    """A trunk sub-port is returned with its parent and traffic tag."""
    fixtures = [
        trunk_model.SubPort(
            port_id=self.port_id_2,
            trunk_id=self.trunk_id_1,
            segmentation_type="vlan",
            segmentation_id=11
        ),
        ml2_models.PortBinding(
            port_id=self.port_id_2,
            host=self.host,
            vif_type="ovs"
        ),
        ml2_models.PortBindingLevel(
            port_id=self.port_id_2,
            host=self.host,
            driver=nsxv3_constants.NSXV3,
            level=1
        ),
    ]
    with self.session.begin(subtransactions=True):
        for row in fixtures:
            self.session.add(row)
    port_2 = db.get_port(self.ctx, self.host, self.port_id_2)
    expected = {
        "id": self.port_id_2,
        "parent_id": self.port_id_1,
        "traffic_tag": 11,
        "admin_state_up": True,
        "status": "ACTIVE",
        "qos_policy_id": "",
        "security_groups": [],
        "address_bindings": [],
        "revision_number": 0,
        "binding:host_id": "test",
        "vif_details": "",
        "binding:vnic_type": "normal",
        "binding:vif_type": "ovs"
    }
    self.assertDictSupersetOf(expected, port_2)
def _bind_port_level(self, context, level, segments_to_bind):
    """Attempt to bind a port at one binding level, recursing for deeper
    levels.

    Each mechanism driver is offered ``segments_to_bind``; a driver that
    binds a segment may request another binding level, in which case this
    method calls itself with ``level + 1``.

    :param context: PortContext holding the port, host and binding state.
    :param level: current binding depth (0-based).
    :param segments_to_bind: segments offered to drivers at this level.
    :returns: True on a complete binding; False when the maximum depth is
        hit.  NOTE(review): unlike the sibling implementation, this block
        has no trailing failure log/return after the driver loop — falls
        through returning None (falsy); confirm against upstream.
    """
    binding = context._binding
    port_id = context.current['id']
    LOG.debug(
        "Attempting to bind port %(port)s on host %(host)s "
        "at level %(level)s using segments %(segments)s", {
            'port': port_id,
            'host': context.host,
            'level': level,
            'segments': segments_to_bind
        })
    # Guard against unbounded recursion across binding levels.
    if level == MAX_BINDING_LEVELS:
        LOG.error(
            "Exceeded maximum binding levels attempting to bind "
            "port %(port)s on host %(host)s", {
                'port': context.current['id'],
                'host': context.host
            })
        return False
    # Try every registered mechanism driver in configured order.
    for driver in self.ordered_mech_drivers:
        # Prevent a binding loop: the same driver must not bind the same
        # segment at different levels of the binding stack.
        if not self._check_driver_to_bind(driver, segments_to_bind,
                                          context._binding_levels):
            continue
        try:
            # Resets the per-attempt context state:
            #   self._segments_to_bind = segments_to_bind
            #   self._new_bound_segment = None
            #   self._next_segments_to_bind = None
            context._prepare_to_bind(segments_to_bind)
            # Delegate to the mechanism driver's bind_port() entry point
            # (e.g. AgentMechanismDriverBase.bind_port()).
            driver.obj.bind_port(context)
            # If the driver bound successfully it set the PortContext's
            # new_bound_segment to the segment it just bound (done in
            # SimpleAgentMechanismDriverBase.try_to_bind_segment_for_agent()).
            segment = context._new_bound_segment
            if segment:
                # Record the current binding state for persistence into
                # the ml2_port_binding_levels table.
                context._push_binding_level(
                    models.PortBindingLevel(port_id=port_id,
                                            host=context.host,
                                            level=level,
                                            driver=driver.name,
                                            segment_id=segment))
                next_segments = context._next_segments_to_bind
                # If the driver decided another level of binding is
                # needed, it set _next_segments_to_bind to the segments
                # it dynamically allocated; when that is set, binding
                # continues on the next level.
                if next_segments:
                    # Continue binding another level.
                    if self._bind_port_level(context, level + 1,
                                             next_segments):
                        return True
                    else:
                        LOG.warning(
                            "Failed to bind port %(port)s on "
                            "host %(host)s at level %(lvl)s", {
                                'port': context.current['id'],
                                'host': context.host,
                                'lvl': level + 1
                            })
                        # Deeper level failed: drop this level's record
                        # so the next driver starts from a clean stack.
                        context._pop_binding_level()
                else:
                    # Binding complete.
                    LOG.debug(
                        "Bound port: %(port)s, "
                        "host: %(host)s, "
                        "vif_type: %(vif_type)s, "
                        "vif_details: %(vif_details)s, "
                        "binding_levels: %(binding_levels)s", {
                            'port': port_id,
                            'host': context.host,
                            'vif_type': binding.vif_type,
                            'vif_details': binding.vif_details,
                            'binding_levels': context.binding_levels
                        })
                    return True
        except Exception:
            # A failing driver is logged and skipped so the remaining
            # drivers still get a chance to bind.
            LOG.exception("Mechanism driver %s failed in "
                          "bind_port", driver.name)
def _populate_neutron_db(self):
    """Seed the Neutron DB with the network/subnet/port/trunk/QoS/SG
    fixtures the test cases operate on.

    Creates one network, a default shared subnet pool, two ports, and a
    subnet via the plugin API, then inserts the binding, IP allocation,
    QoS policy, trunk and security-group rows directly through the
    session.
    """
    self.plugin.create_network(self.ctx, {"network": {
        "tenant_id": self.tenant_id,
        "id": self.net_id,
        "shared": False,
        "name": "test_net_1",
        "admin_state_up": True,
        "description": ""
    }})
    self.plugin.create_subnetpool(self.ctx, {"subnetpool": {
        "tenant_id": self.tenant_id,
        "id": self.ip_pool_id,
        "name": "default_test_pool",
        "prefixes": ["192.168.0.0", "192.168.1.0", "192.168.2.0"],
        "min_prefixlen": 16,
        "max_prefixlen": 32,
        "default_prefixlen": 32,
        "is_default": True,
        "shared": True,
        "description": ""
    }})
    # The two test ports differ only in name, id and device id.
    for port_name, port_id, device_id in (
            ("test_port_1", self.port_id_1, "123"),
            ("test_port_2", self.port_id_2, "1234")):
        self.plugin.create_port(self.ctx, {"port": {
            "tenant_id": self.tenant_id,
            "name": port_name,
            "id": port_id,
            "network_id": self.net_id,
            "fixed_ips": constants.ATTR_NOT_SPECIFIED,
            "admin_state_up": True,
            "device_id": device_id,
            "device_owner": "admin",
            "description": ""
        }})
    subnet = self.plugin.create_subnet(self.ctx, {"subnet": {
        "tenant_id": self.tenant_id,
        "name": "subnet_192_168",
        "cidr": "192.168.0.0/32",
        "ip_version": 4,
        "network_id": self.net_id,
        "subnetpool_id": self.ip_pool_id,
        "allocation_pools": [],
        "enable_dhcp": True,
        "dns_nameservers": [],
        "host_routes": []
    }})
    fixtures = [
        ml2_models.PortBinding(
            port_id=self.port_id_1,
            host=self.host,
            vif_type="ovs"
        ),
        ml2_models.PortBindingLevel(
            port_id=self.port_id_1,
            host=self.host,
            driver=nsxv3_constants.NSXV3,
            level=1
        ),
        models_v2.IPAllocation(
            port_id=self.port_id_1,
            ip_address="192.168.0.100",
            subnet_id=subnet.get("id"),
            network_id=self.net_id
        ),
        QosPolicy(
            id=self.qos_id_1,
            project_id=self.tenant_id,
            name="Test_QOS_1"
        ),
        trunk_model.Trunk(
            id=self.trunk_id_1,
            project_id=self.tenant_id,
            name="test_trunk_1",
            port_id=self.port_id_1
        ),
        sg_model.SecurityGroup(
            id=self.sg_id_1,
            project_id=self.tenant_id,
            name="test_sg_1",
        ),
    ]
    with self.session.begin(subtransactions=True):
        for entry in fixtures:
            self.session.add(entry)