def update_subnet(self, rollback_list, subnet_request):
    """Update IPAM Subnet.

    Updates allocation pools, dns zones, or EAs for the subnet in the
    Infoblox backend.

    :param rollback_list: list collecting Infoblox objects created
        during the update so a caller can undo them on failure
    :param subnet_request: instance of a SubnetRequest child holding
        the updated subnet attributes
    :raises InfobloxCannotFindSubnet: when no backing Infoblox network
        exists for the subnet
    """
    neutron_subnet = self._build_subnet_from_request(subnet_request)
    # look up the existing backend network for this subnet
    ib_network = self._get_ib_network(neutron_subnet['id'],
                                      neutron_subnet['ip_version'])
    if not ib_network:
        raise exc.InfobloxCannotFindSubnet(subnet_id=neutron_subnet['id'],
                                           cidr=neutron_subnet['cidr'])
    ib_cxt = ib_context.InfobloxContext(self._context,
                                        self._context.user_id,
                                        None,
                                        neutron_subnet,
                                        self._grid_config,
                                        plugin=self._plugin,
                                        ib_network=ib_network)
    ipam_controller = ipam.IpamSyncController(ib_cxt)
    dns_controller = dns.DnsController(ib_cxt)
    ipam_controller.update_subnet_allocation_pools(rollback_list)
    if self._is_new_zone_required(neutron_subnet, ib_network):
        # subnet name is used in the domain suffix pattern and the name
        # has been changed; we need to create new zones.
        dns_controller.create_dns_zones(rollback_list)
    ipam_controller.update_subnet_details(ib_network)
def _test_reserve_service_members_without_ib_network_for_gm_auth(
        self, test_authority_member, dns_support, expected_dns_members,
        dbi_service_member_mock):
    """Reserve service members with no ib_network and a GM-auth setup.

    With no service member rows in the db and the grid master used for
    DHCP, the authority member itself must become the DHCP member and
    the DNS members must equal *expected_dns_members*.

    NOTE(review): dbi_service_member_mock is presumably injected by a
    mock.patch decorator on the concrete test method -- confirm.
    """
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet
    subnet_name = 'Test Subnet'
    subnet_cidr = '11.11.1.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    self.grid_config.dhcp_support = True
    self.grid_config.dns_support = dns_support
    # the db reports no service members
    dbi_service_member_mock.return_value = []
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.mapping.authority_member = test_authority_member
    ib_cxt.grid_config.use_grid_master_for_dhcp = True
    # avoid real backend calls during service registration
    ib_cxt._register_services = mock.Mock()
    ib_cxt.reserve_service_members()
    self.assertEqual(test_authority_member.member_id,
                     ib_cxt.mapping.dhcp_members[0].member_id)
    self.assertEqual(expected_dns_members, ib_cxt.mapping.dns_members)
def _get_instance_name_from_fip(self, floatingip):
    """Get instance name from fip associated with an instance

    Get instance name using the following info. in floatingip:
    1. port_id - this is the port id for the instance
    2. fixed_ip_address - this is the fixed ip for the instance

    Using the above, construct InfobloxContext and query NIOS for
    FixedAddress/HostRecord for instance. From the result, extract
    instance name from the "VM Name" EA

    :param floatingip: floating ip dict from the notification payload
    :returns: the instance name, or None when it cannot be resolved
    """
    port_id = floatingip.get('port_id')
    fixed_ip = floatingip.get('fixed_ip_address')
    port = self.plugin.get_port(self.context, port_id)
    if not port:
        LOG.warning("No port found for port_id: %s" % port_id)
        return None
    # for compute-owned ports the instance is tracked in the local db;
    # prefer that over a NIOS round trip
    if port['device_owner'] in const.NEUTRON_DEVICE_OWNER_COMPUTE_LIST:
        instance = dbi.get_instance(self.context.session,
                                    port['device_id'])
        if instance:
            return instance.instance_name
    subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']
                  if ip['ip_address'] == fixed_ip]
    if not subnet_ids:
        # BUG FIX: the format string had one %s for a 2-tuple of
        # arguments, which raised TypeError instead of logging.
        LOG.warning("No subnet_ids found for port: %s, fixed_ip: %s" %
                    (port, fixed_ip))
        return None
    subnet = self.plugin.get_subnet(self.context, subnet_ids[0])
    if not subnet:
        LOG.warning("No subnet was found for subnet_id: %s" %
                    subnet_ids[0])
        return None
    ib_context = context.InfobloxContext(
        self.context, self.user_id, None, subnet, self.grid_config,
        self.plugin, self._cached_grid_members,
        self._cached_network_views, self._cached_mapping_conditions)
    connector = ib_context.connector
    netview = ib_context.mapping.network_view
    dns_view = ib_context.mapping.dns_view
    # try a fixed address first, then fall back to a host record
    ib_address = ib_objects.FixedAddress.search(connector,
                                                network_view=netview,
                                                ip=fixed_ip)
    if not ib_address:
        ib_address = ib_objects.HostRecord.search(connector,
                                                  view=dns_view,
                                                  ip=fixed_ip)
        if not ib_address:
            return None
    return ib_address.extattrs.get(const.EA_VM_NAME)
def _get_ib_context(self, user_id, network, subnet):
    """Build an InfobloxContext with keystone tenant sync patched out."""
    km_prefix = 'networking_infoblox.neutron.common.keystone_manager.'
    patch_tenants = mock.patch(km_prefix + 'get_all_tenants')
    patch_sync = mock.patch(km_prefix + 'sync_tenants_from_keystone')
    with patch_tenants, patch_sync:
        context_obj = ib_context.InfobloxContext(
            self.ctx, user_id, network, subnet, self.grid_config,
            self.plugin)
    return context_obj
def delete_instance_sync(self, payload):
    """Notifies that an instance has been deleted.

    Removes the instance from the local db, then for every external
    subnet looks up Infoblox addresses tagged with the instance's
    'VM ID' EA and re-binds the related floating-ip names without the
    instance association.

    :param payload: notification payload containing 'instance_id'
    """
    instance_id = payload.get('instance_id')
    session = self.context.session
    dbi.remove_instance(session, instance_id)
    if self.traceable:
        LOG.info("Deleted instance: %s", instance_id)
    # search key: addresses carrying this instance's VM ID EA
    vm_id_ea = ib_objects.EA({'VM ID': instance_id})
    subnets = dbi.get_external_subnets(self.context.session)
    for cur_subnet in subnets:
        subnet = self.plugin.get_subnet(self.context, cur_subnet.id)
        network = self.plugin.get_network(self.context,
                                          cur_subnet.network_id)
        ib_context = context.InfobloxContext(
            self.context, self.user_id, network, subnet,
            self.grid_config, self.plugin, self._cached_grid_members,
            self._cached_network_views, self._cached_mapping_conditions)
        dns_controller = dns.DnsController(ib_context)
        connector = ib_context.connector
        netview = ib_context.mapping.network_view
        dns_view = ib_context.mapping.dns_view
        # try a fixed address first, then fall back to a host record
        ib_address = ib_objects.FixedAddress.search(
            connector, network_view=netview, network=subnet['cidr'],
            search_extattrs=vm_id_ea)
        if not ib_address:
            ib_address = ib_objects.HostRecord.search(
                connector, view=dns_view, zone=dns_controller.dns_zone,
                search_extattrs=vm_id_ea)
            if not ib_address:
                continue
        if hasattr(ib_address, 'ips'):
            # host records may span subnets; keep only ips in this cidr
            ips = [ipaddr.ip for ipaddr in ib_address.ips
                   if netaddr.IPAddress(ipaddr.ip) in
                   netaddr.IPNetwork(subnet['cidr'])]
        else:
            ips = [ib_address.ip]
        tenant_id = ib_address.extattrs.get('Tenant ID')
        db_ports = dbi.get_floatingip_ports(
            session, ips, cur_subnet.network_id)
        for port in db_ports:
            # row layout presumably:
            # (port_id, device_id, device_owner, floating_ip,
            #  port_name) -- confirm against dbi.get_floatingip_ports
            port_id = port[0]
            device_id = port[1]
            device_owner = port[2]
            floating_ip = port[3]
            port_name = port[4]
            # re-bind without an instance name (instance is gone)
            dns_controller.bind_names(
                floating_ip, None, port_id, tenant_id, device_id,
                device_owner, False, port_name)
            LOG.info("Instance deletion sync: instance id = %s, "
                     "floating ip = %s, port id = %s, device owner = %s",
                     instance_id, floating_ip, port_id, device_owner)
def update_floatingip_sync(self, payload):
    """Notifies that the floating ip has been updated.

    update could be either association if port_id is not empty or
    dissociation if port_id is None.

    :param payload: notification payload containing 'floatingip'
    """
    floatingip = payload.get('floatingip')
    if self.traceable:
        LOG.info("Updated floatingip: %s", floatingip)
    session = self.context.session
    floating_ip_id = floatingip.get('id')
    tenant_id = floatingip.get('tenant_id')
    associated_port_id = floatingip.get('port_id')
    network_id = floatingip.get('floating_network_id')
    floating_ip = floatingip.get('floating_ip_address')
    instance_name = None
    # find mapping subnet id by network id and floating ip since
    # subnet info is not passed.
    subnet = self._get_mapping_neutron_subnet(network_id, floating_ip)
    if subnet is None:
        return
    network = self.plugin.get_network(self.context, network_id)
    ib_context = context.InfobloxContext(self.context, self.user_id,
                                         network, subnet,
                                         self.grid_config,
                                         self.plugin,
                                         self._cached_grid_members,
                                         self._cached_network_views,
                                         self._cached_mapping_conditions)
    dns_controller = dns.DnsController(ib_context)
    if associated_port_id:
        # association: bind to the instance behind the port
        instance_name = self._get_instance_name_from_fip(floatingip)
        is_floating_ip = True
        db_port = dbi.get_port_by_id(session, associated_port_id)
    else:
        # dissociation: fall back to the floating ip's own port
        is_floating_ip = False
        db_floatingip = dbi.get_floatingip_by_id(session,
                                                 floating_ip_id)
        db_port = dbi.get_port_by_id(session,
                                     db_floatingip.floating_port_id)
    dns_controller.bind_names(floating_ip, instance_name, db_port.id,
                              tenant_id, db_port.device_id,
                              db_port.device_owner, is_floating_ip,
                              db_port.name)
    LOG.info("Floating ip update sync: floating ip = %s, "
             "instance name = %s, port id = %s, device id: %s, "
             "device owner = %s",
             floating_ip, instance_name, db_port.id,
             db_port.device_id, db_port.device_owner)
def test_network_view_mapping_conditions_with_tenant_id_condition(self):
    """A tenant-id mapping condition selects its network view."""
    user_id = 'test user'
    tenant_id = '80afaaba012acb9c12888128d5123a09'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet with cidr that is not used in mapping conditions
    subnet_name = 'Test Subnet'
    subnet_cidr = '10.0.0.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    # make sure that no subnet cidr is used in mapping conditions
    db_conditions = dbi.get_mapping_conditions(
        self.ctx.session, grid_id=self.grid_id,
        neutron_object_value=subnet_cidr)
    self.assertEqual([], db_conditions)
    # make sure that tenant id is used in mapping condition once
    db_conditions = dbi.get_mapping_conditions(
        self.ctx.session, grid_id=self.grid_id,
        neutron_object_value=tenant_id)
    self.assertEqual(1, len(db_conditions))
    # test mapping where tenant id mapping is found
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.connector = mock.Mock()
    ib_cxt.ibom = mock.Mock()
    ib_cxt.ip_allocator = mock.Mock()
    # validate the mapping network view
    expected_netview_id = db_conditions[0].network_view_id
    db_netviews = dbi.get_network_views(self.ctx.session,
                                        grid_id=self.grid_id)
    netview_row = utils.find_one_in_list('id', expected_netview_id,
                                         db_netviews)
    expected_netview = netview_row.network_view
    db_mapping_members = dbi.get_mapping_members(self.ctx.session,
                                                 expected_netview_id,
                                                 grid_id=self.grid_id)
    expected_member_id = db_mapping_members[0].member_id
    self.assertEqual(expected_netview_id, ib_cxt.mapping.network_view_id)
    self.assertEqual(expected_netview, ib_cxt.mapping.network_view)
    self.assertEqual(expected_member_id,
                     ib_cxt.mapping.authority_member.member_id)
def _test_reserve_service_members_with_ib_network_without_dhcp_member(
        self, dns_support):
    """Reserve service members for an ib network owned by the CPM
    authority member but with only a member struct (no dhcp member ip).

    Also checks that the subnet's gateway ip is prepended to the
    existing 'routers' dhcp option.
    """
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet
    subnet_name = 'Test Subnet'
    subnet_cidr = '11.11.1.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    self.grid_config.dhcp_support = True
    self.grid_config.dns_support = dns_support
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    # avoid real backend calls during service registration
    ib_cxt._register_services = mock.Mock()
    # ib network with dhcp member and gateway ips assigned
    connector = mock.Mock()
    test_ib_network = ib_objects.NetworkV4(connector,
                                           network_view='test-view',
                                           cidr='12.12.1.0/24')
    test_ib_network.members = [
        ib_objects.AnyMember(
            _struct='dhcpmember',
            name=ib_cxt.mapping.authority_member.member_name)]
    test_gateway_ip = '12.12.1.1'
    test_ib_network.options = [
        ib_objects.DhcpOption(name='routers', value=test_gateway_ip)]
    ib_cxt.reserve_service_members(test_ib_network)
    expected_dns_members = ([ib_cxt.mapping.authority_member]
                            if dns_support else [])
    # authority member is CPM, so dhcp/dns member should be the same as
    # authority member
    self.assertEqual([ib_cxt.mapping.authority_member],
                     ib_cxt.mapping.dhcp_members)
    self.assertEqual(expected_dns_members, ib_cxt.mapping.dns_members)
    actual_opt_router = [opt for opt in test_ib_network.options
                         if opt.name == 'routers']
    self.assertEqual(subnet['gateway_ip'] + ',' + test_gateway_ip,
                     actual_opt_router[0].value)
def allocate_subnet(self, rollback_list, subnet_request): """Create an IPAM subnet from the subnet request which contains cidr. Allocates a subnet to the Infoblox backend. :param subnet_request: instance of SubnetRequest child :returns: a InfobloxSubnet instance """ # if subnetpool is defined, the request is AnySubnetRequest, so # we need to convert it to SpecificSubnetRequest calling # SubnetAllocator; however, calling this will not pass custom # parameters we defined so we need to get them back from the original # subnet_request. if self._subnetpool: orig_request = { 'name': subnet_request.name, 'network_id': subnet_request.network_id, 'subnetpool_id': subnet_request.subnetpool_id, 'enable_dhcp': subnet_request.enable_dhcp, 'dns_nameservers': subnet_request.dns_nameservers } subnet = super(InfobloxPool, self).allocate_subnet(subnet_request) subnet_request = subnet.get_details() subnet_request.name = orig_request['name'] subnet_request.network_id = orig_request['network_id'] subnet_request.subnetpool_id = orig_request['subnetpool_id'] subnet_request.enable_dhcp = orig_request['enable_dhcp'] subnet_request.dns_nameservers = orig_request['dns_nameservers'] # SubnetRequest must be SpecificSubnet at this point if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest): raise ipam_exc.InvalidSubnetRequestType( subnet_type=type(subnet_request)) neutron_subnet = self._build_subnet_from_request(subnet_request) ib_cxt = ib_context.InfobloxContext(self._context, self._context.user_id, None, neutron_subnet, self._grid_config, plugin=self._plugin) ipam_controller = ipam.IpamSyncController(ib_cxt) dns_controller = dns.DnsController(ib_cxt) ib_network = self._create_ib_network(rollback_list, ipam_controller) if ib_network: dns_controller.create_dns_zones(rollback_list) LOG.info("Created DNS zones.") return InfobloxSubnet(subnet_request, neutron_subnet, ib_network, ib_cxt)
def test_get_ip_allocator_for_hosts(self):
    """Host-record allocator enables DNS config only when DNS support
    is on and the zone creation strategy includes forward zones."""
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b05524826128d5b40b3'
    # prepare network and subnet
    network = self.plugin_stub.create_network(tenant_id, 'Test Network')
    subnet = self.plugin_stub.create_subnet(
        tenant_id, 'Test Subnet', network['id'], '11.11.1.0/24')
    self.grid_config.ip_allocation_strategy = (
        const.IP_ALLOCATION_STRATEGY_HOST_RECORD)
    default_strategy = const.GRID_CONFIG_DEFAULTS[
        const.EA_GRID_CONFIG_ZONE_CREATION_STRATEGY]
    # (dns_support, zone_creation_strategy, expected configure_for_dns)
    scenarios = [
        (True, default_strategy, True),
        (False, default_strategy, False),
        (True, [const.ZONE_CREATION_STRATEGY_REVERSE], False),
    ]
    for dns_support, zone_strategy, expected in scenarios:
        self.grid_config.dns_support = dns_support
        self.grid_config.zone_creation_strategy = zone_strategy
        ib_cxt = ib_context.InfobloxContext(
            self.ctx, user_id, network, subnet, self.grid_config,
            self.plugin)
        ip_allocator = ib_cxt._get_ip_allocator()
        self.assertEqual(expected,
                         ip_allocator.opts['configure_for_dns'])
def test_reserve_authority_member_with_dhcp_support(
        self, dbi_next_authority_mock, dbi_network_view_mock):
    """reserve_authority_member adopts the next authority member (CPM)
    when dhcp support is enabled.

    NOTE(review): the two mock arguments are presumably injected by
    mock.patch decorators not visible here -- confirm.
    """
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet
    subnet_name = 'Test Subnet'
    subnet_cidr = '11.11.2.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    self.grid_config.dhcp_support = True
    test_authority_member = utils.json_to_obj(
        'AuthorityMember',
        {'member_id': 'member-id', 'member_type': 'CPM',
         'member_ip': '11.11.1.11', 'member_ipv6': None,
         'member_dns_ip': '12.11.1.11', 'member_dns_ipv6': None,
         'member_dhcp_ip': None, 'member_dhcp_ipv6': None,
         'member_name': 'm1', 'member_status': 'ON',
         'member_wapi': '11.11.1.11'})
    dbi_next_authority_mock.return_value = test_authority_member
    test_network_view = utils.json_to_obj(
        'NetworkView',
        {'id': 'ZG5zLm5ldHdvcmtfdmlldyQ1', 'network_view': 'hs-view-1',
         'shared': False})
    dbi_network_view_mock.return_value = test_network_view
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.mapping.network_view = test_network_view.network_view
    ib_cxt.reserve_authority_member()
    # network view is unchanged and the mocked member is adopted
    self.assertEqual(test_network_view.network_view,
                     ib_cxt.mapping.network_view)
    self.assertEqual(test_authority_member.member_id,
                     ib_cxt.mapping.authority_member.member_id)
def test_network_view_mapping_conditions_with_single_scope(self):
    """With no matching condition and 'Single' scope, the 'default'
    network view and the GM member are used."""
    user_id = 'test user'
    tenant_id = 'test-tenant'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet with cidr that is not used in mapping conditions
    subnet_name = 'Test Subnet'
    subnet_cidr = '10.0.0.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    # make sure that no subnet cidr is used in mapping conditions
    db_conditions = dbi.get_mapping_conditions(
        self.ctx.session, grid_id=self.grid_id,
        neutron_object_value=subnet_cidr)
    self.assertEqual([], db_conditions)
    # check default network view when no mapping condition matches
    self.assertEqual('Single',
                     self.grid_config.default_network_view_scope)
    self.assertEqual('default', self.grid_config.default_network_view)
    # test default mapping as 'Single'
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.connector = mock.Mock()
    ib_cxt.ibom = mock.Mock()
    ib_cxt.ip_allocator = mock.Mock()
    # verify that 'default' view is used
    db_netviews = dbi.get_network_views(self.ctx.session,
                                        grid_id=self.grid_id)
    netview_row = utils.find_one_in_list('network_view', 'default',
                                         db_netviews)
    expected_netview_id = netview_row.id
    db_grid_members = dbi.get_members(self.ctx.session,
                                      grid_id=self.grid_id)
    member_row = utils.find_one_in_list('member_type', 'GM',
                                        db_grid_members)
    expected_member_name = member_row.member_name
    self.assertEqual(expected_netview_id, ib_cxt.mapping.network_view_id)
    self.assertEqual('default', ib_cxt.mapping.network_view)
    self.assertEqual(expected_member_name,
                     ib_cxt.mapping.authority_member.member_name)
def _test_reserve_service_members_with_ib_network_with_dhcp_member(
        self, test_dhcp_member, dns_support, expected_dns_members):
    """Reserve service members for an ib network that already has a
    dhcp member assigned; the existing member must be kept and the
    subnet gateway ip must be added as the 'routers' option.
    """
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet
    subnet_name = 'Test Subnet'
    subnet_cidr = '11.11.1.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    self.grid_config.dhcp_support = True
    self.grid_config.dns_support = dns_support
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    # avoid backend calls; pin the member lookups to the test member
    ib_cxt._register_services = mock.Mock()
    ib_cxt._get_dhcp_members = mock.Mock(return_value=[test_dhcp_member])
    ib_cxt._get_dns_members = mock.Mock(return_value=[test_dhcp_member])
    # ib network with dhcp member assigned
    connector = mock.Mock()
    test_ib_network = ib_objects.NetworkV4(connector,
                                           network_view='test-view',
                                           cidr='12.12.1.0/24')
    test_ib_network.members = [
        ib_objects.AnyMember(_struct='dhcpmember',
                             name=test_dhcp_member.member_name,
                             ipv4addr=test_dhcp_member.member_ip)]
    test_ib_network.options = [
        ib_objects.DhcpOption(name='domain-name-servers',
                              value=test_dhcp_member.member_ip)]
    ib_cxt.reserve_service_members(test_ib_network)
    self.assertEqual([test_dhcp_member], ib_cxt.mapping.dhcp_members)
    self.assertEqual(expected_dns_members, ib_cxt.mapping.dns_members)
    actual_opt_router = [opt for opt in test_ib_network.options
                         if opt.name == 'routers']
    self.assertEqual(subnet['gateway_ip'], actual_opt_router[0].value)
def test_network_view_mapping_conditions_with_subnet_cidr_condition(self):
    """A subnet-cidr mapping condition selects its network view and
    authority member."""
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet with cidr used in mapping conditions
    subnet_name = 'Test Subnet'
    subnet_cidr = '12.12.2.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    # make sure that mapping condition exists and prepare expectations
    db_conditions = dbi.get_mapping_conditions(
        self.ctx.session, grid_id=self.grid_id,
        neutron_object_value=subnet_cidr)
    self.assertEqual(1, len(db_conditions))
    expected_network_view_id = db_conditions[0].network_view_id
    db_network_views = dbi.get_network_views(self.ctx.session,
                                             grid_id=self.grid_id)
    expected_netview_row = utils.find_one_in_list(
        'id', expected_network_view_id, db_network_views)
    expected_authority_member_id = expected_netview_row.authority_member_id
    expected_network_view = expected_netview_row.network_view
    # prepare network view mapping to neutron network and subnet
    dbi.associate_network_view(self.ctx.session,
                               expected_network_view_id,
                               network['id'], subnet['id'])
    # test mapping where both tenant id and tenant cidr match
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.connector = mock.Mock()
    ib_cxt.ibom = mock.Mock()
    ib_cxt.ip_allocator = mock.Mock()
    # validate mapping
    self.assertEqual(expected_network_view_id,
                     ib_cxt.mapping.network_view_id)
    self.assertEqual(expected_network_view, ib_cxt.mapping.network_view)
    self.assertEqual(expected_authority_member_id,
                     ib_cxt.mapping.authority_member.member_id)
def _test_reserve_service_members_with_ib_network_gm_owned(
        self, dns_support):
    """Reserve service members for a GM-owned ib network with no
    members or options: the db-known dhcp service member is used and
    the gateway ip is added as the 'routers' option.
    """
    user_id = 'test user'
    tenant_id = 'tenant-id'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet
    subnet_name = 'Test Subnet'
    subnet_cidr = '11.11.1.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    self.grid_config.dhcp_support = True
    self.grid_config.dns_support = dns_support
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    # avoid real backend calls during service registration
    ib_cxt._register_services = mock.Mock()
    dhcp_members = dbi.get_service_members(
        self.ctx.session,
        network_view_id=ib_cxt.mapping.network_view_id,
        service=const.SERVICE_TYPE_DHCP)
    expected_dhcp_member = dhcp_members[0]
    # ib network with no members and options
    connector = mock.Mock()
    test_ib_network = ib_objects.NetworkV4(connector,
                                           network_view='default',
                                           cidr='12.12.1.0/24')
    test_ib_network.members = []
    test_ib_network.options = []
    ib_cxt.reserve_service_members(test_ib_network)
    expected_dns_members = (ib_cxt.mapping.dhcp_members
                            if dns_support else [])
    self.assertEqual(expected_dhcp_member.member_id,
                     ib_cxt.mapping.dhcp_members[0].member_id)
    self.assertEqual(expected_dns_members, ib_cxt.mapping.dns_members)
    actual_opt_router = [opt for opt in test_ib_network.options
                         if opt.name == 'routers']
    self.assertEqual(subnet['gateway_ip'], actual_opt_router[0].value)
def update_network_sync(self, need_new_zones=False):
    """Updates EAs for each subnet that belongs to the updated network.

    :param need_new_zones: when True, recreate DNS zones for each
        subnet (e.g. the network name feeds the domain suffix pattern
        and has changed); otherwise only update existing zones.
    """
    session = self.ib_cxt.context.session
    network = self.ib_cxt.network
    network_id = network.get('id')
    subnets = dbi.get_subnets_by_network_id(session, network_id)
    for subnet in subnets:
        network_view = None
        cidr = subnet.get('cidr')
        subnet_id = subnet.get('id')
        netview_mappings = dbi.get_network_view_mappings(
            session, network_id=network_id, subnet_id=subnet_id)
        if netview_mappings:
            netview_row = utils.find_one_in_list(
                'id', netview_mappings[0].network_view_id,
                self.ib_cxt.discovered_network_views)
            network_view = netview_row.network_view
        ib_network = None
        if network_view:
            # NOTE(review): the EA refresh is grouped under this branch
            # so update_network_options never receives ib_network=None;
            # confirm against the upstream source.
            ib_network = self.ib_cxt.ibom.get_network(network_view, cidr)
            ea_network = eam.get_ea_for_network(self.ib_cxt.user_id,
                                                self.ib_cxt.tenant_id,
                                                self.ib_cxt.tenant_name,
                                                network, subnet)
            self.ib_cxt.ibom.update_network_options(ib_network,
                                                    ea_network)
        if need_new_zones:
            # Need context with ib_network to create zones
            ib_cxt = context.InfobloxContext(self.ib_cxt.context,
                                             self.ib_cxt.user_id,
                                             network,
                                             subnet,
                                             self.grid_config,
                                             self.ib_cxt.plugin,
                                             ib_network=ib_network)
            dns_controller = dns.DnsController(ib_cxt)
            rollback_list = []
            dns_controller.create_dns_zones(rollback_list)
        else:
            self.ib_cxt.subnet = subnet
            dns_controller = dns.DnsController(self.ib_cxt)
            dns_controller.update_dns_zones()
def get_subnet(self, subnet_id):
    """Retrieve an IPAM subnet.

    :param subnet_id: Neutron subnet identifier
    :returns: a InfobloxSubnet instance
    """
    neutron_subnet = self._fetch_subnet(subnet_id)
    request = self._build_request_from_subnet(neutron_subnet)
    ib_cxt = ib_context.InfobloxContext(
        self._context, self._context.user_id, None, neutron_subnet,
        self._grid_config, plugin=self._plugin)
    # resolve the backing Infoblox network for this subnet
    backend_network = ipam.IpamSyncController(ib_cxt).get_subnet()
    return InfobloxSubnet(request, neutron_subnet, backend_network,
                          ib_cxt)
def test_get_dns_members_without_dhcp_support(self):
    """With DHCP support off, the GM authority member is the only
    grid primary and there are no grid secondaries."""
    user_id = 'test user'
    tenant_id = '90fbad5a098a4b7cb98826128d5b40b3'
    # prepare network and subnet
    net = self.plugin_stub.create_network(tenant_id, 'Test Network')
    sub = self.plugin_stub.create_subnet(
        tenant_id, 'Test Subnet', net['id'], '11.11.1.0/24')
    self.grid_config.dhcp_support = False
    ib_cxt = ib_context.InfobloxContext(
        self.ctx, user_id, net, sub, self.grid_config, self.plugin)
    gm_member = utils.json_to_obj(
        'AuthorityMember',
        {'member_id': 'member-id', 'member_type': 'GM',
         'member_ip': '11.11.1.11', 'member_ipv6': None,
         'member_name': 'm1', 'member_status': 'ON',
         'member_wapi': '11.11.1.11'})
    ib_cxt.mapping.authority_member = gm_member
    grid_primaries, grid_secondaries = ib_cxt.get_dns_members()
    # the single primary must carry the GM member's name
    expected_primary = ib_objects.AnyMember(
        _struct='memberserver', name=gm_member.member_name)
    self.assertEqual(expected_primary.name, grid_primaries[0].name)
    self.assertEqual(None, grid_secondaries)
def remove_subnet(self, subnet_id):
    """Remove IPAM Subnet.

    Removes a subnet from the Infoblox backend; a subnet with no
    backing Infoblox network is silently ignored.
    """
    backend_network = self._get_ib_network(subnet_id)
    if not backend_network:
        # nothing to clean up on the backend side
        return
    neutron_subnet = self._build_subnet_from_ib_network(backend_network)
    ib_cxt = ib_context.InfobloxContext(
        self._context, self._context.user_id, None, neutron_subnet,
        self._grid_config, plugin=self._plugin,
        ib_network=backend_network)
    ipam_controller = ipam.IpamSyncController(ib_cxt)
    dns_controller = dns.DnsController(ib_cxt)
    # drop the network first, then its DNS zones
    ipam_controller.delete_subnet(backend_network)
    dns_controller.delete_dns_zones(ib_network=backend_network)
def update_network_sync(self, payload):
    """Notifies that the network property has been updated.

    Records a network rename in the local db and, when the domain
    suffix pattern embeds the network name, asks the IPAM controller
    to create new zones for the network's subnets.

    :param payload: notification payload containing 'network'
    """
    network = payload.get('network')
    if self.traceable:
        LOG.info("Updated network: %s", network)
    ib_context = context.InfobloxContext(self.context, self.user_id,
                                         network, None,
                                         self.grid_config, self.plugin)
    ipam_controller = ipam.IpamAsyncController(ib_context)
    network_id = network.get('id')
    new_name = network.get('name')
    old_network = dbi.get_network(self.context.session, network_id)
    need_new_zones = False
    # only act on an actual rename (or first sighting of the network)
    if new_name is not None and (old_network is None or
                                 new_name != old_network.network_name):
        dbi.add_or_update_network(self.context.session, network_id,
                                  new_name)
        pattern = self.grid_config.default_domain_name_pattern
        # a rename invalidates zones only if the pattern uses the name
        if '{network_name}' in pattern:
            need_new_zones = True
    ipam_controller.update_network_sync(need_new_zones)
def test_network_view_mapping_conditions_with_tenant_scope(self):
    """With no matching condition and 'Tenant' scope, a per-tenant
    network view name is generated and no authority member is set."""
    user_id = 'test user'
    tenant_id = 'test-tenant-id'
    # prepare network
    network_name = 'Test Network'
    network = self.plugin_stub.create_network(tenant_id, network_name)
    # prepare subnet with cidr that is not used in mapping conditions
    subnet_name = 'Test Subnet'
    subnet_cidr = '10.0.0.0/24'
    subnet = self.plugin_stub.create_subnet(tenant_id, subnet_name,
                                            network['id'], subnet_cidr)
    # make sure that no subnet cidr is used in mapping conditions
    db_conditions = dbi.get_mapping_conditions(
        self.ctx.session, grid_id=self.grid_id,
        neutron_object_value=subnet_cidr)
    self.assertEqual([], db_conditions)
    # set default network view scope to 'Tenant'
    self.grid_config.default_network_view_scope = (
        const.NETWORK_VIEW_SCOPE_TENANT)
    # test default mapping as 'Tenant'
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, network,
                                        subnet, self.grid_config,
                                        self.plugin)
    ib_cxt.connector = mock.Mock()
    ib_cxt.ibom = mock.Mock()
    ib_cxt.ip_allocator = mock.Mock()
    # validate the mapping network view
    expected_netview = utils.generate_network_view_name(tenant_id)
    # view does not exist yet, so no id and no authority member
    self.assertIsNone(ib_cxt.mapping.network_view_id)
    self.assertEqual(expected_netview, ib_cxt.mapping.network_view)
    self.assertEqual(None, ib_cxt.mapping.authority_member)
def _process_port(self, port, event, instance_name=None):
    """Bind or unbind DNS names for every fixed ip of a port.

    :param port: neutron port dict
    :param event: event label used only for logging
    :param instance_name: when given, names are bound to the instance;
        when None, names are unbound
    """
    for fixed_ip in port['fixed_ips']:
        subnet_id = fixed_ip['subnet_id']
        subnet = self.plugin.get_subnet(self.context, subnet_id)
        if not subnet:
            LOG.warning("No subnet was found for subnet_id=%s",
                        subnet_id)
            continue
        ib_context = context.InfobloxContext(
            self.context, self.user_id, None, subnet,
            self.grid_config, self.plugin, self._cached_grid_members,
            self._cached_network_views,
            self._cached_mapping_conditions)
        dns_controller = dns.DnsController(ib_context)
        if instance_name is not None:
            dns_controller.bind_names(fixed_ip['ip_address'],
                                      instance_name, port['id'],
                                      port['tenant_id'],
                                      port['device_id'],
                                      port['device_owner'],
                                      port_name=port['name'])
            LOG.info(
                "%s sync: ip = %s, instance name = %s, "
                "port id = %s, device id: %s, device owner: %s",
                event, fixed_ip['ip_address'], instance_name,
                port['id'], port['device_id'], port['device_owner'])
        else:
            dns_controller.unbind_names(
                fixed_ip['ip_address'], None, port['id'],
                port['tenant_id'], None,
                const.NEUTRON_DEVICE_OWNER_COMPUTE_NOVA,
                port_name=port['name'])
def test_reserve_authority_member_without_dhcp_support(
        self, dbi_next_authority_mock, dbi_network_view_mock):
    """reserve_authority_member adopts the next authority member (GM)
    when dhcp support is disabled, even without network/subnet.

    NOTE(review): the two mock arguments are presumably injected by
    mock.patch decorators not visible here -- confirm.
    """
    user_id = 'test user'
    self.grid_config.dhcp_support = False
    self.grid_config.dns_view = 'test-view'
    test_authority_member = utils.json_to_obj(
        'AuthorityMember',
        {'member_id': 'member-id', 'member_type': 'GM',
         'member_ip': '11.11.1.10', 'member_ipv6': None,
         'member_dns_ip': '12.11.1.11', 'member_dns_ipv6': None,
         'member_dhcp_ip': None, 'member_dhcp_ipv6': None,
         'member_wapi': '11.11.1.10'})
    dbi_next_authority_mock.return_value = test_authority_member
    test_network_view = utils.json_to_obj(
        'NetworkView',
        {'id': 'test-id', 'network_view': 'test-view',
         'shared': False})
    dbi_network_view_mock.return_value = test_network_view
    ib_cxt = ib_context.InfobloxContext(self.ctx, user_id, None, None,
                                        self.grid_config, self.plugin)
    ib_cxt.mapping.network_view = test_network_view.network_view
    ib_cxt.reserve_authority_member()
    # network view is unchanged and the mocked member is adopted
    self.assertEqual(test_network_view.network_view,
                     ib_cxt.mapping.network_view)
    self.assertEqual(test_authority_member.member_id,
                     ib_cxt.mapping.authority_member.member_id)
def sync_neutron_to_infoblox(context, credentials, grid_manager):
    """Sync neutron objects to Infoblox grid

    Migrates existing neutron state into the Infoblox backend in three
    passes: (1) every subnet (creating networks and DNS zones, with
    rollback on failure), (2) every port's fixed IPs (allocating the IP
    and binding DNS names), and (3) - only when 'delete_unknown_ips' is
    configured - deleting fixed addresses Infoblox holds that no neutron
    port owns.

    Prerequisites:
    1. network views to sync must have "Cloud Adapter ID" EA set.
    2. infoblox agent sync should have been processed and updated
       members and network views.

    :param context: admin context; its db session is used for mapping and
        floating-ip lookups.
    :param credentials: keystone credential kwargs for the neutron client;
        must also carry a 'session' entry usable by the nova client.
    :param grid_manager: grid manager holding the active grid config.
    """
    LOG.info("Starting migration...\n")

    delete_unknown_ips = cfg.CONF.delete_unknown_ips
    grid_config = grid_manager.grid_config
    grid_id = grid_config.grid_id
    session = context.session

    # Pull the full neutron inventory up front; nothing to do without
    # at least one network and one subnet.
    neutron_api = neutron_client.Client(**credentials)
    payload = neutron_api.list_networks()
    networks = payload['networks']
    if not networks:
        LOG.info("No network exists...Exiting...")
        return

    payload = neutron_api.list_subnets()
    subnets = payload['subnets']
    if not subnets:
        LOG.info("No subnet exists...Exiting...")
        return

    payload = neutron_api.list_ports()
    ports = payload['ports']

    nova_api = nova_client.Client(NOVA_API_VERSION,
                                  session=credentials['session'])

    # Index instance names by instance id and by floating ip so port sync
    # can attach the right hostname to DNS bindings.
    instance_names_by_instance_id = dict()
    instance_names_by_floating_ip = dict()
    for server in nova_api.servers.list(search_opts={'all_tenants': 1}):
        instance_names_by_instance_id[server.id] = server.name
        floating_ips = []
        for net in server.addresses:
            floating_ips += [ip['addr']
                             for ip in server.addresses[net]
                             if ip['OS-EXT-IPS:type'] == 'floating']
        for fip in floating_ips:
            instance_names_by_floating_ip[fip] = server.name

    user_id = neutron_api.httpclient.get_user_id()
    user_tenant_id = neutron_api.httpclient.get_project_id()

    # Networks created/seen during subnet sync; only used by the
    # delete_unknown_ips pass at the end.
    ib_networks = []

    should_exit = False
    # sync subnets
    for subnet in subnets:
        subnet_id = subnet['id']
        subnet_name = subnet['name']
        network_id = subnet['network_id']
        network = utils.find_one_in_list('id', network_id, networks)
        if not network:
            LOG.warning("network (%s) is not found. "
                        "Skipping subnet (%s)", network_id, subnet_id)
            continue
        network_name = network['name']

        ib_cxt = ib_context.InfobloxContext(context, user_id, network,
                                            subnet, grid_config,
                                            plugin=neutron_api)
        db_mapped_netview = dbi.get_network_view_by_mapping(
            session, grid_id=grid_id, network_id=network_id,
            subnet_id=subnet_id)
        if db_mapped_netview:
            # Already mapped: subnet was migrated before; only remember
            # the backing network if the cleanup pass will need it.
            LOG.info("Mapping found for network (%s), subnet (%s)",
                     network_name, subnet_name)
            if len(db_mapped_netview) > 1:
                # Fixed grammar in log message ("that" -> "than").
                LOG.warning("More than one db_mapped_netview returned")
            if delete_unknown_ips:
                ib_network = ib_objects.Network.search(
                    ib_cxt.connector,
                    network_view=db_mapped_netview[0].network_view,
                    cidr=subnet.get('cidr'))
                ib_networks.append(ib_network)
            continue

        ipam_controller = ipam.IpamSyncController(ib_cxt)
        dns_controller = dns.DnsController(ib_cxt)

        rollback_list = []
        try:
            ib_network = ipam_controller.create_subnet(rollback_list)
            if ib_network:
                if delete_unknown_ips:
                    ib_networks.append(ib_network)
                dns_controller.create_dns_zones(rollback_list)
                LOG.info("Created network (%s), subnet (%s)",
                         network_name, subnet_name)
        except Exception as e:
            LOG.error(_LE("Error occurred: %(error)s"), {'error': e})
            # Undo whatever was created for this subnet, newest first.
            for ib_obj in reversed(rollback_list):
                try:
                    ib_obj.delete()
                except ib_exc.InfobloxException as del_err:
                    # Distinct name: re-binding 'e' here would implicitly
                    # delete the outer handler's 'e' in Python 3.
                    LOG.warning(_LW("Unable to delete %(obj)s due to "
                                    "error: %(error)s."),
                                {'obj': ib_obj, 'error': del_err})
            should_exit = True
            break

    if should_exit:
        LOG.info("Exiting due to the error in creating subnet...")
        return

    # sync ports
    for port in ports:
        port_id = port['id']
        port_name = port['name']
        port_mac_address = port['mac_address']
        tenant_id = port.get('tenant_id') or user_tenant_id
        network_id = port['network_id']
        device_owner = port['device_owner']
        device_id = port['device_id']
        instance_name = (instance_names_by_instance_id[device_id]
                         if device_id in instance_names_by_instance_id
                         else None)

        network = utils.find_one_in_list('id', network_id, networks)
        if not network:
            LOG.error("network (%s) not found", network_id)
            break

        for ip_set in port.get('fixed_ips'):
            subnet_id = ip_set['subnet_id']
            ip_address = ip_set['ip_address']
            LOG.info("Adding port for %s: %s...", device_owner, ip_address)

            subnet = utils.find_one_in_list('id', subnet_id, subnets)
            if not subnet:
                should_exit = True
                LOG.error("subnet (%s) not found", subnet_id)
                break

            ib_cxt = ib_context.InfobloxContext(context, user_id, network,
                                                subnet, grid_config,
                                                plugin=neutron_api)
            connector = ib_cxt.connector
            netview = ib_cxt.mapping.network_view
            # Skip IPs that Infoblox already tracks.
            search_fields = {'network_view': netview,
                             'ip_address': ip_address}
            obj_type = ('ipv4address'
                        if utils.get_ip_version(ip_address) == 4
                        else 'ipv6address')
            ib_address = connector.get_object(obj_type,
                                              search_fields,
                                              return_fields=['objects'],
                                              force_proxy=True)
            if ib_address and ib_address[0]['objects']:
                LOG.info("%s is found...no need to create", ip_address)
                continue

            ipam_controller = ipam.IpamSyncController(ib_cxt)
            dns_controller = dns.DnsController(ib_cxt)

            # for a floating ip port, check for its association.
            # if associated, then port info needs to be the associated port,
            # not the floating ip port because the associated port contains
            # actual attached device info
            is_floating_ip = False
            if ip_address in instance_names_by_floating_ip:
                db_floatingip = dbi.get_floatingip_by_ip_address(session,
                                                                ip_address)
                db_port = dbi.get_port_by_id(session,
                                             db_floatingip.fixed_port_id)
                port_id = db_port.id
                port_name = db_port.name
                tenant_id = db_port.tenant_id
                device_id = db_port.device_id
                device_owner = db_port.device_owner
                instance_name = instance_names_by_floating_ip[ip_address]
                is_floating_ip = True

            allocated_ip = ipam_controller.allocate_specific_ip(
                ip_address, port_mac_address, port_id, tenant_id,
                device_id, device_owner)
            if allocated_ip and device_owner:
                try:
                    dns_controller.bind_names(
                        allocated_ip, instance_name, port_id, tenant_id,
                        device_id, device_owner, is_floating_ip, port_name)
                except Exception as e:
                    # Bind failed: release the IP we just allocated and
                    # stop the migration.
                    should_exit = True
                    LOG.error("Unable to allocate ip (%s): %s",
                              ip_address, e)
                    ipam_controller.deallocate_ip(allocated_ip)
                    break
            LOG.info("Allocated %s", ip_address)

        if should_exit:
            # Fixed typo in log message ("Existing" -> "Exiting").
            LOG.info("Exiting due to error in port creation...")
            break

    if delete_unknown_ips:
        LOG.info("Start deleting unknown Fixed IP's from Infoblox...")
        for ib_network in ib_networks:
            nw_ea = ib_network.extattrs
            # Skip network if it doesn't have EA or if EA indicates it's
            # shared or external.
            if (not nw_ea or
                    nw_ea.get('Is External') or nw_ea.get('Is Shared')):
                continue
            LOG.info("Searching for Fixed IP: network_view='%s', cidr='%s'" %
                     (ib_network.network_view, ib_network.network))
            # NOTE(review): ib_cxt is whatever context the last loop
            # iteration left behind; if no subnet/port produced one this
            # raises NameError - confirm a grid-wide connector is intended.
            fixed_ips = ib_objects.FixedAddress.search_all(
                ib_cxt.connector,
                network_view=ib_network.network_view,
                network=ib_network.network)
            if not fixed_ips:
                LOG.info("No FixedIP found: network_view='%s', cidr='%s'" %
                         (ib_network.network_view, ib_network.network))
                continue
            for fixed_ip in fixed_ips:
                ea = fixed_ip.extattrs
                port_id = None
                if ea:
                    port_id = ea.get('Port ID')
                # Delete Fixed IP if:
                # - Fixed IP does not have 'Port ID' EA, or
                # - No port_id in neutron matches 'Port ID' EA value
                if not (port_id and
                        utils.find_one_in_list('id', port_id, ports)):
                    LOG.info("Deleting Fixed IP from Infoblox: '%s'" %
                             fixed_ip)
                    fixed_ip.delete()

    LOG.info("Ending migration...")