def _create_flip(context, flip, port_fixed_ips):
    """Associate *flip* with its ports and register it with the flip driver.

    :param context: neutron api request context.
    :param flip: quark.db.models.IPAddress object representing a floating IP
    :param port_fixed_ips: dictionary of the structure:
        {"<id of port>": {"port": <quark.db.models.Port>,
                          "fixed_ip": "<fixed ip address>"}}
    :return: None
    """
    # Nothing to associate — bail out without opening a transaction.
    if not port_fixed_ips:
        return
    context.session.begin()
    try:
        port_models = [entry['port'] for entry in port_fixed_ips.values()]
        flip = db_api.port_associate_ip(context, port_models, flip,
                                        port_fixed_ips.keys())
        # Record the fixed ip each port maps the flip onto.
        for entry in port_fixed_ips.values():
            flip = db_api.floating_ip_associate_fixed_ip(context, flip,
                                                         entry['fixed_ip'])
        # Register with the external driver before committing so a driver
        # failure rolls the database changes back as well.
        driver = registry.DRIVER_REGISTRY.get_driver()
        driver.register_floating_ip(flip, port_fixed_ips)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise
    # alexm: Notify from this method for consistency with _delete_flip
    billing.notify(context, billing.IP_ASSOC, flip)
def _create_flip(context, flip, port_fixed_ips):
    """Associates the flip with ports and creates it with the flip driver

    :param context: neutron api request context.
    :param flip: quark.db.models.IPAddress object representing a floating IP
    :param port_fixed_ips: dictionary of the structure:
    {"<id of port>": {"port": <quark.db.models.Port>,
    "fixed_ip": "<fixed ip address>"}}
    :return: None
    """
    # No ports supplied means nothing to associate; skip entirely.
    if port_fixed_ips:
        context.session.begin()
        try:
            ports = [val['port'] for val in port_fixed_ips.values()]
            # Create the port<->IP association rows first...
            flip = db_api.port_associate_ip(context, ports, flip,
                                            port_fixed_ips.keys())
            # ...then record which fixed ip the flip maps to on each port.
            for port_id in port_fixed_ips:
                fixed_ip = port_fixed_ips[port_id]['fixed_ip']
                flip = db_api.floating_ip_associate_fixed_ip(
                    context, flip, fixed_ip)
            # Register with the external driver inside the transaction so a
            # driver failure also rolls back the DB associations.
            flip_driver = registry.DRIVER_REGISTRY.get_driver()
            flip_driver.register_floating_ip(flip, port_fixed_ips)
            context.session.commit()
        except Exception:
            context.session.rollback()
            raise
        # alexm: Notify from this method for consistency with _delete_flip
        billing.notify(context, billing.IP_ASSOC, flip)
def deallocate_ip_address(self, context, address):
    """Deallocate *address* and emit an ip.delete usage notification.

    v6 addresses are deleted from the database outright; v4 addresses are
    only flagged as deallocated so the row can be recycled later.
    """
    if address["version"] != 6:
        # v4: keep the row for reuse, just mark it deallocated.
        address["deallocated"] = 1
        address["address_type"] = None
    else:
        db_api.ip_address_delete(context, address)
    notify(context, 'ip.delete', address, send_usage=True)
def test_env_cap_enabled(self, notifier):
    # With the 'ip_billing' capability enabled, billing.notify() must emit
    # exactly one message through the notifier.
    # NOTE(review): `notifier` is injected by a mock decorator defined
    # outside this chunk — confirm against the enclosing test class.
    cfg.CONF.set_override('environment_capabilities',
                          'security_groups,ip_billing',
                          'QUARK')
    ipaddress = get_fake_fixed_address()
    ipaddress.allocated_at = datetime.datetime.utcnow()
    billing.notify(self.context, billing.IP_ADD, ipaddress)
    notifier.assert_called_once_with('network')
    # Restore the default so other tests are unaffected.
    cfg.CONF.clear_override('environment_capabilities', 'QUARK')
def deallocate_ip_address(self, context, address, **kwargs):
    """Release *address* and emit an IP_DEL billing event.

    v6 entries are removed from the database entirely, while v4 entries
    are merely flagged deallocated so the address can be recycled.
    Extra keyword arguments are forwarded to billing.notify().
    """
    is_v6 = address["version"] == 6
    if is_v6:
        db_api.ip_address_delete(context, address)
    else:
        # v4: retain the row for later reuse.
        address["deallocated"] = 1
        address["address_type"] = None
    billing.notify(context, billing.IP_DEL, address, send_usage=True,
                   **kwargs)
def test_env_cap_disabled(self, notifier):
    # With no environment capabilities enabled, billing.notify() must be
    # a no-op and never touch the notifier.
    # NOTE(review): `notifier` is injected by a mock decorator defined
    # outside this chunk — confirm against the enclosing test class.
    cfg.CONF.set_override('environment_capabilities', '', 'QUARK')
    ipaddress = get_fake_fixed_address()
    ipaddress.allocated_at = datetime.datetime.utcnow()
    billing.notify(self.context, billing.IP_ADD, ipaddress)
    self.assertFalse(notifier.called)
    # Restore the default so other tests are unaffected.
    cfg.CONF.clear_override('environment_capabilities', 'QUARK')
def _delete_flip(context, id, address_type):
    """Deallocate a floating/scaling IP, detach its ports, and notify.

    :param context: neutron api request context.
    :param id: id of the flip (FLOATING) or scip (SCALING) to delete
    :param address_type: ip_types.FLOATING | ip_types.SCALING
    :raises FloatingIpNotFound: when no non-deallocated flip matches `id`.
    """
    filters = {'address_type': address_type, '_deallocated': False}
    flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                   **filters)
    if not flip:
        raise q_exc.FloatingIpNotFound(id=id)
    current_ports = flip.ports
    # A FLOATING ip is associated with at most one port; a SCALING ip may
    # span several, so all of them must be disassociated.
    if address_type == ip_types.FLOATING:
        if current_ports:
            current_ports = [flip.ports[0]]
    elif address_type == ip_types.SCALING:
        current_ports = flip.ports
    context.session.begin()
    try:
        strategy_name = flip.network.get('ipam_strategy')
        ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
        ipam_driver.deallocate_ip_address(context, flip)
        if current_ports:
            db_api.port_disassociate_ip(context, current_ports, flip)
        if flip.fixed_ips:
            db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise
    # Driver cleanup happens after the DB commit; a driver failure is only
    # logged so the DB state (already cleaned) is preserved.
    try:
        driver = registry.DRIVER_REGISTRY.get_driver()
        driver.remove_floating_ip(flip)
    except Exception as e:
        # NOTE(review): `e.message` is Python 2 only — would need str(e)
        # if this file is ever ported to Python 3.
        LOG.error('There was an error when trying to delete the floating ip '
                  'on the unicorn API. The ip has been cleaned up, but '
                  'may need to be handled manually in the unicorn API. '
                  'Error: %s' % e.message)
    # alexm: Notify from this method because we don't have the flip object
    # in the callers
    billing.notify(context, billing.IP_DISASSOC, flip)
def _update_flip(context, flip_id, ip_type, requested_ports):
    """Update a flip based IPAddress

    :param context: neutron api request context.
    :param flip_id: id of the flip or scip
    :param ip_type: ip_types.FLOATING | ip_types.SCALING
    :param requested_ports: dictionary of the structure:
    {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
    :return: quark.models.IPAddress
    """
    # This list will hold flips that require notifications.
    # Using sets to avoid dups, if any.
    notifications = {billing.IP_ASSOC: set(), billing.IP_DISASSOC: set()}

    context.session.begin()
    try:
        flip = db_api.floating_ip_find(context, id=flip_id,
                                       scope=db_api.ONE)
        if not flip:
            if ip_type == ip_types.SCALING:
                raise q_exc.ScalingIpNotFound(id=flip_id)
            raise q_exc.FloatingIpNotFound(id=flip_id)
        current_ports = flip.ports

        # Determine what ports are being removed, being added, and remain
        req_port_ids = [request_port.get('port_id')
                        for request_port in requested_ports]
        curr_port_ids = [curr_port.id for curr_port in current_ports]
        added_port_ids = [port_id for port_id in req_port_ids
                          if port_id and port_id not in curr_port_ids]
        removed_port_ids = [port_id for port_id in curr_port_ids
                            if port_id not in req_port_ids]
        remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)

        # Validations just for floating ip types
        if (ip_type == ip_types.FLOATING and curr_port_ids and
                curr_port_ids == req_port_ids):
            d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
            raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
        if (ip_type == ip_types.FLOATING and
                not curr_port_ids and not req_port_ids):
            raise q_exc.FloatingIpUpdateNoPortIdSupplied()

        # Validate that GW IP is not in use on the NW.
        flip_subnet = v._make_subnet_dict(flip.subnet)
        for added_port_id in added_port_ids:
            port = _get_port(context, added_port_id)
            nw = port.network
            nw_ports = v._make_ports_list(nw.ports)
            fixed_ips = [ip.get('ip_address') for p in nw_ports
                         for ip in p.get('fixed_ips')]
            gw_ip = flip_subnet.get('gateway_ip')
            if gw_ip in fixed_ips:
                port_with_gateway_ip = None
                for port in nw_ports:
                    for ip in port.get('fixed_ips'):
                        # NOTE(review): this is a substring containment
                        # check on the address string, not equality —
                        # looks like `==` may be intended; also the break
                        # only exits the inner loop. Confirm before
                        # changing.
                        if gw_ip in ip.get('ip_address'):
                            port_with_gateway_ip = port
                            break
                port_id = port_with_gateway_ip.get('id')
                network_id = port_with_gateway_ip.get('network_id')
                raise q_exc.FixedIpAllocatedToGatewayIp(
                    port_id=port_id, network_id=network_id)

        port_fixed_ips = {}

        # Keep the ports and fixed ips that have not changed
        for port_id in remaining_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}

        # Disassociate the ports and fixed ips from the flip that were
        # associated to the flip but are not anymore
        for port_id in removed_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            flip = db_api.port_disassociate_ip(context, [port], flip)
            notifications[billing.IP_DISASSOC].add(flip)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            if fixed_ip:
                flip = db_api.floating_ip_disassociate_fixed_ip(
                    context, flip, fixed_ip)

        # Validate the new ports with the flip and associate the new ports
        # and fixed ips with the flip
        for port_id in added_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            if not port:
                raise n_exc.PortNotFound(port_id=port_id)
            # A port may carry at most one floating/scaling address.
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.FLOATING)):
                raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.SCALING)):
                raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
            fixed_ip = _get_next_available_fixed_ip(port)
            LOG.info('new fixed ip: %s' % fixed_ip)
            if not fixed_ip:
                raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
            flip = db_api.port_associate_ip(context, [port], flip,
                                            [port_id])
            notifications[billing.IP_ASSOC].add(flip)
            flip = db_api.floating_ip_associate_fixed_ip(
                context, flip, fixed_ip)

        flip_driver = registry.DRIVER_REGISTRY.get_driver()
        # If there are not any remaining ports and no new ones are being
        # added, remove the floating ip from unicorn
        if not remaining_port_ids and not added_port_ids:
            flip_driver.remove_floating_ip(flip)
        # If new ports are being added but there previously was not any
        # ports, then register a new floating ip with the driver because
        # it is assumed it does not exist
        elif added_port_ids and not curr_port_ids:
            flip_driver.register_floating_ip(flip, port_fixed_ips)
        else:
            flip_driver.update_floating_ip(flip, port_fixed_ips)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise

    # Send notifications for possible associate/disassociate events
    for notif_type, flip_set in notifications.iteritems():
        for flip in flip_set:
            billing.notify(context, notif_type, flip)

    # NOTE(blogan): ORM does not seem to update the model to the real state
    # of the database, so I'm doing an explicit refresh for now.
    context.session.refresh(flip)
    return flip
def _update_flip(context, flip_id, ip_type, requested_ports):
    """Update a flip based IPAddress

    :param context: neutron api request context.
    :param flip_id: id of the flip or scip
    :param ip_type: ip_types.FLOATING | ip_types.SCALING
    :param requested_ports: dictionary of the structure:
    {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
    :return: quark.models.IPAddress
    """
    # This list will hold flips that require notifications.
    # Using sets to avoid dups, if any.
    notifications = {
        billing.IP_ASSOC: set(),
        billing.IP_DISASSOC: set()
    }

    context.session.begin()
    try:
        flip = db_api.floating_ip_find(context, id=flip_id,
                                       scope=db_api.ONE)
        if not flip:
            if ip_type == ip_types.SCALING:
                raise q_exc.ScalingIpNotFound(id=flip_id)
            raise q_exc.FloatingIpNotFound(id=flip_id)
        current_ports = flip.ports

        # Determine what ports are being removed, being added, and remain
        req_port_ids = [request_port.get('port_id')
                        for request_port in requested_ports]
        curr_port_ids = [curr_port.id for curr_port in current_ports]
        added_port_ids = [port_id for port_id in req_port_ids
                          if port_id and port_id not in curr_port_ids]
        removed_port_ids = [port_id for port_id in curr_port_ids
                            if port_id not in req_port_ids]
        remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)

        # Validations just for floating ip types
        if (ip_type == ip_types.FLOATING and curr_port_ids and
                curr_port_ids == req_port_ids):
            d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
            raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
        if (ip_type == ip_types.FLOATING and
                not curr_port_ids and not req_port_ids):
            raise q_exc.FloatingIpUpdateNoPortIdSupplied()

        # Validate that GW IP is not in use on the NW.
        flip_subnet = v._make_subnet_dict(flip.subnet)
        for added_port_id in added_port_ids:
            port = _get_port(context, added_port_id)
            nw = port.network
            nw_ports = v._make_ports_list(nw.ports)
            fixed_ips = [ip.get('ip_address') for p in nw_ports
                         for ip in p.get('fixed_ips')]
            gw_ip = flip_subnet.get('gateway_ip')
            if gw_ip in fixed_ips:
                port_with_gateway_ip = None
                for port in nw_ports:
                    for ip in port.get('fixed_ips'):
                        # NOTE(review): substring containment check on the
                        # address string rather than equality, and the
                        # break only exits the inner loop — confirm this
                        # is intentional before touching it.
                        if gw_ip in ip.get('ip_address'):
                            port_with_gateway_ip = port
                            break
                port_id = port_with_gateway_ip.get('id')
                network_id = port_with_gateway_ip.get('network_id')
                raise q_exc.FixedIpAllocatedToGatewayIp(
                    port_id=port_id, network_id=network_id)

        port_fixed_ips = {}

        # Keep the ports and fixed ips that have not changed
        for port_id in remaining_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}

        # Disassociate the ports and fixed ips from the flip that were
        # associated to the flip but are not anymore
        for port_id in removed_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            flip = db_api.port_disassociate_ip(context, [port], flip)
            notifications[billing.IP_DISASSOC].add(flip)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            if fixed_ip:
                flip = db_api.floating_ip_disassociate_fixed_ip(
                    context, flip, fixed_ip)

        # Validate the new ports with the flip and associate the new ports
        # and fixed ips with the flip
        for port_id in added_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            if not port:
                raise n_exc.PortNotFound(port_id=port_id)
            # A port may carry at most one floating/scaling address.
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.FLOATING)):
                raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.SCALING)):
                raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
            fixed_ip = _get_next_available_fixed_ip(port)
            LOG.info('new fixed ip: %s' % fixed_ip)
            if not fixed_ip:
                raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
            flip = db_api.port_associate_ip(context, [port], flip,
                                            [port_id])
            notifications[billing.IP_ASSOC].add(flip)
            flip = db_api.floating_ip_associate_fixed_ip(context, flip,
                                                         fixed_ip)

        flip_driver = registry.DRIVER_REGISTRY.get_driver()
        # If there are not any remaining ports and no new ones are being
        # added, remove the floating ip from unicorn
        if not remaining_port_ids and not added_port_ids:
            flip_driver.remove_floating_ip(flip)
        # If new ports are being added but there previously was not any
        # ports, then register a new floating ip with the driver because
        # it is assumed it does not exist
        elif added_port_ids and not curr_port_ids:
            flip_driver.register_floating_ip(flip, port_fixed_ips)
        else:
            flip_driver.update_floating_ip(flip, port_fixed_ips)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise

    # Send notifications for possible associate/disassociate events
    for notif_type, flip_set in notifications.iteritems():
        for flip in flip_set:
            billing.notify(context, notif_type, flip)

    # NOTE(blogan): ORM does not seem to update the model to the real state
    # of the database, so I'm doing an explicit refresh for now.
    context.session.refresh(flip)
    return flip
def f_undo(*args, **kwargs):
    # Undo hook: re-emit the IP_ADD billing event for the closed-over
    # `ipaddress`, forwarding any extra arguments to billing.notify().
    # NOTE(review): relies on `self.context` and `ipaddress` from the
    # enclosing scope — confirm against the surrounding definition.
    billing.notify(self.context, billing.IP_ADD, ipaddress, *args,
                   **kwargs)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
    """Deallocate the IPs on *port*, handling floating/scaling specially.

    Fixed IPs mapped only to this port are deallocated outright; a
    floating or scaling IP is instead disassociated from its fixed ip(s)
    and the external (unicorn) driver is updated or told to remove it.

    :param context: neutron api request context.
    :param port: quark.db.models.Port whose addresses are released.
    :param kwargs: optional "ip_address" restricts removal to that one
        address; remaining kwargs are forwarded to billing.notify().
    """
    ips_to_remove = []
    for addr in port["ip_addresses"]:
        if "ip_address" in kwargs:
            ip = kwargs["ip_address"]
            if ip != netaddr.IPAddress(int(addr["address"])):
                continue
        # Note: only deallocate ip if this is the
        # only port mapped
        ips_to_remove.append(addr)
    port["ip_addresses"] = list(
        set(port["ip_addresses"]) - set(ips_to_remove))

    # NCP-1541: We don't need to track v6 IPs the same way. Also, we can't
    # delete them until we've removed the FK on the assoc record first, so
    # we have to flush the current state of the transaction.
    # NOTE(mdietz): this does increase traffic to the db because we need
    #               to flush, fetch the records again and potentially make
    #               another trip to deallocate each IP, but keeping our
    #               indices smaller probably provides more value than the
    #               cost
    # NOTE(aquillin): For floating IPs associated with the port, we do not
    #                 want to deallocate the IP or disassociate the IP from
    #                 the tenant, instead we will disassociate floating's
    #                 fixed IP address.
    context.session.flush()
    deallocated_ips = []
    flip = None
    for ip in ips_to_remove:
        if ip["address_type"] in (ip_types.FLOATING, ip_types.SCALING):
            flip = ip
        else:
            # Only deallocate when no other port still maps this address.
            if len(ip["ports"]) == 0:
                self.deallocate_ip_address(context, ip)
                deallocated_ips.append(ip.id)
    if flip:
        if flip.fixed_ips and len(flip.fixed_ips) == 1:
            # This is a FLIP or SCIP that is only associated with one
            # port and fixed_ip, so we can safely just disassociate all
            # and remove the flip from unicorn.
            db_api.floating_ip_disassociate_all_fixed_ips(context, flip)

            # NOTE(blogan): I'm not too happy about having do another
            # flush but some test runs showed inconsistent state based on
            # SQLAlchemy caching.
            context.session.add(flip)
            context.session.flush()
            billing.notify(context, billing.IP_DISASSOC, flip, **kwargs)
            driver = registry.DRIVER_REGISTRY.get_driver()
            driver.remove_floating_ip(flip)
        elif len(flip.fixed_ips) > 1:
            # This is a SCIP and we need to diassociate the one fixed_ip
            # from the SCIP and update unicorn with the remaining
            # ports and fixed_ips
            remaining_fixed_ips = []
            for fix_ip in flip.fixed_ips:
                if fix_ip.id in deallocated_ips:
                    db_api.floating_ip_disassociate_fixed_ip(
                        context, flip, fix_ip)
                    context.session.add(flip)
                    context.session.flush()
                    billing.notify(context, billing.IP_DISASSOC, flip,
                                   **kwargs)
                else:
                    remaining_fixed_ips.append(fix_ip)
            port_fixed_ips = {}
            for fix_ip in remaining_fixed_ips:
                # NOTE(blogan): Since this is the flip's fixed_ips it
                # should be safe to assume there is only one port
                # associated with it.
                remaining_port = fix_ip.ports[0]
                port_fixed_ips[remaining_port.id] = {
                    'port': remaining_port,
                    'fixed_ip': fix_ip
                }
            driver = registry.DRIVER_REGISTRY.get_driver()
            driver.update_floating_ip(flip, port_fixed_ips)
def _allocate_from_v6_subnet(self, context, net_id, subnet,
                             port_id, reuse_after, ip_address=None,
                             **kwargs):
    """This attempts to allocate v6 addresses as per RFC2462 and RFC3041.

    To accomodate this, we effectively treat all v6 assignment as a
    first time allocation utilizing the MAC address of the VIF. Because
    we recycle MACs, we will eventually attempt to recreate a previously
    generated v6 address. Instead of failing, we've opted to handle
    reallocating that address in this method.

    This should provide a performance boost over attempting to check
    each and every subnet in the existing reallocate logic, as we'd
    have to iterate over each and every subnet returned
    """
    LOG.info("Attempting to allocate a v6 address - [{0}]".format(
        utils.pretty_kwargs(network_id=net_id, subnet=subnet,
                            port_id=port_id, ip_address=ip_address)))

    if ip_address:
        # Caller asked for a specific address — no RFC2462-style
        # generation needed, fall through to the standard path.
        LOG.info("IP %s explicitly requested, deferring to standard "
                 "allocation" % ip_address)
        return self._allocate_from_subnet(context, net_id=net_id,
                                          subnet=subnet, port_id=port_id,
                                          reuse_after=reuse_after,
                                          ip_address=ip_address, **kwargs)
    else:
        mac = kwargs.get("mac_address")
        if mac:
            mac = kwargs["mac_address"].get("address")
        # Addresses excluded by the subnet's IP policy are skipped below.
        if subnet and subnet["ip_policy"]:
            ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
        else:
            ip_policy_cidrs = netaddr.IPSet([])
        for tries, ip_address in enumerate(
                generate_v6(mac, port_id, subnet["cidr"])):

            LOG.info("Attempt {0} of {1}".format(
                tries + 1, CONF.QUARK.v6_allocation_attempts))

            if tries > CONF.QUARK.v6_allocation_attempts - 1:
                LOG.info("Exceeded v6 allocation attempts, bailing")
                raise ip_address_failure(net_id)

            ip_address = netaddr.IPAddress(ip_address).ipv6()
            LOG.info("Generated a new v6 address {0}".format(
                str(ip_address)))

            if (ip_policy_cidrs is not None and
                    ip_address in ip_policy_cidrs):
                LOG.info("Address {0} excluded by policy".format(
                    str(ip_address)))
                continue

            try:
                with context.session.begin():
                    address = db_api.ip_address_create(
                        context, address=ip_address,
                        subnet_id=subnet["id"],
                        version=subnet["ip_version"], network_id=net_id,
                        address_type=kwargs.get('address_type',
                                                ip_types.FIXED))
                # alexm: need to notify from here because this code
                # does not go through the _allocate_from_subnet() path.
                notify(context, 'ip.add', address)
                return address
            except db_exception.DBDuplicateEntry:
                # This shouldn't ever happen, since we hold a unique MAC
                # address from the previous IPAM step.
                LOG.info("{0} exists but was already "
                         "allocated".format(str(ip_address)))
                LOG.debug("Duplicate entry found when inserting subnet_id"
                          " %s ip_address %s", subnet["id"], ip_address)
def allocate_ip_address(self, context, new_addresses, net_id, port_id,
                        reuse_after, segment_id=None, version=None,
                        ip_addresses=None, subnets=None, **kwargs):
    """Allocate IP address(es) for a port, appending to *new_addresses*.

    First tries to reallocate recycled addresses; if the strategy is not
    yet satisfied, falls back to creating new ones. Emits one IP_ADD
    billing event per address only when the whole allocation succeeded.
    Raises the net-specific ip_address_failure when the strategy cannot
    be satisfied.
    """
    elevated = context.elevated()
    subnets = subnets or []
    ip_addresses = ip_addresses or []

    ipam_log = kwargs.get('ipam_log', None)
    LOG.info("Starting a new IP address(es) allocation. Strategy "
             "is {0} - [{1}]".format(
                 self.get_name(),
                 utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                     new_addresses=new_addresses,
                                     ip_addresses=ip_addresses,
                                     subnets=subnets,
                                     segment_id=segment_id,
                                     version=version)))

    def _try_reallocate_ip_address(ipam_log, ip_addr=None):
        # Recycle a previously deallocated address when possible.
        new_addresses.extend(self.attempt_to_reallocate_ip(
            context, net_id, port_id, reuse_after, version=version,
            ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
            **kwargs))

    def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
        # Create a brand-new allocation, retrying on retryable failures
        # up to CONF.QUARK.ip_address_retry_max times.
        for retry in xrange(CONF.QUARK.ip_address_retry_max):
            attempt = None
            if ipam_log:
                attempt = ipam_log.make_entry("_try_allocate_ip_address")
            LOG.info("Allocating new IP attempt {0} of {1}".format(
                retry + 1, CONF.QUARK.ip_address_retry_max))
            if not sub:
                subnets = self._choose_available_subnet(
                    elevated, net_id, version, segment_id=segment_id,
                    ip_address=ip_addr, reallocated_ips=new_addresses)
            else:
                subnets = [self.select_subnet(context, net_id,
                                              ip_addr, segment_id,
                                              subnet_ids=[sub])]
            LOG.info("Subnet selection returned {0} viable subnet(s) - "
                     "IDs: {1}".format(len(subnets),
                                       ", ".join([str(s["id"])
                                                  for s in subnets
                                                  if s])))

            try:
                self._allocate_ips_from_subnets(context, new_addresses,
                                                net_id, subnets,
                                                port_id, reuse_after,
                                                ip_addr, **kwargs)
            except q_exc.IPAddressRetryableFailure:
                LOG.exception("Error in allocating IP")
                if attempt:
                    LOG.debug("ATTEMPT FAILED")
                    attempt.failed()
                remaining = CONF.QUARK.ip_address_retry_max - retry - 1
                if remaining > 0:
                    LOG.info("{0} retries remain, retrying...".format(
                        remaining))
                else:
                    LOG.info("No retries remaing, bailing")
                continue
            finally:
                if attempt:
                    attempt.end()
            # Success — stop retrying.
            break

    ip_addresses = [netaddr.IPAddress(ip_address)
                    for ip_address in ip_addresses]

    if ip_addresses:
        for ip_address in ip_addresses:
            _try_reallocate_ip_address(ipam_log, ip_address)
    else:
        _try_reallocate_ip_address(ipam_log)

    if self.is_strategy_satisfied(new_addresses):
        return
    else:
        LOG.info("Reallocated addresses {0} but still need more addresses "
                 "to satisfy strategy {1}. Falling back to creating "
                 "IPs".format(new_addresses, self.get_name()))

    if ip_addresses or subnets:
        for ip_address, subnet in itertools.izip_longest(ip_addresses,
                                                         subnets):
            _try_allocate_ip_address(ipam_log, ip_address, subnet)
    else:
        _try_allocate_ip_address(ipam_log)

    if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
        # Only notify when all went well
        for address in new_addresses:
            billing.notify(context, billing.IP_ADD, address, **kwargs)
        LOG.info("IPAM for port ID {0} completed with addresses "
                 "{1}".format(port_id,
                              [a["address_readable"]
                               for a in new_addresses]))
        return
    ipam_log.failed()

    raise ip_address_failure(net_id)
def allocate_ip_address(self, context, new_addresses, net_id, port_id,
                        reuse_after, segment_id=None, version=None,
                        ip_addresses=None, subnets=None, **kwargs):
    """Allocate IP address(es) for a port, appending to *new_addresses*.

    First tries to reallocate recycled addresses; if the strategy is not
    yet satisfied, falls back to creating new ones. Emits one 'ip.add'
    notification per address only when the whole allocation succeeded.
    Raises the net-specific ip_address_failure when the strategy cannot
    be satisfied.
    """
    elevated = context.elevated()
    subnets = subnets or []
    ip_addresses = ip_addresses or []

    ipam_log = kwargs.get('ipam_log', None)
    LOG.info("Starting a new IP address(es) allocation. Strategy "
             "is {0} - [{1}]".format(
                 self.get_name(),
                 utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                     new_addresses=new_addresses,
                                     ip_addresses=ip_addresses,
                                     subnets=subnets,
                                     segment_id=segment_id,
                                     version=version)))

    def _try_reallocate_ip_address(ipam_log, ip_addr=None):
        # Recycle a previously deallocated address when possible.
        new_addresses.extend(
            self.attempt_to_reallocate_ip(context, net_id, port_id,
                                          reuse_after, version=version,
                                          ip_address=ip_addr,
                                          segment_id=segment_id,
                                          subnets=subnets, **kwargs))

    def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
        # Create a brand-new allocation, retrying on retryable failures
        # up to CONF.QUARK.ip_address_retry_max times.
        for retry in xrange(CONF.QUARK.ip_address_retry_max):
            attempt = None
            if ipam_log:
                attempt = ipam_log.make_entry("_try_allocate_ip_address")
            LOG.info("Allocating new IP attempt {0} of {1}".format(
                retry + 1, CONF.QUARK.ip_address_retry_max))
            if not sub:
                subnets = self._choose_available_subnet(
                    elevated, net_id, version, segment_id=segment_id,
                    ip_address=ip_addr, reallocated_ips=new_addresses)
            else:
                subnets = [
                    self.select_subnet(context, net_id, ip_addr,
                                       segment_id, subnet_ids=[sub])
                ]
            LOG.info("Subnet selection returned {0} viable subnet(s) - "
                     "IDs: {1}".format(
                         len(subnets),
                         ", ".join([str(s["id"]) for s in subnets
                                    if s])))

            try:
                self._allocate_ips_from_subnets(context, new_addresses,
                                                net_id, subnets,
                                                port_id, reuse_after,
                                                ip_addr, **kwargs)
            except q_exc.IPAddressRetryableFailure:
                LOG.exception("Error in allocating IP")
                if attempt:
                    LOG.debug("ATTEMPT FAILED")
                    attempt.failed()
                remaining = CONF.QUARK.ip_address_retry_max - retry - 1
                if remaining > 0:
                    LOG.info("{0} retries remain, retrying...".format(
                        remaining))
                else:
                    LOG.info("No retries remaing, bailing")
                continue
            finally:
                if attempt:
                    attempt.end()
            # Success — stop retrying.
            break

    ip_addresses = [netaddr.IPAddress(ip_address)
                    for ip_address in ip_addresses]

    if ip_addresses:
        for ip_address in ip_addresses:
            _try_reallocate_ip_address(ipam_log, ip_address)
    else:
        _try_reallocate_ip_address(ipam_log)

    if self.is_strategy_satisfied(new_addresses):
        return
    else:
        LOG.info("Reallocated addresses {0} but still need more addresses "
                 "to satisfy strategy {1}. Falling back to creating "
                 "IPs".format(new_addresses, self.get_name()))

    if ip_addresses or subnets:
        for ip_address, subnet in itertools.izip_longest(ip_addresses,
                                                         subnets):
            _try_allocate_ip_address(ipam_log, ip_address, subnet)
    else:
        _try_allocate_ip_address(ipam_log)

    if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
        # Only notify when all went well
        for address in new_addresses:
            notify(context, 'ip.add', address)
        LOG.info("IPAM for port ID {0} completed with addresses "
                 "{1}".format(port_id,
                              [a["address_readable"]
                               for a in new_addresses]))
        return
    ipam_log.failed()

    raise ip_address_failure(net_id)
def _allocate_from_v6_subnet(self, context, net_id, subnet,
                             port_id, reuse_after, ip_address=None,
                             **kwargs):
    """This attempts to allocate v6 addresses as per RFC2462 and RFC3041.

    To accomodate this, we effectively treat all v6 assignment as a
    first time allocation utilizing the MAC address of the VIF. Because
    we recycle MACs, we will eventually attempt to recreate a previously
    generated v6 address. Instead of failing, we've opted to handle
    reallocating that address in this method.

    This should provide a performance boost over attempting to check
    each and every subnet in the existing reallocate logic, as we'd
    have to iterate over each and every subnet returned
    """
    LOG.info("Attempting to allocate a v6 address - [{0}]".format(
        utils.pretty_kwargs(network_id=net_id, subnet=subnet,
                            port_id=port_id, ip_address=ip_address)))

    if ip_address:
        # Caller asked for a specific address — no RFC2462-style
        # generation needed, fall through to the standard path.
        LOG.info("IP %s explicitly requested, deferring to standard "
                 "allocation" % ip_address)
        return self._allocate_from_subnet(context, net_id=net_id,
                                          subnet=subnet, port_id=port_id,
                                          reuse_after=reuse_after,
                                          ip_address=ip_address, **kwargs)
    else:
        mac = kwargs.get("mac_address")
        if mac:
            mac = kwargs["mac_address"].get("address")
        # Addresses excluded by the subnet's IP policy are skipped below.
        if subnet and subnet["ip_policy"]:
            ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
        else:
            ip_policy_cidrs = netaddr.IPSet([])
        for tries, ip_address in enumerate(
                generate_v6(mac, port_id, subnet["cidr"])):

            LOG.info("Attempt {0} of {1}".format(
                tries + 1, CONF.QUARK.v6_allocation_attempts))

            if tries > CONF.QUARK.v6_allocation_attempts - 1:
                LOG.info("Exceeded v6 allocation attempts, bailing")
                raise ip_address_failure(net_id)

            ip_address = netaddr.IPAddress(ip_address).ipv6()
            LOG.info("Generated a new v6 address {0}".format(
                str(ip_address)))

            if (ip_policy_cidrs is not None and
                    ip_address in ip_policy_cidrs):
                LOG.info("Address {0} excluded by policy".format(
                    str(ip_address)))
                continue

            try:
                with context.session.begin():
                    address = db_api.ip_address_create(
                        context, address=ip_address,
                        subnet_id=subnet["id"],
                        version=subnet["ip_version"], network_id=net_id,
                        address_type=kwargs.get('address_type',
                                                ip_types.FIXED))
                # alexm: need to notify from here because this code
                # does not go through the _allocate_from_subnet() path.
                notify(context, 'ip.add', address)
                return address
            except db_exception.DBDuplicateEntry:
                # This shouldn't ever happen, since we hold a unique MAC
                # address from the previous IPAM step.
                LOG.info("{0} exists but was already "
                         "allocated".format(str(ip_address)))
                LOG.debug("Duplicate entry found when inserting subnet_id"
                          " %s ip_address %s", subnet["id"], ip_address)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
    """Deallocate the IPs on *port*, handling floating/scaling specially.

    Fixed IPs mapped only to this port are deallocated outright; a
    floating or scaling IP is instead disassociated from its fixed ip(s)
    and the external (unicorn) driver is updated or told to remove it.

    :param context: neutron api request context.
    :param port: quark.db.models.Port whose addresses are released.
    :param kwargs: optional "ip_address" restricts removal to that one
        address.
    """
    ips_to_remove = []
    for addr in port["ip_addresses"]:
        if "ip_address" in kwargs:
            ip = kwargs["ip_address"]
            if ip != netaddr.IPAddress(int(addr["address"])):
                continue
        # Note: only deallocate ip if this is the
        # only port mapped
        ips_to_remove.append(addr)
    port["ip_addresses"] = list(
        set(port["ip_addresses"]) - set(ips_to_remove))

    # NCP-1541: We don't need to track v6 IPs the same way. Also, we can't
    # delete them until we've removed the FK on the assoc record first, so
    # we have to flush the current state of the transaction.
    # NOTE(mdietz): this does increase traffic to the db because we need
    #               to flush, fetch the records again and potentially make
    #               another trip to deallocate each IP, but keeping our
    #               indices smaller probably provides more value than the
    #               cost
    # NOTE(aquillin): For floating IPs associated with the port, we do not
    #                 want to deallocate the IP or disassociate the IP from
    #                 the tenant, instead we will disassociate floating's
    #                 fixed IP address.
    context.session.flush()
    deallocated_ips = []
    flip = None
    for ip in ips_to_remove:
        if ip["address_type"] in (ip_types.FLOATING, ip_types.SCALING):
            flip = ip
        else:
            # Only deallocate when no other port still maps this address.
            if len(ip["ports"]) == 0:
                self.deallocate_ip_address(context, ip)
                deallocated_ips.append(ip.id)
    if flip:
        if flip.fixed_ips and len(flip.fixed_ips) == 1:
            # This is a FLIP or SCIP that is only associated with one
            # port and fixed_ip, so we can safely just disassociate all
            # and remove the flip from unicorn.
            db_api.floating_ip_disassociate_all_fixed_ips(context, flip)

            # NOTE(blogan): I'm not too happy about having do another
            # flush but some test runs showed inconsistent state based on
            # SQLAlchemy caching.
            context.session.add(flip)
            context.session.flush()
            notify(context, 'ip.disassociate', flip)
            driver = registry.DRIVER_REGISTRY.get_driver()
            driver.remove_floating_ip(flip)
        elif len(flip.fixed_ips) > 1:
            # This is a SCIP and we need to diassociate the one fixed_ip
            # from the SCIP and update unicorn with the remaining
            # ports and fixed_ips
            remaining_fixed_ips = []
            for fix_ip in flip.fixed_ips:
                if fix_ip.id in deallocated_ips:
                    db_api.floating_ip_disassociate_fixed_ip(
                        context, flip, fix_ip)
                    context.session.add(flip)
                    context.session.flush()
                    notify(context, 'ip.disassociate', flip)
                else:
                    remaining_fixed_ips.append(fix_ip)
            port_fixed_ips = {}
            for fix_ip in remaining_fixed_ips:
                # NOTE(blogan): Since this is the flip's fixed_ips it
                # should be safe to assume there is only one port
                # associated with it.
                remaining_port = fix_ip.ports[0]
                port_fixed_ips[remaining_port.id] = {
                    'port': remaining_port,
                    'fixed_ip': fix_ip
                }
            driver = registry.DRIVER_REGISTRY.get_driver()
            driver.update_floating_ip(flip, port_fixed_ips)