Example #1
def _translate_ipv6_addr(ipv6_addrs):
    return [
        addr for addr in ipv6_addrs
        if not netaddr.IPAddress(addr.split('/')[0]).is_link_local()]
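A minimal usage sketch (not part of the original snippet; the address list is made up) showing the filter keeping global addresses and dropping link-local ones:
import netaddr

ipv6_addrs = ["fe80::1/64", "2001:db8::10/64"]
print(_translate_ipv6_addr(ipv6_addrs))  # ['2001:db8::10/64'] -- the fe80::/10 entry is link-local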
Example #2
    def get_machines(self, machine_id=None, hostname=None, ip=None, any=None,
                     substring=False):
        """
        Return matching machines.

        :param machine_id: can be matched as a substring
        :param hostname: can be matched as a substring
        :param ip: can not be matched as a substring
        :param substring: whether the filtering should use substring matching
        :type substring: bool
        :param any: a substring that matches EITHER hostname, machine_id or ip
        :type any: basestring
        :return: list of Machine objects
        """
        machines = []

        f = open(self.filename, "r")
        try:
            for line in f:
                split_line = line.split()
                if len(split_line) < 2:
                    # skip lines with less than 2 columns
                    continue
                if split_line[0][0] == "#":
                    # skip comments
                    continue
                line_id = split_line[0]
                line_ip = netaddr.IPAddress(split_line[0])
                line_hostname = split_line[1:]
                # check if machine_id, ip or hostname matches a substring
                if (any and any not in line_id and
                        len([x for x in line_hostname if any in x]) <= 0 and
                        any not in "{0!s}".format(line_ip)):
                    # "any" was provided but did not match either
                    # hostname, ip or machine_id
                    continue

                else:
                    if machine_id:
                        if not substring and machine_id == line_id:
                            return [Machine(self.name, line_id,
                                            hostname=line_hostname, ip=line_ip)]
                        if substring and machine_id not in line_id:
                            # do not append this machine!
                            continue
                    if hostname:
                        if substring:
                            h_match = len([x for x in line_hostname if hostname in x])
                        else:
                            h_match = hostname in line_hostname
                        if not h_match:
                            # do not append this machine!
                            continue

                    if ip and ip != line_ip:
                        # Do not append this machine!
                        continue

                machines.append(Machine(self.name, line_id,
                                        hostname=line_hostname,
                                        ip=line_ip))
        finally:
            f.close()
        return machines
Example #3
 def is_in_subnet(self, subnet):
     if self['address'] and subnet['cidr']:
         return (netaddr.IPAddress(self['address'])
                 in netaddr.IPNetwork(subnet['cidr']))
     else:
         return False
Example #4
def validate_local_address(ip_address):
    if not valid_ip_address(ip_address):
        raise ConfigValueError(desc='Invalid local ip_address: %s' %
                               ip_address)
    return str(netaddr.IPAddress(ip_address))
Example #5
def in_subnet(ip_str, subnet):
    for net in subnet:
        if netaddr.IPAddress(ip_str) in netaddr.IPNetwork(net):
            return True
    return False
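A short usage sketch with illustrative values; each subnet string becomes an IPNetwork and the query string an IPAddress for the membership test:
import netaddr

subnets = ["10.0.0.0/8", "192.168.1.0/24"]
print(in_subnet("10.1.2.3", subnets))    # True  -- falls inside 10.0.0.0/8
print(in_subnet("172.16.0.1", subnets))  # False -- in neither subnet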
Example #6
 def add_ipv6_prefix(self, prefix):
     # having more than one prefix is not supported
     self.ipv6_prefix = netaddr.IPAddress(prefix)
Example #7
    def deallocate_ips_by_port(self, context, port=None, **kwargs):
        ips_to_remove = []
        for addr in port["ip_addresses"]:
            if "ip_address" in kwargs:
                ip = kwargs["ip_address"]
                if ip != netaddr.IPAddress(int(addr["address"])):
                    continue

            # Note: only deallocate ip if this is the
            # only port mapped
            ips_to_remove.append(addr)

        port["ip_addresses"] = list(
            set(port["ip_addresses"]) - set(ips_to_remove))

        # NCP-1541: We don't need to track v6 IPs the same way. Also, we can't
        # delete them until we've removed the FK on the assoc record first, so
        # we have to flush the current state of the transaction.
        # NOTE(mdietz): this does increase traffic to the db because we need
        #               to flush, fetch the records again and potentially make
        #               another trip to deallocate each IP, but keeping our
        #               indices smaller probably provides more value than the
        #               cost
        # NOTE(aquillin): For floating IPs associated with the port, we do not
        #                 want to deallocate the IP or disassociate the IP from
        #                 the tenant, instead we will disassociate floating's
        #                 fixed IP address.
        context.session.flush()
        deallocated_ips = []
        flip = None
        for ip in ips_to_remove:
            if ip["address_type"] in (ip_types.FLOATING, ip_types.SCALING):
                flip = ip
            else:
                if len(ip["ports"]) == 0:
                    self.deallocate_ip_address(context, ip)
                    deallocated_ips.append(ip.id)
        if flip:
            if flip.fixed_ips and len(flip.fixed_ips) == 1:
                # This is a FLIP or SCIP that is only associated with one
                # port and fixed_ip, so we can safely just disassociate all
                # and remove the flip from unicorn.
                db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
                # NOTE(blogan): I'm not too happy about having to do another
                # flush but some test runs showed inconsistent state based on
                # SQLAlchemy caching.
                context.session.add(flip)
                context.session.flush()
                billing.notify(context, billing.IP_DISASSOC, flip, **kwargs)
                driver = registry.DRIVER_REGISTRY.get_driver()
                driver.remove_floating_ip(flip)
            elif len(flip.fixed_ips) > 1:
                # This is a SCIP and we need to disassociate the one fixed_ip
                # from the SCIP and update unicorn with the remaining
                # ports and fixed_ips
                remaining_fixed_ips = []
                for fix_ip in flip.fixed_ips:
                    if fix_ip.id in deallocated_ips:
                        db_api.floating_ip_disassociate_fixed_ip(
                            context, flip, fix_ip)
                        context.session.add(flip)
                        context.session.flush()
                        billing.notify(context, billing.IP_DISASSOC, flip,
                                       **kwargs)
                    else:
                        remaining_fixed_ips.append(fix_ip)
                port_fixed_ips = {}
                for fix_ip in remaining_fixed_ips:
                    # NOTE(blogan): Since this is the flip's fixed_ips it
                    # should be safe to assume there is only one port
                    # associated with it.
                    remaining_port = fix_ip.ports[0]
                    port_fixed_ips[remaining_port.id] = {
                        'port': remaining_port,
                        'fixed_ip': fix_ip
                    }
                driver = registry.DRIVER_REGISTRY.get_driver()
                driver.update_floating_ip(flip, port_fixed_ips)
Example #8
 def constraint(obj, attr, value):
     try:
         return netaddr.IPAddress(value).__str__()
     except netaddr.AddrFormatError as e:
         raise ValueError(six.text_type(e))
Example #9
    def _allocate_from_v6_subnet(self, context, net_id, subnet,
                                 port_id, reuse_after, ip_address=None,
                                 **kwargs):
        """This attempts to allocate v6 addresses as per RFC2462 and RFC3041.

        To accommodate this, we effectively treat all v6 assignment as a
        first time allocation utilizing the MAC address of the VIF. Because
        we recycle MACs, we will eventually attempt to recreate a previously
        generated v6 address. Instead of failing, we've opted to handle
        reallocating that address in this method.

        This should provide a performance boost over attempting to check
        each and every subnet in the existing reallocate logic, as we'd
        have to iterate over each and every subnet returned
        """

        LOG.info("Attempting to allocate a v6 address - [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, subnet=subnet,
                                port_id=port_id, ip_address=ip_address)))

        if ip_address:
            LOG.info("IP %s explicitly requested, deferring to standard "
                     "allocation" % ip_address)
            return self._allocate_from_subnet(context, net_id=net_id,
                                              subnet=subnet, port_id=port_id,
                                              reuse_after=reuse_after,
                                              ip_address=ip_address, **kwargs)
        else:
            mac = kwargs.get("mac_address")
            if mac:
                mac = kwargs["mac_address"].get("address")

            if subnet and subnet["ip_policy"]:
                ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
            else:
                ip_policy_cidrs = netaddr.IPSet([])

            for tries, ip_address in enumerate(
                    generate_v6(mac, port_id, subnet["cidr"])):

                LOG.info("Attempt {0} of {1}".format(
                    tries + 1, CONF.QUARK.v6_allocation_attempts))

                if tries > CONF.QUARK.v6_allocation_attempts - 1:
                    LOG.info("Exceeded v6 allocation attempts, bailing")
                    raise ip_address_failure(net_id)

                ip_address = netaddr.IPAddress(ip_address).ipv6()
                LOG.info("Generated a new v6 address {0}".format(
                    str(ip_address)))

                if (ip_policy_cidrs is not None and
                        ip_address in ip_policy_cidrs):
                    LOG.info("Address {0} excluded by policy".format(
                        str(ip_address)))
                    continue

                try:
                    with context.session.begin():
                        address = db_api.ip_address_create(
                            context, address=ip_address,
                            subnet_id=subnet["id"],
                            version=subnet["ip_version"], network_id=net_id,
                            address_type=kwargs.get('address_type',
                                                    ip_types.FIXED))
                        return address
                except db_exception.DBDuplicateEntry:
                    # This shouldn't ever happen, since we hold a unique MAC
                    # address from the previous IPAM step.
                    LOG.info("{0} exists but was already "
                             "allocated".format(str(ip_address)))
                    LOG.debug("Duplicate entry found when inserting subnet_id"
                              " %s ip_address %s", subnet["id"], ip_address)
Example #10
    def allocate_ip_address(self, context, new_addresses, net_id, port_id,
                            reuse_after, segment_id=None, version=None,
                            ip_addresses=None, subnets=None, **kwargs):
        elevated = context.elevated()
        subnets = subnets or []
        ip_addresses = ip_addresses or []

        ipam_log = kwargs.get('ipam_log', None)
        LOG.info("Starting a new IP address(es) allocation. Strategy "
                 "is {0} - [{1}]".format(
                     self.get_name(),
                     utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                         new_addresses=new_addresses,
                                         ip_addresses=ip_addresses,
                                         subnets=subnets,
                                         segment_id=segment_id,
                                         version=version)))

        def _try_reallocate_ip_address(ipam_log, ip_addr=None):
            new_addresses.extend(self.attempt_to_reallocate_ip(
                context, net_id, port_id, reuse_after, version=version,
                ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
                **kwargs))

        def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
            for retry in xrange(CONF.QUARK.ip_address_retry_max):
                attempt = None
                if ipam_log:
                    attempt = ipam_log.make_entry("_try_allocate_ip_address")
                LOG.info("Allocating new IP attempt {0} of {1}".format(
                    retry + 1, CONF.QUARK.ip_address_retry_max))
                if not sub:
                    subnets = self._choose_available_subnet(
                        elevated, net_id, version, segment_id=segment_id,
                        ip_address=ip_addr, reallocated_ips=new_addresses)
                else:
                    subnets = [self.select_subnet(context, net_id,
                                                  ip_addr, segment_id,
                                                  subnet_ids=[sub])]
                LOG.info("Subnet selection returned {0} viable subnet(s) - "
                         "IDs: {1}".format(len(subnets),
                                           ", ".join([str(s["id"])
                                                      for s in subnets if s])))

                try:
                    self._allocate_ips_from_subnets(context, new_addresses,
                                                    net_id, subnets,
                                                    port_id, reuse_after,
                                                    ip_addr, **kwargs)
                except q_exc.IPAddressRetryableFailure:
                    LOG.exception("Error in allocating IP")
                    if attempt:
                        LOG.debug("ATTEMPT FAILED")
                        attempt.failed()
                    remaining = CONF.QUARK.ip_address_retry_max - retry - 1
                    if remaining > 0:
                        LOG.info("{0} retries remain, retrying...".format(
                            remaining))
                    else:
                        LOG.info("No retries remaing, bailing")
                    continue
                finally:
                    if attempt:
                        attempt.end()

                break

        ip_addresses = [netaddr.IPAddress(ip_address)
                        for ip_address in ip_addresses]

        if ip_addresses:
            for ip_address in ip_addresses:
                _try_reallocate_ip_address(ipam_log, ip_address)
        else:
            _try_reallocate_ip_address(ipam_log)

        if self.is_strategy_satisfied(new_addresses):
            return
        else:
            LOG.info("Reallocated addresses {0} but still need more addresses "
                     "to satisfy strategy {1}. Falling back to creating "
                     "IPs".format(new_addresses, self.get_name()))

        if ip_addresses or subnets:
            for ip_address, subnet in itertools.izip_longest(ip_addresses,
                                                             subnets):
                _try_allocate_ip_address(ipam_log, ip_address, subnet)
        else:
            _try_allocate_ip_address(ipam_log)

        if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
            # Only notify when all went well
            for address in new_addresses:
                billing.notify(context, billing.IP_ADD, address, **kwargs)
            LOG.info("IPAM for port ID {0} completed with addresses "
                     "{1}".format(port_id,
                                  [a["address_readable"]
                                   for a in new_addresses]))
            return
        ipam_log.failed()

        raise ip_address_failure(net_id)
Example #11
def cmdbprocess(scanrange, beintrusive, filename, project_id):
    localipstring = getlocalinfo(project_id)
    localip = str(localipstring).split("$")[0]
    subnet = str(localipstring).split("$")[1]
    interface = str(localipstring).split("$")[2]
    print "processing " + str(len(localip)) + "local interfaces " + interface
    cmdb_file_ips = []

    gateway = netifaces.gateways()['default'][netifaces.AF_INET][0]
    print "Gateway: " + gateway

    gatewaynewnode = makeanode(gateway, subnet, project_id, 1, "CMDB", "",
                               True)
    localnode = makeanode(localip, subnet, project_id, 0, "CMDB", "", True)
    gatewaynewnode.connected.connect(localnode)
    addaction(project_id, "CMDB", filename, gatewaynewnode)
    addaction(project_id, "CMDB", filename, localnode)

    for line in scanrange:
        try:
            ipaddress.ip_address(line)
            cmdb_file_ips.append(line)
            print "scanning IP " + line
            scanresult = networkscan(line)
            print "Scan results " + str(scanresult)
            for ip in scanresult:
                local_ip_range = localip + "/" + str(
                    netaddr.IPAddress(subnet).netmask_bits())
                if netaddr.IPAddress(ip) in netaddr.IPNetwork(local_ip_range):
                    print "in local range"
                    node = makeanode(ip, subnet, project_id, 2, "CMDB", "",
                                     True)
                    gatewaynewnode.connected.connect(node)
                    addaction(project_id, "CMDB", filename, node)
                else:
                    print "performing traceroute to " + ip
                    output = traceroute.delay(ip, 33434, 30, project_id, True)
                    findseed = project_id + "#SEED"
                    seednode = Machine.nodes.get(tag__startswith=findseed)
                    addaction(
                        project_id, "GOTO", ip + "@" + str(output) + "@" +
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                        seednode)
        except Exception as ex:
            print str(ex)
            print "Not a valid IP. Ignoring " + line

    if str(beintrusive) == "on":
        local_ip_range = gateway + "/" + str(
            netaddr.IPAddress(subnet).netmask_bits())
        output = cmdbanalysis.delay(local_ip_range, cmdb_file_ips, project_id,
                                    filename)
        findseed = project_id + "#SEED"
        seednode = Machine.nodes.get(tag__startswith=findseed)
        addaction(
            project_id, "CMDB",
            str(local_ip_range) + "@" + str(output) + "@" +
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), seednode)
        output = traceroute.delay(
            "google.com", 33434, 30, project_id,
            True)  # perform traceroute to (multiple?)google.com
        seednode = Machine.nodes.get(tag__startswith=findseed)
        addaction(
            project_id, "GOTO", "google.com" + "@" + str(output) + "@" +
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), seednode)
    return {"Status": True}
Example #12
def is_eui64_address(ip_address):
    """Check if ip address is EUI64."""
    ip = netaddr.IPAddress(ip_address)
    # '0xfffe' addition is used to build EUI-64 from MAC (RFC4291)
    # Look for it in the middle of the EUI-64 part of address
    return ip.version == 6 and not ((ip & 0xffff000000) ^ 0xfffe000000)
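A quick check of the helper above with a hand-built EUI-64 interface identifier versus addresses that are not MAC-derived (values are illustrative):
print(is_eui64_address("fe80::211:22ff:fe33:4455"))  # True  -- ff:fe sits in the middle of the IID
print(is_eui64_address("fe80::1"))                   # False -- not derived from a MAC
print(is_eui64_address("192.0.2.1"))                 # False -- IPv4, the version check fails first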
Example #13
def validate_next_hop(ip_address):
    if not valid_ip_address(ip_address):
        raise ConfigValueError(desc='Invalid next_hop ip_address: %s' %
                               ip_address)
    return str(netaddr.IPAddress(ip_address))
Example #14
def get_first_host_ip(net, ip_version):
    return str(netaddr.IPAddress(net.first + 1, ip_version))
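Usage sketch (example network chosen for illustration): net.first is the integer value of the network address, so adding one yields the first usable host:
import netaddr

net = netaddr.IPNetwork("192.0.2.0/24")
print(get_first_host_ip(net, 4))  # 192.0.2.1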
Example #15
    def tunnel_sync(self, rpc_context, **kwargs):
        """Update new tunnel.

        Updates the database with the tunnel IP. All listening agents will also
        be notified about the new tunnel IP.
        """
        tunnel_ip = kwargs.get('tunnel_ip')
        if not tunnel_ip:
            msg = _("Tunnel IP value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        host = kwargs.get('host')
        version = netaddr.IPAddress(tunnel_ip).version
        if version != cfg.CONF.ml2.overlay_ip_version:
            msg = (_("Tunnel IP version does not match ML2 "
                     "overlay_ip_version: %(overlay)s, host: %(host)s, "
                     "tunnel_ip: %(ip)s"),
                   {'overlay': cfg.CONF.ml2.overlay_ip_version,
                    'host': host, 'ip': tunnel_ip})
            raise exc.InvalidInput(error_message=msg)

        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            msg = _("Network type value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        driver = self._type_manager.drivers.get(tunnel_type)
        if driver:
            # The given conditional statements will verify the following
            # things:
            # 1. If host is not passed from an agent, it is a legacy mode.
            # 2. If passed host and tunnel_ip are not found in the DB,
            #    it is a new endpoint.
            # 3. If host is passed from an agent and it is not found in DB
            #    but the passed tunnel_ip is found, delete the endpoint
            #    from DB and add the endpoint with (tunnel_ip, host),
            #    it is an upgrade case.
            # 4. If passed host is found in DB and passed tunnel ip is not
            #    found, delete the endpoint belonging to that host and
            #    add endpoint with latest (tunnel_ip, host), it is a case
            #    where local_ip of an agent got changed.
            # 5. If the passed host had another IP in the DB, the host-id has
            #    roamed to a different IP; delete any reference to the new
            #    local_ip or the host id. Don't notify tunnel_delete for the
            #    old IP since that one could have been taken by a different
            #    agent host-id (neutron-ovs-cleanup should be used to clean up
            #    the stale endpoints).
            #    Finally create a new endpoint for the (tunnel_ip, host).
            if host:
                host_endpoint = driver.obj.get_endpoint_by_host(host)
                ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip)

                if (ip_endpoint and ip_endpoint.host is None and
                        host_endpoint is None):
                    driver.obj.delete_endpoint(ip_endpoint.ip_address)
                elif (ip_endpoint and ip_endpoint.host != host):
                    LOG.info(
                        "Tunnel IP %(ip)s was used by host %(host)s and "
                        "will be assigned to %(new_host)s",
                        {'ip': ip_endpoint.ip_address,
                         'host': ip_endpoint.host,
                         'new_host': host})
                    driver.obj.delete_endpoint_by_host_or_ip(
                        host, ip_endpoint.ip_address)
                elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
                    # Notify all other listening agents to delete stale tunnels
                    self._notifier.tunnel_delete(
                        rpc_context, host_endpoint.ip_address, tunnel_type)
                    driver.obj.delete_endpoint(host_endpoint.ip_address)

            tunnel = driver.obj.add_endpoint(tunnel_ip, host)
            tunnels = driver.obj.get_endpoints()
            entry = {'tunnels': tunnels}
            # Notify all other listening agents
            self._notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                         tunnel_type)
            # Return the list of tunnels IP's to the agent
            return entry
        else:
            msg = (_("Network type value %(type)s not supported, "
                    "host: %(host)s with tunnel IP: %(ip)s") %
                    {'type': tunnel_type,
                     'host': host or 'legacy mode (no host provided by agent)',
                     'ip': tunnel_ip})
            raise exc.InvalidInput(error_message=msg)
Example #16
    def _validate_allocation_pools(self, ip_pools, subnet_cidr):
        """Validate IP allocation pools.

        Verify that the start and end address of each allocation pool are
        valid, i.e. constituted by valid and appropriately ordered IP
        addresses. Also, verify pools do not overlap among themselves.
        Finally, verify that each range falls within the subnet's CIDR.
        """
        subnet = netaddr.IPNetwork(subnet_cidr)
        subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
        # last address is broadcast in v4
        subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4))

        LOG.debug("Performing IP validity checks on allocation pools")
        ip_sets = []
        for ip_pool in ip_pools:
            try:
                start_ip = netaddr.IPAddress(ip_pool['start'])
                end_ip = netaddr.IPAddress(ip_pool['end'])
            except netaddr.AddrFormatError:
                LOG.info(_LI("Found invalid IP address in pool: "
                             "%(start)s - %(end)s:"),
                         {'start': ip_pool['start'],
                          'end': ip_pool['end']})
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if (start_ip.version != subnet.version or
                    end_ip.version != subnet.version):
                LOG.info(_LI("Specified IP addresses do not match "
                             "the subnet IP version"))
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if end_ip < start_ip:
                LOG.info(_LI("Start IP (%(start)s) is greater than end IP "
                             "(%(end)s)"),
                         {'start': ip_pool['start'], 'end': ip_pool['end']})
                raise n_exc.InvalidAllocationPool(pool=ip_pool)
            if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
                LOG.info(_LI("Found pool larger than subnet "
                             "CIDR:%(start)s - %(end)s"),
                         {'start': ip_pool['start'],
                          'end': ip_pool['end']})
                raise n_exc.OutOfBoundsAllocationPool(
                    pool=ip_pool,
                    subnet_cidr=subnet_cidr)
            # Valid allocation pool
            # Create an IPSet for it for easily verifying overlaps
            ip_sets.append(netaddr.IPSet(netaddr.IPRange(
                ip_pool['start'],
                ip_pool['end']).cidrs()))

        LOG.debug("Checking for overlaps among allocation pools "
                  "and gateway ip")
        ip_ranges = ip_pools[:]

        # Use integer cursors as an efficient way for implementing
        # comparison and avoiding comparing the same pair twice
        for l_cursor in range(len(ip_sets)):
            for r_cursor in range(l_cursor + 1, len(ip_sets)):
                if ip_sets[l_cursor] & ip_sets[r_cursor]:
                    l_range = ip_ranges[l_cursor]
                    r_range = ip_ranges[r_cursor]
                    LOG.info(_LI("Found overlapping ranges: %(l_range)s and "
                                 "%(r_range)s"),
                             {'l_range': l_range, 'r_range': r_range})
                    raise n_exc.OverlappingAllocationPools(
                        pool_1=l_range,
                        pool_2=r_range,
                        subnet_cidr=subnet_cidr)
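The overlap detection above hinges on IPSet intersection built from each pool's IPRange; a reduced sketch with hypothetical pools:
import netaddr

pool_a = netaddr.IPSet(netaddr.IPRange("192.0.2.10", "192.0.2.50").cidrs())
pool_b = netaddr.IPSet(netaddr.IPRange("192.0.2.40", "192.0.2.60").cidrs())
print(bool(pool_a & pool_b))  # True -- .40 through .50 belongs to both pools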
Example #17
def _translate_vip_object_graph(extended_vip, plugin, context):
    """Translate the extended vip

    translate to a structure that can be
    understood by the workflow.

    """
    def _create_key(prefix, property_name):
        return prefix + '_' + property_name + '_array'

    def _trans_prop_name(prop_name):
        if prop_name == 'id':
            return 'uuid'
        else:
            return prop_name

    def get_ids(extended_vip):
        ids = {}
        ids['vip'] = extended_vip['id']
        ids['pool'] = extended_vip['pool']['id']
        ids['members'] = [m['id'] for m in extended_vip['members']]
        ids['health_monitors'] = [
            hm['id'] for hm in extended_vip['health_monitors']
        ]
        return ids

    trans_vip = {}
    LOG.debug('Vip graph to be translated: ' + str(extended_vip))
    for vip_property in VIP_PROPERTIES:
        trans_vip['vip_' + vip_property] = extended_vip.get(
            vip_property, TRANSLATION_DEFAULTS.get(vip_property))
    for pool_property in POOL_PROPERTIES:
        trans_vip['pool_' +
                  pool_property] = extended_vip['pool'][pool_property]
    for member_property in MEMBER_PROPERTIES:
        trans_vip[_create_key('member', member_property)] = []

    two_leg = (extended_vip['pip_address'] != extended_vip['address'])
    if two_leg:
        pool_subnet = plugin._core_plugin.get_subnet(
            context, extended_vip['pool']['subnet_id'])

    for member in extended_vip['members']:
        if member['status'] != constants.PENDING_DELETE:
            if (two_leg and netaddr.IPAddress(member['address'])
                    not in netaddr.IPNetwork(pool_subnet['cidr'])):
                member_ports = plugin._core_plugin.get_ports(
                    context,
                    filters={
                        'fixed_ips': {
                            'ip_address': [member['address']]
                        },
                        'tenant_id': [extended_vip['tenant_id']]
                    })
                if len(member_ports) == 1:
                    member_subnet = plugin._core_plugin.get_subnet(
                        context, member_ports[0]['fixed_ips'][0]['subnet_id'])
                    member_network = netaddr.IPNetwork(member_subnet['cidr'])
                    member['subnet'] = str(member_network.network)
                    member['mask'] = str(member_network.netmask)
                else:
                    member['subnet'] = member['address']

                member['gw'] = pool_subnet['gateway_ip']

            for member_property in MEMBER_PROPERTIES:
                trans_vip[_create_key('member', member_property)].append(
                    member.get(member_property,
                               TRANSLATION_DEFAULTS.get(member_property)))

    for hm_property in HEALTH_MONITOR_PROPERTIES:
        trans_vip[_create_key('hm', _trans_prop_name(hm_property))] = []
    for hm in extended_vip['health_monitors']:
        hm_pool = plugin.get_pool_health_monitor(context, hm['id'],
                                                 extended_vip['pool']['id'])
        if hm_pool['status'] != constants.PENDING_DELETE:
            for hm_property in HEALTH_MONITOR_PROPERTIES:
                value = hm.get(hm_property,
                               TRANSLATION_DEFAULTS.get(hm_property))
                trans_vip[_create_key(
                    'hm', _trans_prop_name(hm_property))].append(value)
    ids = get_ids(extended_vip)
    trans_vip['__ids__'] = ids
    if 'pip_address' in extended_vip:
        trans_vip['pip_address'] = extended_vip['pip_address']
    LOG.debug('Translated Vip graph: ' + str(trans_vip))
    return trans_vip
Example #18
            LAST_EXTERNAL_IP = stun_ip
            LAST_EXTERNAL_IP_TIME = time.time()

            return LAST_EXTERNAL_IP

    except Exception, e:
        logger.debug('external_ip: STUN failed: %s', e)

    ctx = tinyhttp.HTTP(timeout=5, headers={'User-Agent': 'curl/7.12.3'})
    for service in OWN_IP:
        for scheme in ['https', 'http']:
            try:
                data, code = ctx.get(scheme + '://' + service, code=True)
                if code == 200:
                    addr = netaddr.IPAddress(data.strip())
                    if force_ipv4 and addr.version == 6:
                        continue

                    LAST_EXTERNAL_IP = addr
                    LAST_EXTERNAL_IP_TIME = time.time()

                    return LAST_EXTERNAL_IP

            except Exception, e:
                logger.debug('Get IP service failed: %s: %s (%s)', service, e,
                             type(e))

    LAST_EXTERNAL_IP = dns_external_ip()
    if LAST_EXTERNAL_IP:
        LAST_EXTERNAL_IP_TIME = time.time()
Example #19
 def _verify_port_dns(self,
                      net,
                      port,
                      dns_data_db,
                      dns_name=True,
                      dns_domain=True,
                      ptr_zones=True,
                      delete_records=False,
                      provider_net=True,
                      dns_driver=True,
                      original_ips=None,
                      current_dns_name=DNSNAME,
                      previous_dns_name=''):
     if dns_name:
         self.assertEqual(current_dns_name, port[dns.DNSNAME])
     if dns_name and dns_domain and provider_net and dns_driver:
         self.assertEqual(current_dns_name, dns_data_db['current_dns_name'])
         self.assertEqual(previous_dns_name,
                          dns_data_db['previous_dns_name'])
         if current_dns_name:
             self.assertEqual(net[dns.DNSDOMAIN],
                              dns_data_db['current_dns_domain'])
         else:
             self.assertFalse(dns_data_db['current_dns_domain'])
         records_v4 = [
             ip['ip_address'] for ip in port['fixed_ips']
             if netaddr.IPAddress(ip['ip_address']).version == 4
         ]
         records_v6 = [
             ip['ip_address'] for ip in port['fixed_ips']
             if netaddr.IPAddress(ip['ip_address']).version == 6
         ]
         expected = []
         expected_delete = []
         if records_v4:
             if current_dns_name:
                 expected.append(
                     mock.call(net[dns.DNSDOMAIN], current_dns_name, 'A',
                               records_v4))
             if delete_records:
                 expected_delete.append(
                     mock.call(net[dns.DNSDOMAIN], V4UUID))
         if records_v6:
             if current_dns_name:
                 expected.append(
                     mock.call(net[dns.DNSDOMAIN], current_dns_name, 'AAAA',
                               records_v6))
             if delete_records:
                 expected_delete.append(
                     mock.call(net[dns.DNSDOMAIN], V6UUID))
         mock_client.recordsets.create.assert_has_calls(expected,
                                                        any_order=True)
         self.assertEqual(len(mock_client.recordsets.create.call_args_list),
                          len(expected))
         mock_client.recordsets.delete.assert_has_calls(expected_delete,
                                                        any_order=True)
         self.assertEqual(len(mock_client.recordsets.delete.call_args_list),
                          len(expected_delete))
         expected = []
         expected_delete = []
         if ptr_zones:
             records = records_v4 + records_v6
             recordset_name = '%s.%s' % (current_dns_name,
                                         net[dns.DNSDOMAIN])
             for record in records:
                 in_addr_name = netaddr.IPAddress(record).reverse_dns
                 in_addr_zone_name = self._get_in_addr_zone_name(
                     in_addr_name)
                 if current_dns_name:
                     expected.append(
                         mock.call(in_addr_zone_name, in_addr_name, 'PTR',
                                   [recordset_name]))
                 if delete_records and not original_ips:
                     expected_delete.append(
                         mock.call(in_addr_zone_name, in_addr_name))
             if delete_records and original_ips:
                 for record in original_ips:
                     in_addr_name = netaddr.IPAddress(record).reverse_dns
                     in_addr_zone_name = self._get_in_addr_zone_name(
                         in_addr_name)
                     expected_delete.append(
                         mock.call(in_addr_zone_name, in_addr_name))
         mock_admin_client.recordsets.create.assert_has_calls(
             expected, any_order=True)
         self.assertEqual(
             len(mock_admin_client.recordsets.create.call_args_list),
             len(expected))
         mock_admin_client.recordsets.delete.assert_has_calls(
             expected_delete, any_order=True)
         self.assertEqual(
             len(mock_admin_client.recordsets.delete.call_args_list),
             len(expected_delete))
     else:
         if not dns_name:
             self.assertEqual('', port[dns.DNSNAME])
             self.assertIsNone(dns_data_db)
         self.assertFalse(mock_client.recordsets.create.call_args_list)
         self.assertFalse(
             mock_admin_client.recordsets.create.call_args_list)
         self.assertFalse(mock_client.recordsets.delete.call_args_list)
         self.assertFalse(
             mock_admin_client.recordsets.delete.call_args_list)
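The PTR recordset names used above come from netaddr's reverse_dns attribute; for illustration:
import netaddr

print(netaddr.IPAddress("192.0.2.1").reverse_dns)    # 1.2.0.192.in-addr.arpa.
print(netaddr.IPAddress("2001:db8::1").reverse_dns)  # nibble-reversed name under ip6.arpa.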
Example #20
0
 def is_my_ipv6_addr(self, ipv6_addr):
     # get a address string in the canonical format
     target_ipv6_addr = str(netaddr.IPAddress(ipv6_addr))
     return ((self.get_ipv6_global_addr() == target_ipv6_addr)
             or (self.get_ipv6_link_local_addr() == target_ipv6_addr))
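Round-tripping through IPAddress yields the canonical (compressed, lower-case) IPv6 form, which is what makes the plain string comparison above reliable; illustrative value:
import netaddr

print(str(netaddr.IPAddress("2001:0DB8:0000:0000:0000:0000:0000:0001")))  # 2001:db8::1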
Example #21
      vcpus=4,
      memory_mb=10 * 1024,
      local_gb=250,
      vcpus_used=2,
      memory_mb_used=5 * 1024,
      local_gb_used=125,
      hypervisor_type="xen",
      hypervisor_version=3,
      hypervisor_hostname="hyper1",
      free_ram_mb=5 * 1024,
      free_disk_gb=125,
      current_workload=2,
      running_vms=2,
      cpu_info='cpu_info',
      disk_available_least=100,
      host_ip=netaddr.IPAddress('1.1.1.1')),
 dict(id=2,
      service_id=2,
      host="compute2",
      vcpus=4,
      memory_mb=10 * 1024,
      local_gb=250,
      vcpus_used=2,
      memory_mb_used=5 * 1024,
      local_gb_used=125,
      hypervisor_type="xen",
      hypervisor_version=3,
      hypervisor_hostname="hyper2",
      free_ram_mb=5 * 1024,
      free_disk_gb=125,
      current_workload=2,
Example #22
 def coerce(obj, attr, value):
     try:
         return netaddr.IPAddress(value)
     except netaddr.AddrFormatError as e:
         raise ValueError(str(e))
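Usage sketch for the coercion above: a valid value is returned as an IPAddress, while malformed input surfaces as ValueError (obj and attr are unused by the conversion itself, so placeholders are passed):
print(coerce(None, "host_ip", "10.0.0.1"))  # 10.0.0.1
try:
    coerce(None, "host_ip", "not-an-ip")
except ValueError as err:
    print("rejected: %s" % err)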
Example #23
 def process_result_value(self, value, dialect):
     return netaddr.IPAddress(value)
Example #24
    def configure_traffic_stream(self, traffic_flows, nr_of_flows,
                                 packet_size, **kwargs):

        flow_percentage = kwargs.pop("percentage", 1000000) / 10000
        trex_dst_mac = kwargs.pop("traffic_dst_mac", '00:00:02:00:00:00')
        trex_src_mac = kwargs.pop("traffic_src_mac", '00:00:01:00:00:00')
        l2_macs = kwargs.pop("l2_macs", 1)

        #
        # The packet size passed here assumes it includes the checksum, however
        # the TRex packet size does not. Adjust the size to correct this.
        #
        packet_size -= 4

        if traffic_flows == TrafficFlowType.none or \
           self.__traffic_flows != TrafficFlowType.none:
            #
            # We need a cleanup, either because 'none' was requested or
            # before we configure a new traffic flow type.
            #
            self._delete_traffic_stream_config()

        if traffic_flows == TrafficFlowType.l2_mac or \
           traffic_flows == TrafficFlowType.l3_ipv4 or \
           traffic_flows == TrafficFlowType.nfv_mobile:

            #
            # Max flows due to IPv4 address limit, and addresses used for tests
            #
            if nr_of_flows > 0x00ffffff:
                raise ValueError("To many flows requested, max {} supported!".
                                 format(0x00ffffff))

            L2 = Ether(src=trex_src_mac,
                       dst=trex_dst_mac)
            L3 = IP(src="1.0.0.0",
                    dst="2.0.0.0")
            L4 = UDP(chksum=0)

            if (len(str(L2/L3/L4)) + 4) > packet_size:  # +4 for Ethernet CRC
                raise ValueError("Packet size ({} bytes) to small for"
                                 "requested packet ({} bytes)!".
                                 format(packet_size, len(L2/L3/L4) + 4))

            if traffic_flows == TrafficFlowType.l2_mac:
                src_base = self._mac_2_int(trex_src_mac) & 0xff000000
                dst_base = self._mac_2_int(trex_dst_mac) & 0xff000000
                vm = [
                    # Source MAC address
                    STLVmFlowVar(name="src",
                                 min_value=src_base,
                                 max_value=src_base + nr_of_flows - 1,
                                 size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="src", pkt_offset=8),

                    # Destination MAC address
                    STLVmFlowVar(name="dst",
                                 min_value=dst_base,
                                 max_value=dst_base + nr_of_flows - 1,
                                 size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="dst", pkt_offset=2)
                ]

            elif traffic_flows == TrafficFlowType.l3_ipv4:

                src_end = str(netaddr.IPAddress(
                    int(netaddr.IPAddress('1.0.0.0')) +
                    nr_of_flows - 1))
                dst_end = str(netaddr.IPAddress(
                    int(netaddr.IPAddress('2.0.0.0')) +
                    nr_of_flows - 1))

                vm = [
                    # Source IPv4 address
                    STLVmFlowVar(name="src", min_value="1.0.0.0",
                                 max_value=src_end, size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="src", pkt_offset="IP.src"),

                    # Destination IPv4 address
                    STLVmFlowVar(name="dst", min_value="2.0.0.0",
                                 max_value=dst_end, size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="dst", pkt_offset="IP.dst"),

                    # Checksum
                    STLVmFixIpv4(offset="IP")
                ]
            elif traffic_flows == TrafficFlowType.nfv_mobile:

                src_end = str(netaddr.IPAddress(
                    int(netaddr.IPAddress('1.0.0.0')) +
                    nr_of_flows - 1))
                dst_end = str(netaddr.IPAddress(
                    int(netaddr.IPAddress('2.0.0.0')) +
                    nr_of_flows - 1))

                vm = [
                    # Source MAC address
                    STLVmFlowVar(name="srcm",
                                 min_value=0x01000001,
                                 max_value=0x01000001 + l2_macs - 1,
                                 size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="srcm", pkt_offset=8),

                    # Destination MAC address
                    STLVmFlowVar(name="dstm",
                                 min_value=0x02000000,
                                 max_value=0x02000000 + l2_macs - 1,
                                 size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="dstm", pkt_offset=2),

                    # Source IPv4 address
                    STLVmFlowVar(name="src", min_value="1.0.0.0",
                                 max_value=src_end, size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="src", pkt_offset="IP.src"),

                    # Destination IPv4 address
                    STLVmFlowVar(name="dst", min_value="2.0.0.0",
                                 max_value=dst_end, size=4, op="inc"),
                    STLVmWrFlowVar(fv_name="dst", pkt_offset="IP.dst"),

                    # Checksum
                    STLVmFixIpv4(offset="IP")
                ]

            else:
                raise ValueError("Unsupported traffic type for T-Rex tester!!!")

            if traffic_flows == TrafficFlowType.nfv_mobile:
                stream_percentage = flow_percentage / 2
            else:
                stream_percentage = flow_percentage

            headers = L2/L3/L4
            padding = max(0, (packet_size - len(headers))) * 'e'
            packet = headers/padding

            trex_packet = STLPktBuilder(pkt=packet, vm=vm)

            trex_stream = STLStream(packet=trex_packet,
                                    mode=STLTXCont(percentage=stream_percentage))

            self.__trex_client.add_streams(trex_stream,
                                           ports=[self.__trex_port])

            #
            # For nfv_mobile we still need to setup the alternating streams.
            #
            if traffic_flows == TrafficFlowType.nfv_mobile:
                alternate_flows = kwargs.pop("alternate_flows", 200000)
                stream_percentage = flow_percentage / 2

                self.__active_alternate_stream = 0
                #
                # Keep the flows the same as for the Xena version, so the
                # traffic scripts using this do not have to differentiate
                # between traffic generator types.
                #
                # The Xena uses streams and every stream can generate 64K
                # flows. To find the flow start we need the number of base
                # flows rounded up to the next 64K (stream) and use the next one.
                #
                # For the individual iterations of the flow set they also
                # need to start at a 64K boundary.
                #
                start_stream_id = self._div_round_up(nr_of_flows, 0x10000) + 1
                for alternate_flow_sets in range(0, 3):
                    flow_start = start_stream_id * 0x10000

                    src_start = str(netaddr.IPAddress(
                        int(netaddr.IPAddress('1.0.0.0')) +
                        flow_start))
                    src_end = str(netaddr.IPAddress(
                        int(netaddr.IPAddress('1.0.0.0')) +
                        flow_start +
                        alternate_flows - 1))
                    dst_start = str(netaddr.IPAddress(
                        int(netaddr.IPAddress('2.0.0.0')) +
                        flow_start))
                    dst_end = str(netaddr.IPAddress(
                        int(netaddr.IPAddress('2.0.0.0')) +
                        flow_start +
                        alternate_flows - 1))

                    vm = [
                        # Source MAC address
                        STLVmFlowVar(name="srcm",
                                     min_value=0x01000001,
                                     max_value=0x01000001 + l2_macs - 1,
                                     size=4, op="inc"),
                        STLVmWrFlowVar(fv_name="srcm", pkt_offset=8),

                        # Destination MAC address
                        STLVmFlowVar(name="dstm",
                                     min_value=0x02000000,
                                     max_value=0x02000000 + l2_macs - 1,
                                     size=4, op="inc"),
                        STLVmWrFlowVar(fv_name="dstm", pkt_offset=2),

                        # Source IPv4 address
                        STLVmFlowVar(name="src", min_value=src_start,
                                     max_value=src_end, size=4, op="inc"),
                        STLVmWrFlowVar(fv_name="src", pkt_offset="IP.src"),

                        # Destination IPv4 address
                        STLVmFlowVar(name="dst", min_value=dst_start,
                                     max_value=dst_end, size=4, op="inc"),
                        STLVmWrFlowVar(fv_name="dst", pkt_offset="IP.dst"),

                        # Checksum
                        STLVmFixIpv4(offset="IP")
                    ]
                    trex_packet = STLPktBuilder(pkt=packet, vm=vm)

                    stream = STLStream(packet=trex_packet,
                                       mode=STLTXCont(percentage=stream_percentage),
                                       start_paused=False
                                       if alternate_flow_sets == 0 else True)

                    self.__alternate_stream_sets.append(
                        self.__trex_client.add_streams(stream,
                                                       ports=[self.__trex_port]))

                    start_stream_id += self._div_round_up(alternate_flows, 0x10000)

            self.__traffic_flows = traffic_flows
            return True
        elif traffic_flows == TrafficFlowType.none:
            self.__traffic_flows = traffic_flows
            return True
        else:
            raise ValueError("Unsupported traffic flow passed for T-Rex tester!")

        self.__traffic_flows = TrafficFlowType.none
        return False
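The src_end/dst_end values above are computed with plain integer arithmetic on addresses; a standalone sketch (flow count is arbitrary):
import netaddr

nr_of_flows = 1000
src_end = str(netaddr.IPAddress(int(netaddr.IPAddress("1.0.0.0")) + nr_of_flows - 1))
print(src_end)  # 1.0.3.231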
Example #25
t = time.time()

while True:
    try:
        if time.time() - t > 5:
            t = time.time()
            try:
                cursor = conn.cursor()
                try:
                    with open('leasesmanual.leases') as f:
                        leasesContent = f.read().splitlines()
                    for content in leasesContent:
                        content = content.split(' ')
                        try:
                            cursor.execute(
                                "insert into leases (id, leaseTime, ip, host, mac, leaseStatus) values ('{0}', now(), '{1}', '{2}', '{3}', 'd') ON DUPLICATE KEY UPDATE leaseTime = now(), ip = '{1}', leaseStatus = 'd'"
                                .format(content[4],
                                        int(netaddr.IPAddress(content[2])),
                                        content[3], content[1]))
                            conn.commit()
                        except:
                            print 'error inserting MySQL row'
                except:
                    print 'error processing leases file'
            except:
                print 'cannot open cursor to MySQL database...'

    except KeyboardInterrupt:
        conn.close()
        break
        sys.exit()
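The INSERT above stores each lease IP as an integer via int(netaddr.IPAddress(...)); the conversion is symmetric, as this illustrative round trip shows:
import netaddr

as_int = int(netaddr.IPAddress("192.168.1.20"))
print(as_int)                     # 3232235796
print(netaddr.IPAddress(as_int))  # 192.168.1.20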