Example #1
from netaddr import IPAddress, IPRange, IPSet

def ipranges(ip_input):
    ips = []
    ip_input = ip_input.replace(" ", "")
    if '/' in ip_input:
        if ',' in ip_input:
            ip_input_ranges = ip_input.split(',')
            for ip_range in ip_input_ranges:
                for ip in IPSet([ip_range]):
                    ips.append(ip)
        else:
            ips = IPSet([ip_input])
    elif '-' in ip_input:
        if ',' in ip_input:
            ip_input_ranges = ip_input.split(',')
            for ip_range in ip_input_ranges:
                ip_split = ip_range.split("-")
                ip_range_temp = IPRange(ip_split[0], ip_split[1])
                for idx, ip in enumerate(ip_range_temp):
                    ips.append(ip_range_temp[idx])
        else:
            ip_split = ip_input.split("-")
            ips = IPRange(ip_split[0], ip_split[1])
    else:
        ips = IPAddress(ip_input)
    return ips
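
A brief usage sketch (illustrative addresses; note that the return type deliberately varies with the input form, so callers must handle IPAddress, IPRange, IPSet, and plain list results):

ipranges('192.0.2.1')                      # IPAddress('192.0.2.1')
list(ipranges('192.0.2.8-192.0.2.10'))     # [IPAddress('192.0.2.8'), ..., IPAddress('192.0.2.10')]
list(ipranges('192.0.2.0/30'))             # the four addresses of the /30
ipranges('192.0.2.0/31,192.0.2.2/31')      # a plain list of IPAddress objects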
Example #2
def test_ipset_basic_api():
    range1 = IPRange('192.0.2.1', '192.0.2.15')

    ip_list = [
        IPAddress('192.0.2.1'),
        '192.0.2.2/31',
        IPNetwork('192.0.2.4/31'),
        IPAddress('192.0.2.6'),
        IPAddress('192.0.2.7'),
        '192.0.2.8',
        '192.0.2.9',
        IPAddress('192.0.2.10'),
        IPAddress('192.0.2.11'),
        IPNetwork('192.0.2.12/30'),
    ]

    set1 = IPSet(range1.cidrs())

    set2 = IPSet(ip_list)

    assert set2 == IPSet([
        '192.0.2.1/32',
        '192.0.2.2/31',
        '192.0.2.4/30',
        '192.0.2.8/29',
    ])

    assert set1 == set2
    assert set2.pop() in set1
    assert set1 != set2
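
The equality assertions work because an IPSet normalizes whatever it is given into a minimal list of CIDRs; a minimal sketch of that normalization:

from netaddr import IPSet

s = IPSet(['192.0.2.0', '192.0.2.1', '192.0.2.2/31'])
print(s.iter_cidrs())   # [IPNetwork('192.0.2.0/30')]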
Example #3
    def isWhitelisted(self, conn, indicatorType, indicator):
        """Return whether or not the indicator of type indicatorType is whitelisted by this whitelist.

        If the indicator is a single address, it is whitelisted if it is included in any CIDR.
        If the indicator is a network spec, it is whitelisted if all of the addresses it represents are included in any CIDR.
        """
        sn = IPNetwork(indicator)
        minip = sn.first
        maxip = sn.last

        c = conn.cursor()
        c.execute("select cidr,minip,maxip from ipv4sn where ? between minip and maxip or ? between minip and maxip order by minip", (minip, maxip))
        ipset = None
        rec = c.fetchone()
        # build a set of all IPs in the stored network specs that contain the min or max IP of the indicator
        while rec is not None:
            if ipset is None:
                ipset = IPSet(IPNetwork(rec[0]))
            else:
                ipset = ipset | IPSet(IPNetwork(rec[0]))
            rec = c.fetchone()
        # if the resulting set is empty, the indicator is not whitelisted
        if ipset is None:
            c.close()
            return False
        # if the set of IPs represented by the indicator is a subset of the set built above, it is whitelisted
        ips = IPSet(sn)
        rv = ips.issubset(ipset)
        c.close()
        return rv
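
The issubset() call at the end is what implements "all addresses of the indicator are included in some CIDR"; a standalone illustration with made-up whitelist entries:

from netaddr import IPNetwork, IPSet

whitelist = IPSet(IPNetwork('10.0.0.0/24')) | IPSet(IPNetwork('10.0.1.0/24'))
print(IPSet(IPNetwork('10.0.0.128/25')).issubset(whitelist))  # True
print(IPSet(IPNetwork('10.0.2.0/25')).issubset(whitelist))    # False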
Example #4
def test_ipset_converts_to_cidr_networks_v4():
    s1 = IPSet(IPNetwork('10.1.2.3/8'))
    s1.add(IPNetwork('192.168.1.2/16'))
    assert list(s1.iter_cidrs()) == [
        IPNetwork('10.0.0.0/8'),
        IPNetwork('192.168.0.0/16'),
    ]
Example #5
def test_ipset_converts_to_cidr_networks_v6():
    s1 = IPSet(IPNetwork('fe80::4242/64'))
    s1.add(IPNetwork('fe90::4343/64'))
    assert list(s1.iter_cidrs()) == [
        IPNetwork('fe80::/64'),
        IPNetwork('fe90::/64'),
    ]
Example #6
def test_ipset_clear():
    ipset = IPSet(['10.0.0.0/16'])
    ipset.update(IPRange('10.1.0.0', '10.1.255.255'))
    assert ipset == IPSet(['10.0.0.0/15'])

    ipset.clear()
    assert ipset == IPSet([])
Example #7
def assign_ips(_upper_ref, _from_key, lower_refs, to_key,
               ip_start='192.168.0.1', ip_end='192.168.0.254',
               **_kwargs):
    """Assign ips to hosts' configurations."""
    if not ip_start or not ip_end:
        return {}
    host_ips = {}
    unassigned_hosts = []
    ips = IPSet(IPRange(ip_start, ip_end))
    for lower_key, lower_ref in lower_refs.items():
        ip_addr = lower_ref.get(to_key, '')
        if ip_addr:
            host_ips[lower_key] = ip_addr
            ips.remove(ip_addr)
        else:
            unassigned_hosts.append(lower_key)

    for ip_addr in ips:
        if not unassigned_hosts:
            break

        host = unassigned_hosts.pop(0)
        host_ips[host] = str(ip_addr)

    logging.debug('assign %s: %s', to_key, host_ips)
    return host_ips
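
A usage sketch with made-up host references (the lower_refs structure here is illustrative, and the module's logging/netaddr imports are assumed to be in place):

lower_refs = {
    'host1': {'ip': '192.168.0.5'},  # already assigned, kept as-is
    'host2': {},                     # unassigned, receives the next free IP
}
print(assign_ips(None, None, lower_refs, 'ip'))
# -> {'host1': '192.168.0.5', 'host2': '192.168.0.1'}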
Example #8
    def generateIP(self):
        network = IPSet(IPNetwork(self.cidr))
        network.remove(min(network))
        network.remove(max(network))
        hostlist = IPSet([ h.ip for h in self.hosts.all() ])
        available = network - hostlist
        return min(available)

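The same first-free-address logic as a standalone helper (a sketch; the used_ips argument stands in for the ORM query in the method above):

from netaddr import IPNetwork, IPSet

def first_free_ip(cidr, used_ips):
    net = IPSet(IPNetwork(cidr))
    net.remove(min(net))  # drop the network address
    net.remove(max(net))  # drop the broadcast address
    return min(net - IPSet(used_ips))

print(first_free_ip('192.0.2.0/29', ['192.0.2.1', '192.0.2.2']))  # 192.0.2.3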
Example #9
def test_ipset_updates():
    s1 = IPSet(['192.0.2.0/25'])
    s2 = IPSet(['192.0.2.128/25'])

    s1.update(s2)
    assert s1 == IPSet(['192.0.2.0/24'])

    s1.update(['192.0.0.0/24', '192.0.1.0/24', '192.0.3.0/24'])
    assert s1 == IPSet(['192.0.0.0/22'])
Example #10
def test_ipset_exceptions():
    s1 = IPSet(['10.0.0.1'])

    #   IPSet objects are not hashable.
    with pytest.raises(TypeError):
        hash(s1)

    #   Bad update argument type.
    with pytest.raises(TypeError):
        s1.update(42)
Example #11
def test_disjointed_ipsets():
    s1 = IPSet(['192.0.2.0', '192.0.2.1', '192.0.2.2'])
    s2 = IPSet(['192.0.2.2', '192.0.2.3', '192.0.2.4'])

    assert s1 & s2 == IPSet(['192.0.2.2/32'])
    assert not s1.isdisjoint(s2)

    s3 = IPSet(['192.0.2.0', '192.0.2.1'])
    s4 = IPSet(['192.0.2.3', '192.0.2.4'])

    assert s3 & s4 == IPSet([])
    assert s3.isdisjoint(s4)
Example #12
def optimize_network_range(ipstr, threshold=0.9, verbose=DEBUG):
    """
    Parses the input string and then calculates the subnet usage percentage. If over
    the threshold it returns a loose result; otherwise it returns a strict one.

    :param ipstr:
        IP string to be parsed.

    :param threshold:
        The percentage of the network usage required to return a loose result.

    :param verbose:
        Toggle verbosity.

    Example of default behavior using 0.9 (90% usage) threshold:
        >>> import cidrize
        >>> cidrize.optimize_network_range('10.20.30.40-50', verbose=True)
        Subnet usage ratio: 0.34375; Threshold: 0.9
        Under threshold, IP Parse Mode: STRICT
        [IPNetwork('10.20.30.40/29'), IPNetwork('10.20.30.48/31'), IPNetwork('10.20.30.50/32')]

    Example using a 0.3 (30%) threshold:
        >>> import cidrize
        >>> cidrize.optimize_network_range('10.20.30.40-50', threshold=0.3, verbose=True)
        Subnet usage ratio: 0.34375; Threshold: 0.3
        Over threshold, IP Parse Mode: LOOSE
        [IPNetwork('10.20.30.32/27')]

    """
    if threshold > 1 or threshold < 0:
        raise CidrizeError('Threshold must be from 0.0 to 1.0')

    # Can't optimize 0.0.0.0/0!
    if ipstr in EVERYTHING:
        return cidrize(ipstr)

    loose = IPSet(cidrize(ipstr))
    strict = IPSet(cidrize(ipstr, strict=True))
    ratio = float(len(strict)) / float(len(loose))

    if verbose:
        print('Subnet usage ratio: %s; Threshold: %s' % (ratio, threshold))

    if ratio >= threshold:
        if verbose:
            print('Over threshold, IP Parse Mode: LOOSE')
        result = loose.iter_cidrs()
    else:
        if verbose:
            print('Under threshold, IP Parse Mode: STRICT')
        result = strict.iter_cidrs()

    return result
Example #13
    def parse(self, data):
        mynets = IPSet()

        for line in data.split("\n"):
            if not line or line[0] == ";":
                continue

            ip, sbl = line.split(";")
            ip = IPNetwork(ip.strip())
            mynets.add(ip)

        return mynets
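
A usage sketch with a made-up zone dump (the two-field semicolon format and the ";" comment convention are inferred from the parser; obj is a hypothetical instance of the surrounding class):

data = "; a comment line\n192.0.2.0/24;SBL123\n198.51.100.0/25;SBL456\n"
mynets = obj.parse(data)
print(mynets.iter_cidrs())  # [IPNetwork('192.0.2.0/24'), IPNetwork('198.51.100.0/25')]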
Example #14
def concat_networks(context, pool_1, pool_2):
    if pool_1.is_free and pool_2.is_free:
        network_1 = pool_to_network(pool_1)
        network_2 = pool_to_network(pool_2)
        if network_1.size == network_2.size:
            ipset = IPSet([network_1, network_2])
            cidr = ipset.iter_cidrs()[0]
            pool_1.ip = cidr.first
            pool_1.netmask = cidr.netmask.value
            count = len(pool_to_network(pool_1))
            pool_1.count = count
            pool_delete(context, pool_2.pool_id)
            concat_pool(context, pool_1)
Example #15
def add_available_prefixes(parent, prefix_list):
    """
    Create fake Prefix objects for all unallocated space within a prefix.
    """

    # Find all unallocated space
    available_prefixes = IPSet(parent) ^ IPSet([p.prefix for p in prefix_list])
    available_prefixes = [Prefix(prefix=p) for p in available_prefixes.iter_cidrs()]

    # Concatenate and sort complete list of children
    prefix_list = list(prefix_list) + available_prefixes
    prefix_list.sort(key=lambda p: p.prefix)

    return prefix_list
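
The symmetric difference on the first line is the core trick: parent ^ allocated yields exactly the unallocated gaps. A minimal illustration without the Prefix model:

from netaddr import IPNetwork, IPSet

parent = IPNetwork('10.0.0.0/24')
allocated = ['10.0.0.0/26', '10.0.0.128/26']
free = IPSet(parent) ^ IPSet(allocated)
print(free.iter_cidrs())  # [IPNetwork('10.0.0.64/26'), IPNetwork('10.0.0.192/26')]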
Example #16
def summarizeIPs(inFile, outFile):
    netSet = IPSet()
    with open(inFile, 'r') as f:
        for line in f.readlines():
            net = IPSet()
            try:
                net.add(line.strip())
            except AddrFormatError:
                continue
            else:
                netSet = netSet | net
    netMin = netSet.iter_cidrs()
    with open(outFile, 'w') as f:
        for net in netMin:
            f.write('{}\n'.format(net))
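
IPSet.add() raises before mutating the set when given a malformed string, so the per-line temporary set is not strictly needed; a compact equivalent (a sketch, assuming netaddr's AddrFormatError):

from netaddr import IPSet, AddrFormatError

def summarize(lines):
    s = IPSet()
    for line in lines:
        try:
            s.add(line.strip())
        except AddrFormatError:
            continue
    return s.iter_cidrs()

print(summarize(['192.0.2.0/25', '192.0.2.128/25', 'not-an-ip']))
# [IPNetwork('192.0.2.0/24')]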
Example #17
def test_ipset_member_insertion_and_deletion():
    s1 = IPSet()
    s1.add('192.0.2.0')
    assert s1 == IPSet(['192.0.2.0/32'])

    s1.remove('192.0.2.0')
    assert s1 == IPSet([])

    s1.add(IPRange("10.0.0.0", "10.0.0.255"))
    assert s1 == IPSet(['10.0.0.0/24'])

    s1.remove(IPRange("10.0.0.128", "10.10.10.10"))
    assert s1 == IPSet(['10.0.0.0/25'])
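
The last assertion also shows that remove() only takes away the overlapping part; addresses in the argument that were never in the set are silently ignored:

from netaddr import IPRange, IPSet

s = IPSet(['10.0.0.0/24'])
s.remove(IPRange('10.0.0.128', '10.10.10.10'))  # overlap is only 10.0.0.128-10.0.0.255
print(s.iter_cidrs())  # [IPNetwork('10.0.0.0/25')]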
Example #18
def write_file(scope: str, content: IPSet, prefix=''):
    if prefix and not prefix.endswith('-'):
        prefix = prefix + '-'
    filename = 'output/' + prefix + scope + '.txt'
    cidrs = content.iter_cidrs()
    log.info(f"Writing output file: {filename}")
    log.info(f"There are {len(cidrs)} CIDR blocks in {filename}.")
    with open(filename, 'w') as f:
        f.writelines(f"{cidr}\n" for cidr in cidrs)
Example #19
def choose_ip(routable_cidrs, excluded_cidrs=None, client_addr=''):
    """Find available IP addresses for both sides of a VPN Tunnel.

    This method iterates over the settings.ALLOWED_CIDRS list in order to
    allocate available IP address to both the client and server side of a
    VPN tunnel. CIDRs that belong to the lists of settings.RESERVED_CIDRS,
    `routable_cidrs`, and `excluded_cidrs` are excluded from the allocation
    process.

    :param routable_cidrs: the CIDRs that are to be routed over a VPN tunnel
    :param excluded_cidrs: an optional list of CIDRs to be excluded from the
                           address allocation process
    :param client_addr:    the `client_addr` is used to attempt to pick an
                           adjacent IP address for the server side

    :return: a private IP address

    """
    excluded_cidrs = excluded_cidrs or []  # tolerate the None default above
    exc_nets = routable_cidrs + excluded_cidrs + settings.RESERVED_CIDRS
    # make sure the exc_nets list does not contain any empty strings
    exc_nets = [exc_net for exc_net in exc_nets if exc_net]
    # a list of unique, non-overlapping supernets (to be excluded)
    exc_nets = IPSet(exc_nets).iter_cidrs()
    for network in settings.ALLOWED_CIDRS:
        available_cidrs = IPSet(IPNetwork(network))
        for exc_net in exc_nets:
            available_cidrs.remove(exc_net)
        if not available_cidrs:
            continue
        for cidr in available_cidrs.iter_cidrs():
            first, last = cidr.first, cidr.last
            if client_addr:
                address = IPAddress(client_addr) + 1
            else:
                address = IPAddress(random.randrange(first + 1, last))
            for _ in xrange(first + 1, last):
                if address not in cidr or address == cidr.broadcast:
                    address = cidr.network + 1
                try:
                    Tunnel.objects.get(Q(client=str(address)) |
                                       Q(server=str(address)))
                    address += 1
                except Tunnel.DoesNotExist:
                    return str(address)
Example #20
    def derive_outwall(self):
        """
        Invert the in-wall set against the full IPv4 space ("the big one", 0.0.0.0/0);
        this also excludes the reserved ranges.
        See: http://www.tcpipguide.com/free/t_IPReservedPrivateandLoopbackAddresses-3.htm
        """

        self.ipset_outwall = IPSet(['0.0.0.0/0']) ^ self.ipset_inwall ^ self.ipset_reserved
        self.cidrs_outwall = list(self.ipset_outwall.iter_cidrs())

        logging.info("Finished deriving out-wall IP table(s). Total: %i CIDR blocks.", len(self.cidrs_outwall), )
Example #21
def test_ipset_adding_and_removing_members_ip_addresses_as_ints():
    s1 = IPSet(['10.0.0.0/25'])

    s1.add('10.0.0.0/24')
    assert s1 == IPSet(['10.0.0.0/24'])

    integer1 = int(IPAddress('10.0.0.1'))
    integer2 = int(IPAddress('fe80::'))
    integer3 = int(IPAddress('10.0.0.2'))

    s2 = IPSet([integer1, integer2])
    assert s2 == IPSet(['10.0.0.1/32', 'fe80::/128'])

    s2.add(integer3)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])

    s2.remove(integer2)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32'])

    s2.update([integer2])
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])
Example #22
    def allocate(self, netmask, net_group_name, stack_id=None, stack_name=None):
        context = get_session()
        network = IPNetwork('0.0.0.0/%s' % netmask)
        pool = db.api.free_pool_find_by_netmask_and_netgroup(context, network.netmask.value, net_group_name)
        ip_network = pool_to_network(pool)
        if ip_network.size == network.size:
            pool.is_free = False
            pool.stack_id = stack_id
            pool.stack_name = stack_name
            pool.save()
            allocated_pool = pool
        else:
            pool_list = list(ip_network.subnet(netmask))
            allocated_network = pool_list[0]
            pool_list = IPSet(pool_list[1:])
            allocated_pool = db.api.used_pool_add(context, {'initial_pool': pool.initial_pool, 'cidr': allocated_network,
                                                            'stack_id': stack_id, 'stack_name': stack_name})
            for free_pool in pool_list.iter_cidrs():
                db.api.free_pool_add(context, {'initial_pool': pool.initial_pool, 'cidr': free_pool})
            db.api.pool_delete(context, pool.pool_id)
            logger.info('allocate pool id %s %s' % (allocated_pool.pool_id, allocated_network))
        return allocated_pool
Example #23
def parse_commas(ipstr, **kwargs):
    """
    This will break up a comma-separated input string of assorted inputs, run them through
    cidrize(), flatten the list, and return the list. If any item in the list
    fails, it will allow the exception to pass through as if it were parsed
    individually. All objects must parse or nothing is returned.

    Example:

    :param ipstr:
        A comma-separated string of IP address patterns.
    """
    # Clean whitespace before we process
    ipstr = ipstr.replace(' ', '').strip()
    items = ipstr.split(',')

    # Possibly nested depending on input, so we'll run it thru itertools.chain
    # to flatten it. Then we make it an IPSet to optimize adjacencies and finally
    # return the list of CIDRs within the IPSet.
    ipiter = (cidrize(ip, **kwargs) for ip in items)
    flatiter = itertools.chain.from_iterable(ipiter)
    ipset = IPSet(flatiter)

    return ipset.iter_cidrs()
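
A usage sketch (assuming cidrize() from the same module returns a list of IPNetwork objects for each pattern):

print(parse_commas('10.20.30.0/24, 10.20.31.0/24'))
# the IPSet merges the adjacent /24s, e.g. -> [IPNetwork('10.20.30.0/23')]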
Example #24
    def parse_table(self):
        logging.info("Start parsing IP table(s)")

        with open(self.cache_apnic, 'r') as f:
            lines = f.readlines()

        ip_list = []
        for line in lines:
            if line.startswith('apnic|CN|ipv4'):
                line = line.rstrip()
                apnic, country, v4v6, prefix, count_of_addr, date, status = line.split('|')
                if v4v6 == 'ipv4' and country == 'CN':
                    decimal = 32 - binary_log(int(count_of_addr))
                    cidr_addr = prefix + '/' + str(decimal)
                    ip_list.append(cidr_addr)

        self.ipset_inwall = IPSet(ip_list)
        self.cidrs_inwall = list(self.ipset_inwall.iter_cidrs())

        logging.info("Finished parsing in-wall IP table(s). Total: %i CIDR blocks.", len(self.cidrs_inwall), )
Example #25
    def valid(self):
        """
        Make sure the input parameters stick to the form of the example below:
        parameters_tuple(src_zone = 'untrust', src_ip = ['100.1.4.2/32', '100.1.2.0/24'],
                             dst_zone = 'trust', dst_ip = ['10.1.3.0/30'],
                             application = {'tcp': {'dst-port': ['80', '20'], 'src-port': ['any', '5-20']}}
                             )
        """

        def ip_validate(ip):
            ip_validator = re.compile(r'^((2[0-4]\d|25[0-5]|[01]?\d\d?)\.){3}(2[0-4]\d|25[0-5]|[01]?\d\d?)/(\d{1,2})$')
            match = ip_validator.match(ip)
            if match is not None and int(match.groups()[3]) <= 32:
                return True

            return False

        def application_validate(application):
            """
                Unfold the ports from 1-3 to [1,2,3]; keep 'any' in its original form
            """
            try:
                for rule in application.values():  # renamed from 'tuple', which shadowed the builtin
                    if not isinstance(rule, dict) or not isinstance(rule['src-port'], list) or not isinstance(
                            rule['dst-port'], list):
                        return False

                    src_port_list = []
                    for port in rule['src-port']:
                        if not port_validate(port):
                            return False
                        src_port_list.extend(port_cal(port))  # port_cal: project helper that unfolds 'a-b' ranges
                    rule['src-port'] = src_port_list

                    dst_port_list = []
                    for port in rule['dst-port']:
                        if not port_validate(port):
                            return False
                        dst_port_list.extend(port_cal(port))
                    rule['dst-port'] = dst_port_list
            except Exception as e:
                logging.warning(str(application) + ' is not a valid application input, and the error is "' + str(e) + '"')
                return False
            else:
                return True

        def port_validate(port):
            # anchored pattern: the original r'(\d+-\d+)|(\d*)|any' matched the
            # empty string, so any input passed validation
            port_validator = re.compile(r'^(\d+-\d+|\d+|any)$')
            if port_validator.match(port) is None:
                return False
            return True

        src_ip = IPSet([])
        for ip in self.src_ip:
            if ip == 'any':
                ip = '0.0.0.0/0'
            if not ip_validate(ip):
                return False
            src_ip.add(ip)
        self.src_ip = src_ip

        dst_ip = IPSet([])
        for ip in self.dst_ip:
            if ip == 'any':
                ip = '0.0.0.0/0'
            if not ip_validate(ip):
                return False
            dst_ip.add(ip)
        self.dst_ip = dst_ip

        if not application_validate(self.application):
            return False

        return True
Example #26
        'NAME': 'nsupdate.sqlite',               # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',             # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': ''              # Set to empty string for default.
    }
}

# these useragents are unacceptable for /nic/update service
BAD_AGENTS = set([])  # the set can have str elements

# these IPAddresses and/or IPNetworks are unacceptable for /nic/update service
# like e.g. IPs of servers related to illegal activities
from netaddr import IPSet, IPAddress, IPNetwork
BAD_IPS_HOST = IPSet([])  # inner list can have IPAddress and IPNetwork elements
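
# Illustrative only (not part of the original settings): how these sets might
# be populated, using documentation-range example values.
#BAD_AGENTS = set(['BadAgent/1.0'])
#BAD_IPS_HOST = IPSet([IPAddress('192.0.2.7'), IPNetwork('198.51.100.0/24')])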

# when encountering these hostnames (fqdn), block them early/silently from
# api usage. avoid any database access, so if someone tries to update
# every 5s, the database won't be locked all the time and we can at least
# delete the host from django admin.
BAD_HOSTS = set([])

# nameservers used e.g. for MX lookups in the registration email validation.
# google / cloudflare DNS IPs are only given as example / fallback -
# please configure your own nameservers in your local settings file.
NAMESERVERS = ['8.8.8.8', '1.1.1.1', ]

# registration email validation: disallow specific email domains,
# e.g. domains that have a non-working mx / that are frequently abused.
# we use a multiline string here with one regex per line (used with re.search).
Example #27
def sync_subnets(conn, config):
    log.debug("loading routing tables")
    routing_tables = conn.get_all_route_tables()
    route_tables_by_name = {r.tags.get('Name'): r for r in routing_tables}
    route_tables_by_subnet_id = {}
    for r in routing_tables:
        for a in r.associations:
            route_tables_by_subnet_id[a.subnet_id] = r

    # Get list of AZs
    zones = conn.get_all_zones()

    for vpc_id in config:
        # Get a list of all the remote subnets
        remote_subnets = conn.get_all_subnets(filters={'vpcId': vpc_id})

        seen = set()

        # Go through our config, adjusting or creating subnets as appropriate
        for cidr, block_config in config[vpc_id].items():
            cidr_net = IPNetwork(cidr)
            table_name = block_config.get('routing_table')
            if table_name and table_name not in route_tables_by_name:
                log.warn("couldn't find routing table %s for block %s", table_name, cidr)
                log.warn("skipping rest of %s", cidr)
                continue
            my_rt = route_tables_by_name[table_name]

            ip_set = IPSet(cidr_net)

            for s in remote_subnets:
                if IPNetwork(s.cidr_block) in cidr_net:
                    ip_set.remove(s.cidr_block)
                    if s.tags.get('Name') != block_config['name']:
                        log.info("Setting Name of %s to %s", s, block_config['name'])
                        s.add_tag('Name', block_config['name'])

                        if s.id in route_tables_by_subnet_id:
                            remote_rt = route_tables_by_subnet_id[s.id]
                        else:
                            remote_rt = route_tables_by_subnet_id[None]
                        if remote_rt != my_rt:
                            log.info(
                                "Changing routing table for %s (%s) to %s (%s)",
                                s, s.tags.get('Name'), my_rt,
                                my_rt.tags.get('Name'))
                            if raw_input("(y/N) ") == "y":
                                conn.associate_route_table(my_rt.id, s.id)
                    seen.add(s)

            # Are we missing any subnets?
            # If so, create them!
            # TODO: We want to evenly distribute the ip range over the
            # configured availability zones, without dividing smaller than a
            # /25 network (128 ips, at least 2 of which are reserved)
            # For now we'll just split them as small as /24, and then assign
            # them into the subnets
            while ip_set:
                log.info("%s - %s isn't covered by any subnets", cidr, ip_set)
                my_zones = [z for z in zones if z.name not in block_config.get('skip_azs', [])]

                remaining_cidrs = list(ip_set.iter_cidrs())
                remaining_cidrs.sort(key=lambda s: s.size, reverse=True)
                for s in remaining_cidrs[:]:
                    if s.prefixlen < 24:
                        added = list(s.subnet(24))
                        remaining_cidrs.remove(s)
                        remaining_cidrs.extend(added)
                    ip_set.remove(s)

                zg = itertools.cycle(my_zones)
                while remaining_cidrs:
                    c = remaining_cidrs.pop()
                    z = next(zg)
                    log.info("creating subnet %s in %s/%s", c, z.name, vpc_id)
                    if raw_input("(y/N) ") == "y":
                        log.debug("creating subnet")
                        s = conn.create_subnet(vpc_id, c, z.name)
                        log.debug("adding tag")
                        # TODO: sometimes the subnet isn't actually created by
                        # the time we try and add the tag, so we get a 400 error
                        s.add_tag('Name', block_config['name'])
                        log.debug("associating routing")
                        conn.associate_route_table(my_rt.id, s.id)

        local_missing = set(remote_subnets) - seen
        for m in local_missing:
            log.info("%s:%s (name: %s) is unmanaged", m.id, m.cidr_block, m.tags.get('Name'))
Example #28
    def test_query_block_everything(self):
        resource = {'properties': {'ipRangeFilter': '', 'isVirtualNetworkFilterEnabled': True}}
        expected = IPSet()
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
Example #29
    def test_query_regular(self):
        resource = {'properties': {'ipRangeFilter': '10.0.0.0/16,8.8.8.8',
                                   'isVirtualNetworkFilterEnabled': False}}
        expected = IPSet(['10.0.0.0/16', '8.8.8.8'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
Example #30
class ServerConfig(Config):
    section = "server"

    def read_config(self, config, **kwargs):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))

        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False
        )

        if "restrict_public_rooms_to_local_users" in config and (
            "allow_public_rooms_without_auth" in config
            or "allow_public_rooms_over_federation" in config
        ):
            raise ConfigError(
                "Can't use 'restrict_public_rooms_to_local_users' if"
                " 'allow_public_rooms_without_auth' and/or"
                " 'allow_public_rooms_over_federation' is set."
            )

        # Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
        # flag is now obsolete but we need to check it for backward-compatibility.
        if config.get("restrict_public_rooms_to_local_users", False):
            self.allow_public_rooms_without_auth = False
            self.allow_public_rooms_over_federation = False
        else:
            # If set to 'False', requires authentication to access the server's public
            # rooms directory through the client API. Defaults to 'True'.
            self.allow_public_rooms_without_auth = config.get(
                "allow_public_rooms_without_auth", True
            )
            # If set to 'False', forbids any other homeserver to fetch the server's public
            # rooms directory via federation. Defaults to 'True'.
            self.allow_public_rooms_over_federation = config.get(
                "allow_public_rooms_over_federation", True
            )

        default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)

        # Ensure room version is a str
        default_room_version = str(default_room_version)

        if default_room_version not in KNOWN_ROOM_VERSIONS:
            raise ConfigError(
                "Unknown default_room_version: %s, known room versions: %s"
                % (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
            )

        # Get the actual room version object rather than just the identifier
        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites", False)

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", True
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get("max_mau_value", 0)
        self.mau_stats_only = config.get("mau_stats_only", False)

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get("mau_trial_days", 0)
        self.mau_limit_alerting = config.get("mau_limit_alerting", True)

        # How long to keep redacted events in the database in unredacted form
        # before redacting them.
        redaction_retention_period = config.get("redaction_retention_period", "7d")
        if redaction_retention_period is not None:
            self.redaction_retention_period = self.parse_duration(
                redaction_retention_period
            )
        else:
            self.redaction_retention_period = None

        # How long to keep entries in the `users_ips` table.
        user_ips_max_age = config.get("user_ips_max_age", "28d")
        if user_ips_max_age is not None:
            self.user_ips_max_age = self.parse_duration(user_ips_max_age)
        else:
            self.user_ips_max_age = None

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get("federation_domain_whitelist", None)

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", []
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist
            )

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e
            )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"
        self.start_pushers = config.get("start_pushers", True)

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True
        )

        # Whether to allow per-room membership profiles through the send of membership
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        self.listeners = []  # type: List[dict]
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option"
                )

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.get("port"),
                    )
                    continue

            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener["type"] == "metrics":
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append("0.0.0.0")
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        @attr.s
        class LimitRemoteRoomsConfig(object):
            enabled = attr.ib(
                validator=attr.validators.instance_of(bool), default=False
            )
            complexity = attr.ib(
                validator=attr.validators.instance_of(
                    (float, int)  # type: ignore[arg-type] # noqa
                ),
                default=1.0,
            )
            complexity_error = attr.ib(
                validator=attr.validators.instance_of(str),
                default=ROOM_COMPLEXITY_TOO_GREAT,
            )

        self.limit_remote_rooms = LimitRemoteRoomsConfig(
            **config.get("limit_remote_rooms", {})
        )

        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)

            self.listeners.append(
                {
                    "port": bind_port,
                    "bind_addresses": [bind_host],
                    "tls": True,
                    "type": "http",
                    "resources": [
                        {"names": ["client"], "compress": gzip_responses},
                        {"names": ["federation"], "compress": False},
                    ],
                }
            )

            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append(
                    {
                        "port": unsecure_port,
                        "bind_addresses": [bind_host],
                        "tls": False,
                        "type": "http",
                        "resources": [
                            {"names": ["client"], "compress": gzip_responses},
                            {"names": ["federation"], "compress": False},
                        ],
                    }
                )

        manhole = config.get("manhole")
        if manhole:
            self.listeners.append(
                {
                    "port": manhole,
                    "bind_addresses": ["127.0.0.1"],
                    "type": "manhole",
                    "tls": False,
                }
            )

        metrics_port = config.get("metrics_port")
        if metrics_port:
            logger.warning(METRICS_PORT_WARNING)

            self.listeners.append(
                {
                    "port": metrics_port,
                    "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                    "tls": False,
                    "type": "http",
                    "resources": [{"names": ["metrics"], "compress": False}],
                }
            )

        _check_resource_config(self.listeners)

        self.cleanup_extremities_with_dummy_events = config.get(
            "cleanup_extremities_with_dummy_events", True
        )

    def has_tls_listener(self) -> bool:
        return any(l["tls"] for l in self.listeners)

    def generate_config_section(
        self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
    ):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
            unsecure_port = bind_port - 400
        else:
            bind_port = 8448
            unsecure_port = 8008

        pid_file = os.path.join(data_dir_path, "homeserver.pid")

        # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
        # default config string
        default_room_version = DEFAULT_ROOM_VERSION
        secure_listeners = []
        unsecure_listeners = []
        private_addresses = ["::1", "127.0.0.1"]
        if listeners:
            for listener in listeners:
                if listener["tls"]:
                    secure_listeners.append(listener)
                else:
                    # If we don't want open ports we need to bind the listeners
                    # to some address other than 0.0.0.0. Here we chose to use
                    # localhost.
                    # If the addresses are already bound we won't overwrite them
                    # however.
                    if not open_private_ports:
                        listener.setdefault("bind_addresses", private_addresses)

                    unsecure_listeners.append(listener)

            secure_http_bindings = indent(
                yaml.dump(secure_listeners), " " * 10
            ).lstrip()

            unsecure_http_bindings = indent(
                yaml.dump(unsecure_listeners), " " * 10
            ).lstrip()

        if not unsecure_listeners:
            unsecure_http_bindings = (
                """- port: %(unsecure_port)s
            tls: false
            type: http
            x_forwarded: true"""
                % locals()
            )

            if not open_private_ports:
                unsecure_http_bindings += (
                    "\n            bind_addresses: ['::1', '127.0.0.1']"
                )

            unsecure_http_bindings += """

            resources:
              - names: [client, federation]
                compress: false"""

            if listeners:
                # comment out this block
                unsecure_http_bindings = "#" + re.sub(
                    "\n {10}",
                    lambda match: match.group(0) + "#",
                    unsecure_http_bindings,
                )

        if not secure_listeners:
            secure_http_bindings = (
                """#- port: %(bind_port)s
          #  type: http
          #  tls: true
          #  resources:
          #    - names: [client, federation]"""
                % locals()
            )

        return (
            """\
        ## Server ##

        # The domain name of the server, with optional explicit port.
        # This is used by remote servers to connect to this server,
        # e.g. matrix.org, localhost:8080, etc.
        # This is also the last part of your UserID.
        #
        server_name: "%(server_name)s"

        # When running as a daemon, the file to store the pid in
        #
        pid_file: %(pid_file)s

        # The path to the web client which will be served at /_matrix/client/
        # if 'webclient' is configured under the 'listeners' configuration.
        #
        #web_client_location: "/path/to/web/root"

        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use
        # Zero is used to indicate synapse should set the soft limit to the
        # hard limit.
        #
        #soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        #
        #use_presence: false

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

        # If set to 'false', requires authentication to access the server's public rooms
        # directory through the client API. Defaults to 'true'.
        #
        #allow_public_rooms_without_auth: false

        # If set to 'false', forbids any other homeserver to fetch the server's public
        # rooms directory via federation. Defaults to 'true'.
        #
        #allow_public_rooms_over_federation: false

        # The default room version for newly created rooms.
        #
        # Known room versions are listed here:
        # https://matrix.org/docs/spec/#complete-list-of-room-versions
        #
        # For example, for room version 1, default_room_version should be set
        # to "1".
        #
        #default_room_version: "%(default_room_version)s"

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        #
        #gc_thresholds: [700, 10, 10]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is -1, meaning no upper limit.
        #
        #filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        #
        #block_non_admin_invites: true

        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        #
        #enable_search: false

        # Restrict federation to the following whitelist of domains.
        # N.B. we recommend also firewalling your federation listener to limit
        # inbound federation traffic as early as possible, rather than relying
        # purely on this application-layer restriction.  If not specified, the
        # default is to whitelist everything.
        #
        #federation_domain_whitelist:
        #  - lon.example.com
        #  - nyc.example.com
        #  - syd.example.com

        # Prevent federation requests from being sent to the following
        # blacklist IP address CIDR ranges. If this option is not specified, or
        # specified with an empty list, no ip range blacklist will be enforced.
        #
        # As of Synapse v1.4.0 this option also affects any outbound requests to identity
        # servers provided by user input.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        federation_ip_range_blacklist:
          - '127.0.0.0/8'
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - '::1/128'
          - 'fe80::/64'
          - 'fc00::/7'

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
        # Options for each listener include:
        #
        #   port: the TCP port to bind to
        #
        #   bind_addresses: a list of local addresses to listen on. The default is
        #       'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.md),
        #       'replication' (see docs/workers.md).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
        #
        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
        #       X-Forwarded-For header as the client IP. Useful when Synapse is
        #       behind a reverse-proxy.
        #
        #   resources: Only valid for an 'http' listener. A list of resources to host
        #       on this port. Options for each resource are:
        #
        #       names: a list of names of HTTP resources. See below for a list of
        #           valid resource names.
        #
        #       compress: set to true to enable HTTP compression for this resource.
        #
        #   additional_resources: Only valid for an 'http' listener. A map of
        #        additional endpoints which should be loaded via dynamic modules.
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client), and the synapse admin
        #       API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
        #
        #   federation: the server-server API (/_matrix/federation). Also implies
        #       'media', 'keys', 'openid'
        #
        #   keys: the key discovery API (/_matrix/keys).
        #
        #   media: the media API (/_matrix/media).
        #
        #   metrics: the metrics interface. See docs/metrics-howto.md.
        #
        #   openid: OpenID authentication.
        #
        #   replication: the HTTP replication API (/_synapse/replication). See
        #       docs/workers.md.
        #
        #   static: static resources under synapse/static (/_matrix/static). (Mostly
        #       useful for 'fallback authentication'.)
        #
        #   webclient: A web client. Requires web_client_location to be set.
        #
        listeners:
          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
          #
          # Disabled by default. To enable it, uncomment the following. (Note that you
          # will also need to give Synapse a TLS key and certificate: see the TLS section
          # below.)
          #
          %(secure_http_bindings)s

          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
          # that unwraps TLS.
          #
          # If you plan to use a reverse proxy, please see
          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
          #
          %(unsecure_http_bindings)s

            # example additional_resources:
            #
            #additional_resources:
            #  "/_matrix/my/custom/endpoint":
            #    module: my_module.CustomRequestHandler
            #    config: {}

          # Turn on the twisted ssh manhole service on localhost on the given
          # port.
          #
          #- port: 9000
          #  bind_addresses: ['::1', '127.0.0.1']
          #  type: manhole


        ## Homeserver blocking ##

        # How to reach the server admin, used in ResourceLimitError
        #
        #admin_contact: 'mailto:[email protected]'

        # Global blocking
        #
        #hs_disabled: false
        #hs_disabled_message: 'Human readable reason for why the HS is blocked'

        # Monthly Active User Blocking
        #
        # Used in cases where the admin or server owner wants to limit to the
        # number of monthly active users.
        #
        # 'limit_usage_by_mau' disables/enables monthly active user blocking. When
        # enabled and a limit is reached the server returns a 'ResourceLimitError'
        # with error type Codes.RESOURCE_LIMIT_EXCEEDED
        #
        # 'max_mau_value' is the hard limit of monthly active users above which
        # the server will start blocking user actions.
        #
        # 'mau_trial_days' is a means to add a grace period for active users. It
        # means that users must be active for this number of days before they
        # can be considered active and guards against the case where lots of users
        # sign up in a short space of time never to return after their initial
        # session.
        #
        # 'mau_limit_alerting' is a means of limiting client side alerting
        # should the mau limit be reached. This is useful for small instances
        # where the admin has 5 mau seats (say) for 5 specific people and no
        # interest in increasing the mau limit further. Defaults to True, which
        # means that alerting is enabled
        #
        #limit_usage_by_mau: false
        #max_mau_value: 50
        #mau_trial_days: 2
        #mau_limit_alerting: false

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        #
        #mau_stats_only: false

        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: '*****@*****.**'

        # Used by phonehome stats to group together related servers.
        #server_context: context

        # Resource-constrained homeserver Settings
        #
        # If limit_remote_rooms.enabled is True, the room complexity will be
        # checked before a user joins a new remote room. If it is above
        # limit_remote_rooms.complexity, it will disallow joining or
        # instantly leave.
        #
        # limit_remote_rooms.complexity_error can be set to customise the text
        # displayed to the user when a room above the complexity threshold has
        # its join cancelled.
        #
        # Uncomment the below lines to enable:
        #limit_remote_rooms:
        #  enabled: true
        #  complexity: 1.0
        #  complexity_error: "This room is too complex."

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to 'true'.
        #
        #require_membership_for_aliases: false

        # Whether to allow per-room membership profiles through the send of membership
        # events with profile information that differ from the target's global profile.
        # Defaults to 'true'.
        #
        #allow_per_room_profiles: false

        # How long to keep redacted events in unredacted form in the database. After
        # this period redacted events get replaced with their redacted form in the DB.
        #
        # Defaults to `7d`. Set to `null` to disable.
        #
        #redaction_retention_period: 28d

        # How long to track users' last seen time and IPs in the database.
        #
        # Defaults to `28d`. Set to `null` to disable clearing out of old rows.
        #
        #user_ips_max_age: 14d
        """
            % locals()
        )

    def read_arguments(self, args):
        if args.manhole is not None:
            self.manhole = args.manhole
        if args.daemonize is not None:
            self.daemonize = args.daemonize
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    @staticmethod
    def add_arguments(parser):
        server_group = parser.add_argument_group("server")
        server_group.add_argument(
            "-D",
            "--daemonize",
            action="store_true",
            default=None,
            help="Daemonize the homeserver",
        )
        server_group.add_argument(
            "--print-pidfile",
            action="store_true",
            default=None,
            help="Print the path to the pidfile just" " before daemonizing",
        )
        server_group.add_argument(
            "--manhole",
            metavar="PORT",
            dest="manhole",
            type=int,
            help="Turn on the twisted telnet manhole" " service on the given port.",
        )
Example #31
    def test_parse_alias_invalid(self):
        data = {'whatever': ['ServiceTags.ApiManagement.Invalid', '1.2.2.127', '1.2.2.128/25']}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet(['1.2.2.127/32', '1.2.2.128/25'])
        self.assertEqual(expected, actual)
Example #32
    def test_client_ip_range_blacklist(self):
        """Ensure that Synapse does not try to connect to blacklisted IPs"""

        # Set up the ip_range blacklist
        self.hs.config.federation_ip_range_blacklist = IPSet(
            ["127.0.0.0/8", "fe80::/64"])
        self.reactor.lookups["internal"] = "127.0.0.1"
        self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337"
        self.reactor.lookups["fine"] = "10.20.30.40"
        cl = MatrixFederationHttpClient(self.hs, None)

        # Try making a GET request to a blacklisted IPv4 address
        # ------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.get_json("internal:8008", "foo/bar", timeout=10000))

        # Nothing happened yet
        self.assertNoResult(d)

        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        f = self.failureResultOf(d)
        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a POST request to a blacklisted IPv6 address
        # -------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.post_json("internalv6:8008", "foo/bar", timeout=10000))

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        # Check that it was due to a blacklisted DNS lookup
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a POST request to a non-blacklisted IPv4 address
        # -----------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.post_json("fine:8008", "foo/bar", timeout=10000))

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was able to resolve the address
        clients = self.reactor.tcpClients
        self.assertNotEqual(len(clients), 0)

        # Connection will still fail as this IP address does not resolve to anything
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception,
                              ConnectingCancelledError)
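
# The check the test above exercises boils down to IPSet membership on the
# resolved address. A minimal standalone sketch (illustrative names only,
# not Synapse's internals):
from netaddr import IPAddress, IPSet

blacklist = IPSet(["127.0.0.0/8", "fe80::/64"])

def is_blacklisted(resolved_ip):
    # Reject any resolved address that falls inside a blacklisted CIDR.
    return IPAddress(resolved_ip) in blacklist

assert is_blacklisted("127.0.0.1")             # blacklisted IPv4
assert is_blacklisted("fe80::8a2e:370:7337")   # blacklisted IPv6
assert not is_blacklisted("10.20.30.40")       # allowed
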
Beispiel #33
0
    def test_query_default_allow(self):
        resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
        expected = IPSet(['0.0.0.0/0'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
Beispiel #34
0
    def do_show_vlan(self, args):
        tokens = args.split()

        if len(tokens) == 1 or tokens[1] == "?":
            print "show vlan"
            print "\tfree - show first 10 free vlans"
            print "\tused - show used vlans"
            print "\t<vlan numbers> - show hosts configured for vlan and the routes for the vlan"
            return

        if tokens[1] in ["free", "used"]:
            if self.__do_show_vlan_free_used__(tokens):
                return

        elif not re.match(r'\d+', tokens[1]):
            print "need to specify an number for the vlan"
            return

        colo = None
        if len(tokens) == 2:
            vlans = self.ipm.get_vlans(tokens[1], vdomain=self.vc_domain)
            if len(vlans) == 0:
                print "No Vlans found"
                return
            elif len(vlans) > 1:
                print "More than 1 vlan found, use command show vlan NUMBER colo 3-LETTER-CODE"
                print "Vlan %s exists at the following sites" % tokens[1]
                for v in vlans:
                    m = re.match(self.vlan_regex, v['name'])
                    if m:
                        print m.group(1)
                    else:
                        print "%s INVALID VLAN NAME" % tokens[1]
                return
        elif len(tokens) == 3 and tokens[2] == "?":
            print "colo - Specify the colo for this vlan"
            return
        elif len(tokens) == 3 and tokens[2] != "colo":
            print "Unknown command"
            print "colo - Specify the colo for this vlan"
            return
        elif len(tokens) == 3 and tokens[2] == "colo":
            print "incomplete command"
            print "<3 letter code> - The 3 letter colo code for the vlan"
            return
        elif len(tokens) == 4 and tokens[3] == "?":
            print "<3 letter code> - The 3 letter colo code for the vlan"
            return
        elif len(tokens) == 4 and len(tokens[3]) != 3:
            print "Uknown 3 letter code"
            print "<3 letter code> - The 3 letter colo code for the vlan"
            return
        elif len(tokens) == 5 and tokens[4] == "?":
            print "<cr>"
            return
        else:
            colo = tokens[3]

        if colo:
            try:
                host_data = self.ipm.get_all_host_data_from_vlan(
                    tokens[1], "%s" % self.vc_domain, colo_code=colo)
            except Exception as e:
                print e
                return
        else:
            host_data = self.ipm.get_all_host_data_from_vlan(
                tokens[1], "%s" % self.vc_domain)
        print "-" * 20
        print "Hosts in vlan %s" % (tokens[1])
        print "-" * 20
        sorted_addrs = host_data['addresses'].keys()
        sorted_addrs.sort()
        for addr in sorted_addrs:
            print "Host %s ip %s Primary switch: %s Vlan: %s" % (
                host_data['addresses'][addr]['host'], addr,
                host_data['addresses'][addr]['switch'], tokens[1])
        print "-" * 20
        print "Free addresses for vlan %s - Limited to first 10" % (tokens[1])
        print "-" * 20
        used = IPSet(host_data['addresses'].keys())
        prefix = IPNetwork(host_data['prefix'])
        count = 0
        for ip in prefix:
            if ip != prefix.network and ip != prefix.broadcast and ip not in used and count < 10:
                print "%s" % ip
                count += 1

        print "-" * 20
        print "Routes for vlan %s" % (tokens[1])
        print "-" * 20
        print "%s" % self.route_vrf
        for route in self.ipm.get_vlan_routes(tokens[1],
                                              vrf=self.route_vrf,
                                              vc_domain=self.vc_domain,
                                              colo_code=colo):
            print "Route: %s Vlan: %s Route: %s via %s on switch %s" % (
                route['tag'], route['vlan'], route['prefix']['prefix'],
                route['gw'], route['switch'])
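
# Sketch of the free-address scan above: collect the used addresses into an
# IPSet, then walk the prefix, skipping the network/broadcast addresses and
# anything already in the set.
from netaddr import IPNetwork, IPSet

used = IPSet(['192.0.2.1', '192.0.2.3'])
prefix = IPNetwork('192.0.2.0/29')
free = [str(ip) for ip in prefix
        if ip != prefix.network and ip != prefix.broadcast and ip not in used]
print(free)  # ['192.0.2.2', '192.0.2.4', '192.0.2.5', '192.0.2.6']
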
Beispiel #35
0
def main():
    db_ssl_ca_cert = os.getenv("SECRET_DB_CA_CERT")
    db_ssl_ca_file = os.getenv("DB_CA_FILE")

    if pem_is_valid(db_ssl_ca_cert) and db_ssl_ca_file:
        try:
            with open(db_ssl_ca_file, 'w') as f:
                f.write(db_ssl_ca_cert)
        except Exception as err:
            print >> sys.stderr, "Problem writing MySQL CA certficate file", err
            sys.exit(1)
    else:
        # Reset db_ssl_ca_file if we're missing one or more environment variables for SSL
        # to avoid adding an empty 'ssl_ca' option to the local.ini config.
        if not db_ssl_ca_file:
            print >> sys.stderr, "DB_CA_FILE env variable undefined, skipping MySQL SSL config."
        if not db_ssl_ca_cert:
            print >> sys.stderr, "SECRET_DB_CA_CERT env variable undefined, skipping MySQL SSL config."
            db_ssl_ca_file = None

    # Verify that we can parse the TRUSTED_IPS list
    trusted_ips = os.getenv("TRUSTED_IPS", default="127.0.0.1")
    trusted_ips = "".join(trusted_ips.split())  # remove whitespace
    trusted_list = trusted_ips.split(',')

    try:
        trusted_ip_range = IPSet(trusted_list)
    except Exception as e:
        print >> sys.stderr, trusted_list, e
        print >> sys.stderr, "Problem parsing TRUSTED_IPS environment variable"
        sys.exit(1)

    try:
        config = {
            "db_host":
            os.getenv("DB_HOST", default="localhost"),
            "db_user":
            os.getenv("DB_USER", default="vegadns"),
            "db_pass":
            os.getenv("SECRET_DB_PASS", default="secret"),
            "db_db":
            os.getenv("DB_DB", default="vegadns"),
            "db_ssl_ca":
            db_ssl_ca_file,
            "vegadns_generation":
            os.getenv("VEGADNS_GENERATION", default=""),
            "vegadns":
            os.getenv("VEGADNS",
                      default="http://127.0.0.1/1.0/export/tinydns"),
            "trusted_ips":
            trusted_ips,
            "ui_url":
            os.getenv("UI_URL", default="http://*****:*****@example.com"),
            "acl_labels":
            os.getenv("ACL_LABELS", default=""),
            "acl_emails":
            os.getenv("ACL_EMAILS", default=""),
            "enable_redis_notifications":
            os.getenv("ENABLE_REDIS_NOTIFICATIONS", default="false"),
            "redis_host":
            os.getenv("REDIS_HOST", default="127.0.0.1"),
            "redis_port":
            os.getenv("REDIS_PORT", default="6379"),
            "redis_channel":
            os.getenv("REDIS_CHANNEL", default="VEGADNS-CHANGES"),
            "enable_consul_notifications":
            os.getenv("ENABLE_CONSUL_NOTIFICATIONS", default="false"),
            "consul_host":
            os.getenv("CONSUL_HOST", default="127.0.0.1"),
            "consul_port":
            os.getenv("CONSUL_PORT", default="8500"),
            "consul_scheme":
            os.getenv("CONSUL_SCHEME", default="http"),
            "consul_verify_ssl":
            os.getenv("CONSUL_VERIFY_SSL", default=True),
            "consul_token":
            os.getenv("CONSUL_TOKEN", default=None),
            "consul_key":
            os.getenv("CONSUL_KEY", default="VEGADNS-CHANGES")
        }
    except Exception as err:
        print >> sys.stderr, "Problem reading environment", err
        sys.exit(1)

    # optionally use first argument as template, path is still
    # relative to this script
    template_file = "./local.ini.template"
    if len(sys.argv) > 1:
        template_file = sys.argv[1]

    try:
        with open(directory + "/" + template_file) as template:
            print pystache.render(template.read(), config)
    except Exception as err:
        print >> sys.stderr, "Problem rendering template", err
        sys.exit(1)
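
# Standalone sketch of the TRUSTED_IPS handling above: collapse whitespace,
# split on commas, and let IPSet() raise on anything unparseable.
from netaddr import IPSet

raw = "127.0.0.1, 10.0.0.0/8, 192.168.1.0/24"
trusted = IPSet("".join(raw.split()).split(","))
print(list(trusted.iter_cidrs()))
# [IPNetwork('10.0.0.0/8'), IPNetwork('127.0.0.1/32'), IPNetwork('192.168.1.0/24')]
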
Beispiel #37
0
class ContentRepositoryConfig(Config):
    section = "media"

    def read_config(self, config, **kwargs):

        # Only enable the media repo if either the media repo is enabled or the
        # current worker app is the media repo.
        if (self.enable_media_repo is False and
                config.get("worker_app") != "synapse.app.media_repository"):
            self.can_load_media_repo = False
            return
        else:
            self.can_load_media_repo = True

        self.max_upload_size = self.parse_size(
            config.get("max_upload_size", "10M"))
        self.max_image_pixels = self.parse_size(
            config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(
            config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(
            config.get("media_store_path", "media_store"))

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False)

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [{
                "module": "file_system",
                "store_local": True,
                "store_synchronous": synchronous_backup_media_store,
                "store_remote": True,
                "config": {
                    "directory": backup_media_store_path
                },
            }]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # the class_config the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []  # type: List[tuple]

        for provider_config in storage_providers:
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend")

            provider_class, parsed_config = load_module(provider_config)

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append(
                (provider_class, parsed_config, wrapper_config))

        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES))
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                check_requirements("url_preview")

            except DependencyException as e:
                raise ConfigError(e.message)

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work")

            # netaddr is a dependency for url_preview
            from netaddr import IPSet

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"])

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(["0.0.0.0", "::"])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ()))

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ())

            self.url_preview_accept_language = config.get(
                "url_preview_accept_language") or ["en"]

    def generate_config_section(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")

        formatted_thumbnail_sizes = "".join(THUMBNAIL_SIZE_YAML % s
                                            for s in DEFAULT_THUMBNAIL_SIZES)
        # strip final NL
        formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]

        return (r"""
        ## Media Store ##

        # Enable the media store service in the Synapse master. Uncomment the
        # following if you are using a separate media store worker.
        #
        #enable_media_repo: false

        # Directory where uploaded images and attachments are stored.
        #
        media_store_path: "%(media_store)s"

        # Media storage providers allow media to be stored in different
        # locations.
        #
        #media_storage_providers:
        #  - module: file_system
        #    # Whether to store newly uploaded local files
        #    store_local: false
        #    # Whether to store newly downloaded remote files
        #    store_remote: false
        #    # Whether to wait for successful storage for local uploads
        #    store_synchronous: false
        #    config:
        #       directory: /mnt/some/other/directory

        # The largest allowed upload size in bytes
        #
        #max_upload_size: 10M

        # Maximum number of pixels that will be thumbnailed
        #
        #max_image_pixels: 32M

        # Whether to generate new thumbnails on the fly to precisely match
        # the resolution requested by the client. If true then whenever
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalculated list.
        #
        #dynamic_thumbnails: false

        # List of thumbnails to precalculate when an image is uploaded.
        #
        #thumbnail_sizes:
%(formatted_thumbnail_sizes)s

        # Is the preview URL API enabled?
        #
        # 'false' by default: uncomment the following to enable it (and specify a
        # url_preview_ip_range_blacklist blacklist).
        #
        #url_preview_enabled: true

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing.  There are no defaults: you must explicitly
        # specify a list for URL previewing to work.  You should specify any
        # internal services in your network that you do not want synapse to try
        # to connect to, otherwise anyone in any Matrix room could cause your
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
        #url_preview_ip_range_blacklist:
        #  - '127.0.0.0/8'
        #  - '10.0.0.0/8'
        #  - '172.16.0.0/12'
        #  - '192.168.0.0/16'
        #  - '100.64.0.0/10'
        #  - '169.254.0.0/16'
        #  - '::1/128'
        #  - 'fe80::/64'
        #  - 'fc00::/7'

        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        #url_preview_ip_range_whitelist:
        #   - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing.  You should use url_preview_ip_range_blacklist
        # in preference to this, otherwise someone could define a public DNS
        # entry that points to a private IP address and circumvent the blacklist.
        # This is more useful if you know there is an entire shape of URL
        # that you will never want synapse to try to spider.
        #
        # Each list entry is a dictionary of url component attributes as returned
        # by urlparse.urlsplit as applied to the absolute form of the URL.  See
        # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
        # The values of the dictionary are treated as a filename match pattern
        # applied to that component of URLs, unless they start with a ^ in which
        # case they are treated as a regular expression match.  If all the
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        #url_preview_url_blacklist:
        #  # blacklist any URL with a username in its URI
        #  - username: '******'
        #
        #  # blacklist all *.google.com URLs
        #  - netloc: 'google.com'
        #  - netloc: '*.google.com'
        #
        #  # blacklist all plain HTTP URLs
        #  - scheme: 'http'
        #
        #  # blacklist http(s)://www.acme.com/foo
        #  - netloc: 'www.acme.com'
        #    path: '/foo'
        #
        #  # blacklist any URL with a literal IPv4 address
        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        #
        #max_spider_size: 10M

        # A list of values for the Accept-Language HTTP header used when
        # downloading webpages during URL preview generation. This allows
        # Synapse to specify the preferred languages that URL previews should
        # be in when communicating with remote servers.
        #
        # Each value is an IETF language tag; a 2-3 letter identifier for a
        # language, optionally followed by subtags separated by '-', specifying
        # a country or region variant.
        #
        # Multiple values can be provided, and a weight can be added to each by
        # using quality value syntax (;q=). '*' translates to any language.
        #
        # Defaults to "en".
        #
        # Example:
        #
        # url_preview_accept_language:
        #   - en-UK
        #   - en-US;q=0.9
        #   - fr;q=0.8
        #   - *;q=0.7
        #
        #url_preview_accept_language:
        #   - en
        """ % locals())
Beispiel #38
0
def test_ipset_member_insertion_and_deletion():
    s1 = IPSet()
    s1.add('192.0.2.0')
    assert s1 == IPSet(['192.0.2.0/32'])

    s1.remove('192.0.2.0')
    assert s1 == IPSet([])

    s1.add(IPRange("10.0.0.0", "10.0.0.255"))
    assert s1 == IPSet(['10.0.0.0/24'])

    s1.remove(IPRange("10.0.0.128", "10.10.10.10"))
    assert s1 == IPSet(['10.0.0.0/25'])
Beispiel #39
0
    def test_parse_alias_and_blocks(self):
        data = {'whatever': ['ServiceTags.ApiManagement.WestUS', '1.2.2.127', '1.2.2.128/25']}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet(['13.64.39.16/32', '40.112.242.148/31', '40.112.243.240/28',
                          '1.2.2.127/32', '1.2.2.128/25'])
        self.assertEqual(expected, actual)
Beispiel #40
0
                       dest='timeout',
                       help='Seconds to wait before timeout, default is 2')
argparser.add_argument('-s',
                       '--shuffle',
                       action='store_true',
                       default=False,
                       dest='shuffle',
                       help='Shuffle the target list')
args = argparser.parse_args()

# Check if we are running in a pipe and read from STDIN
if not sys.stdin.isatty():
    args.targets = sys.stdin.readlines()

# Add target IPs/Networks to a netaddr-IPSet
targetSet = IPSet()
for t in args.targets:
    targetSet.add(t.strip())

# Set up the output directory
check_output()

# Render IPSets to a list
targetlist = list()
for ip in targetSet:
    targetlist.append(str(ip))

# Check for shuffle argument
if args.shuffle:
    shuffle(targetlist)
Beispiel #41
0
    def test_empty(self):
        data = {'whatever': []}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet()
        self.assertEqual(expected, actual)
Beispiel #42
0
import re

from netaddr import IPSet

re_file_rv = re.compile(r'rib\.(\d+)\.(\d\d\d\d)\.bz2')
re_file_rr = re.compile(r'bview\.(\d+)\.(\d\d\d\d)\.gz')

re_path_rv = re.compile(r'.*/([a-z0-9.-]+)/bgpdata/\d\d\d\d\.\d\d/RIBS.*')
re_path_rr = re.compile(r'.*/(rrc\d\d)/\d\d\d\d\.\d\d.*')

reserved_ipv4 = IPSet([
    '0.0.0.0/8',  # host on this network (RFC1122)
    '10.0.0.0/8',
    '172.16.0.0/12',
    '192.168.0.0/16',  # private address space (RFC1918)
    '100.64.0.0/10',  # shared address space (RFC6598)
    '127.0.0.0/8',  # loopback (RFC1122)
    '169.254.0.0/16',  # linklocal (RFC3927)
    '192.0.0.0/24',  # special purpose (RFC6890)
    '192.0.0.0/29',  # DS-lite (RFC6333)
    '192.0.2.0/24',
    '198.51.100.0/24',
    '203.0.113.0/24',  # test net 1-3 (RFC5737)
    '224.0.0.0/4',  # multicast address space
    '240.0.0.0/4',  # future use (RFC1122)
    '255.255.255.255/32'  # limited broadcast
])
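
# Sketch: netaddr's IPSet membership also accepts whole networks, so
# flagging a "bogus" announced prefix is a single containment test:
from netaddr import IPNetwork

def is_bogus(prefix):
    # True when the announced prefix lies entirely inside reserved space.
    return IPNetwork(prefix) in reserved_ipv4

assert is_bogus('10.1.0.0/16')      # inside RFC1918 space
assert is_bogus('192.0.2.0/25')     # inside TEST-NET-1
assert not is_bogus('8.8.8.0/24')   # ordinary public prefix
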
'''
OUTPUT FORMAT:

timestamp|date ; input type (RIB|UPDATE) ; source (route-views.xyz| rrcXY) ; \
    #ipv4-prefixes/pfxlength (1..32) ; #ipv4 moas ; #ipv4 bogus \
    [; #ipv6-prefix/pfxlength ; #ipv6 moas ; #ipv6 bogus ]
Beispiel #43
0
    def test_query_regular_plus_partial_cloud(self):
        extra = ','.join(PORTAL_IPS[1:])
        resource = {'properties': {'ipRangeFilter': extra + ',10.0.0.0/16,8.8.8.8',
                                   'isVirtualNetworkFilterEnabled': False}}
        expected = IPSet(['10.0.0.0/16', '8.8.8.8'] + PORTAL_IPS[1:])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
Beispiel #44
0
def test_ipset_ipv4_and_ipv6_separation():
    assert list(IPSet([IPAddress(1, 4), IPAddress(1, 6)]).iter_ipranges()) == [
        IPRange('0.0.0.1', '0.0.0.1'),
        IPRange('::1', '::1')
    ]
Beispiel #45
0
    def read_config(self, config):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))

        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")
        self.cpu_affinity = config.get("cpu_affinity")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False,
        )

        # If set to 'True', requires authentication to access the server's
        # public rooms directory through the client API, and forbids any other
        # homeserver to fetch it via federation.
        self.restrict_public_rooms_to_local_users = config.get(
            "restrict_public_rooms_to_local_users", False,
        )

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get(
            "block_non_admin_invites", False,
        )

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", False,
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get(
                "max_mau_value", 0,
            )
        self.mau_stats_only = config.get("mau_stats_only", False)

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get(
            "mau_trial_days", 0,
        )

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get(
            "federation_domain_whitelist", None,
        )

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", [],
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist
            )

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in "
                "federation_ip_range_blacklist: %s" % e
            )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
                self.public_baseurl += '/'
        self.start_pushers = config.get("start_pushers", True)

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True,
        )

        # Whether to allow per-room membership profiles through the send of membership
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        self.listeners = []
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option"
                )

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.get("port"),
                    )
                    continue

            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener['type'] == 'metrics':
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append('0.0.0.0')
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)

            self.listeners.append({
                "port": bind_port,
                "bind_addresses": [bind_host],
                "tls": True,
                "type": "http",
                "resources": [
                    {
                        "names": ["client"],
                        "compress": gzip_responses,
                    },
                    {
                        "names": ["federation"],
                        "compress": False,
                    }
                ]
            })

            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append({
                    "port": unsecure_port,
                    "bind_addresses": [bind_host],
                    "tls": False,
                    "type": "http",
                    "resources": [
                        {
                            "names": ["client"],
                            "compress": gzip_responses,
                        },
                        {
                            "names": ["federation"],
                            "compress": False,
                        }
                    ]
                })

        manhole = config.get("manhole")
        if manhole:
            self.listeners.append({
                "port": manhole,
                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
                "tls": False,
            })

        metrics_port = config.get("metrics_port")
        if metrics_port:
            logger.warn(
                ("The metrics_port configuration option is deprecated in Synapse 0.31 "
                 "in favour of a listener. Please see "
                 "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
                 " on how to configure the new listener."))

            self.listeners.append({
                "port": metrics_port,
                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                "tls": False,
                "type": "http",
                "resources": [
                    {
                        "names": ["metrics"],
                        "compress": False,
                    },
                ]
            })

        _check_resource_config(self.listeners)
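
# Sketch of the federation_ip_range_blacklist handling above: build the
# IPSet eagerly so malformed config fails at startup, and always fold in
# the unroutable 0.0.0.0 and :: addresses.
from netaddr import IPSet

try:
    blacklist = IPSet(["10.0.0.0/8", "fe80::/64"])
    blacklist.update(["0.0.0.0", "::"])
except Exception as e:
    raise ValueError("Invalid range(s) in federation_ip_range_blacklist: %s" % e)
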
Beispiel #46
0
def parse_ip_set(ipaddrs):
    """Parse a string specification into an IPSet.

    This function takes a string representing a set of IP addresses and
    parses it into an IPSet object.  Acceptable formats for the string
    include:

        * "all":        all possible IPv4 and IPv6 addresses
        * "local":      all local addresses of the machine
        * "A.B.C.D"     a single IP address
        * "A.B.C.D/N"   a network address specification
        * "A.B.C.*"     a glob matching against all possible numbers
        * "A.B.C.D-E"   a glob matching against a range of numbers
        * a whitespace- or comma-separated string of the above

    """
    ipset = IPSet()
    ipaddrs = ipaddrs.lower().strip()
    if not ipaddrs:
        return ipset
    for ipspec in _COMMA_OR_WHITESPACE.split(ipaddrs):
        # The string "local" maps to all local addresses on the machine.
        if ipspec == "local":
            ipset.add(IPNetwork("127.0.0.0/8"))
            for addr in get_local_ip_addresses():
                ipset.add(addr)
        # The string "all" maps to app IPv4 and IPv6 addresses.
        elif ipspec == "all":
            ipset.add(IPNetwork("0.0.0.0/0"))
            ipset.add(IPNetwork("::"))
        # Strings containing a "/" are assumed to be network specs
        elif "/" in ipspec:
            ipset.add(IPNetwork(ipspec))
        # Strings containing a "*" or "-" are assumed to be glob patterns
        elif "*" in ipspec or "-" in ipspec:
            for cidr in IPGlob(ipspec).cidrs():
                ipset.add(cidr)
        # Anything else must be a single address
        else:
            ipset.add(IPAddress(ipspec))
    return ipset
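
# Usage sketch for parse_ip_set(), one call per accepted format from the
# docstring ("all" and "local" omitted since they depend on the machine);
# the last case assumes _COMMA_OR_WHITESPACE splits on the comma.
assert parse_ip_set("192.0.2.1") == IPSet(["192.0.2.1/32"])
assert parse_ip_set("192.0.2.0/28") == IPSet(["192.0.2.0/28"])
assert parse_ip_set("192.0.2.*") == IPSet(["192.0.2.0/24"])
assert parse_ip_set("192.0.2.0-7") == IPSet(["192.0.2.0/29"])
assert parse_ip_set("192.0.2.1,192.0.2.2") == IPSet(["192.0.2.1/32", "192.0.2.2/32"])
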
Beispiel #47
0
    def test_query_firewall_disabled(self):
        resource = {'properties': {'ipRangeFilter': '', 'isVirtualNetworkFilterEnabled': False}}
        expected = IPSet(['0.0.0.0/0'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
Beispiel #48
0
def test_ipset_adding_and_removing_members_ip_addresses_as_ints():
    s1 = IPSet(['10.0.0.0/25'])

    s1.add('10.0.0.0/24')
    assert s1 == IPSet(['10.0.0.0/24'])

    integer1 = int(IPAddress('10.0.0.1'))
    integer2 = int(IPAddress('fe80::'))
    integer3 = int(IPAddress('10.0.0.2'))

    s2 = IPSet([integer1, integer2])
    assert s2 == IPSet(['10.0.0.1/32', 'fe80::/128'])

    s2.add(integer3)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])

    s2.remove(integer2)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32'])

    s2.update([integer2])
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])
Beispiel #49
0
from netaddr import IPSet


def get_ip_set(cidr):
    """ Returns a comma-separated string of the IPs in cidr, for use in
    juju's no-proxy setting
    """
    ips = list(IPSet([cidr]))
    return ",".join(str(x) for x in ips)
Beispiel #50
0
    def test_https_request_via_proxy_with_blacklist(self):
        # The blacklist includes the configured proxy IP.
        agent = ProxyAgent(
            BlacklistingReactorWrapper(
                self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"])
            ),
            self.reactor,
            contextFactory=get_test_https_policy(),
            https_proxy=b"proxy.com",
        )

        self.reactor.lookups["proxy.com"] = "1.2.3.5"
        d = agent.request(b"GET", b"https://test.com/abc")

        # there should be a pending TCP connection
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients[0]
        self.assertEqual(host, "1.2.3.5")
        self.assertEqual(port, 1080)

        # make a test HTTP server, and wire up the client
        proxy_server = self._make_connection(
            client_factory, _get_test_protocol_factory()
        )

        # fish the transports back out so that we can do the old switcheroo
        s2c_transport = proxy_server.transport
        client_protocol = s2c_transport.other
        c2s_transport = client_protocol.transport

        # the FakeTransport is async, so we need to pump the reactor
        self.reactor.advance(0)

        # now there should be a pending CONNECT request
        self.assertEqual(len(proxy_server.requests), 1)

        request = proxy_server.requests[0]
        self.assertEqual(request.method, b"CONNECT")
        self.assertEqual(request.path, b"test.com:443")

        # tell the proxy server not to close the connection
        proxy_server.persistent = True

        # this just stops the http Request trying to do a chunked response
        # request.setHeader(b"Content-Length", b"0")
        request.finish()

        # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel
        ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
        ssl_protocol = ssl_factory.buildProtocol(None)
        http_server = ssl_protocol.wrappedProtocol

        ssl_protocol.makeConnection(
            FakeTransport(client_protocol, self.reactor, ssl_protocol)
        )
        c2s_transport.other = ssl_protocol

        self.reactor.advance(0)

        server_name = ssl_protocol._tlsConnection.get_servername()
        expected_sni = b"test.com"
        self.assertEqual(
            server_name,
            expected_sni,
            "Expected SNI %s but got %s" % (expected_sni, server_name),
        )

        # now there should be a pending request
        self.assertEqual(len(http_server.requests), 1)

        request = http_server.requests[0]
        self.assertEqual(request.method, b"GET")
        self.assertEqual(request.path, b"/abc")
        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
        request.write(b"result")
        request.finish()

        self.reactor.advance(0)

        resp = self.successResultOf(d)
        body = self.successResultOf(treq.content(resp))
        self.assertEqual(body, b"result")
Beispiel #51
0
import copy

from netaddr import AddrFormatError, IPNetwork, IPSet
from six import text_type


def lossy_supernet(ip_list, match_quality=30, ip_mismatch_limit=20):
    """
    Take a list of IP / subnets and return an optimized list.
    :param ip_list: list of IPv4 strings or IPv4/prefix strings
    :param match_quality: minimum % to be a good match
    :param ip_mismatch_limit: Do not match if IPs mismatched are > this amount (even if good match.)
    :return: dict with results.
    """

    # Default storage objects
    working_dict = {}
    output_list = []
    output_dict = {}

    input_ip_list_len = len(ip_list)

    # sanity data
    if not isinstance(ip_list, list):
        print("ERROR: ip_list is not a list (is '{0}').".format(type(ip_list)))
        return {
            'parsing_details': None,
            'output_list': [],
            'input_list': ip_list,
            'original_network_entries': input_ip_list_len,
            'final_network_entries': None,
            'total_mismatch_ip': None,
            'total_ip': None,
            'input_loss': ip_mismatch_limit,
            'input_quality': match_quality
        }
    if isinstance(match_quality, (float, int)):
        # match_quality is correct type. Is percent, convert int to float (%)
        if isinstance(match_quality, int):
            if match_quality >= 100:
                match_quality = 1.00
            elif match_quality <= 0:
                match_quality = 0.00
            else:
                # convert
                match_quality = float(match_quality * 0.01)
        else:
            # float
            if match_quality >= 1.00:
                match_quality = 1.00
            elif match_quality <= 0.00:
                match_quality = 0.00

    else:
        # invalid match quality, set to default
        match_quality = 0.50
    if not isinstance(ip_mismatch_limit, int):
        # invalid ip match limit, set to default.
        ip_mismatch_limit = 20

    # Enumerate the input IP list
    for ip_addr in ip_list:
        original_ip_network = IPNetwork(ip_addr)
        # For every IP, determine supernet chain to root (0.0.0.0/0)
        try:
            ip_nets = original_ip_network.supernet()
        except AddrFormatError:
            # misformatted error. Continue.
            print("ERROR: '{0}' is not a correctly formatted ipv4 address or address/prefix. Skipping."
                  "".format(ip_addr))
            continue
        # /32 or last is important, add it to the list.
        ip_nets.append(original_ip_network)
        # Iterate through each supernet chain, building giant relational dictionary
        for ip_net in ip_nets:
            # Create a string for key value
            ip_net_key = text_type(ip_net.ip) + '/' + text_type(ip_net.prefixlen)
            # If key does not exist, add key and template values
            if not working_dict.get(ip_net_key):
                working_dict[ip_net_key] = {'matches': 0.00, 'iplist': [], 'dead': False}
            # Add IP to match list
            working_dict[ip_net_key]['iplist'].append(str(ip_addr))
            # Get matching addresses, update with new subnet.
            working_dict[ip_net_key]['matches'] = IPSet(working_dict[ip_net_key]['iplist']).size
            # Calculate match quality %
            working_dict[ip_net_key]['quality'] = float(working_dict[ip_net_key]['matches']) / float(ip_net.size)
            # Save size of supernet for later use.
            working_dict[ip_net_key]['size'] = ip_net.size

            # Add bitmask to value for parsing
            working_dict[ip_net_key]['bitmask'] = int(ip_net.prefixlen)

    parsing_dict = copy.deepcopy(working_dict)

    # iterate supernets from largest (/0) to smallest (/32)
    for root_iter in range(0, 33):
        for key, value in parsing_dict.items():
            # If dict entry's bitmask is at the level we are parsing and not dead, act.
            if value['bitmask'] == root_iter and value['dead'] is not True:
                # Determine # of IPs that are not supposed to match
                mismatched_ips = value['size'] - value['matches']
                # If this supernet meets criteria, pop it to result and remove matching from parsing_dict
                if value['quality'] > match_quality and mismatched_ips <= ip_mismatch_limit:
                    # Add supernet to output
                    output_list.append(key)
                    output_dict[key] = {'quality': value['quality'], 'mismatch': mismatched_ips}
                    # put ip match list in remove queue
                    removal_queue_list = list(value['iplist'])
                    # delete (mark dead) this key
                    value['dead'] = True
                    # Reiterate parsing_dict, removing and recalculating IPs
                    for key2, value2 in parsing_dict.items():
                        # If an IP in removeList exists in supernet match list
                        for match in removal_queue_list:
                            if match in reversed(value2['iplist']):
                                # Remove the IP from the match list
                                value2['iplist'].remove(match)
                                # Decrement the match value
                                value2['matches'] = IPSet(value2['iplist']).size
                        # Recalculate quality after removing IPs
                        value2['quality'] = float(value2['matches']) / float(value2['size'])
                        # If all match IPs have been removed, mark key deleted (dead)
                        if value2['matches'] == 0 and working_dict[key2]['matches'] != 0:
                            # print '**'+key2+"** now has 0 IPs, originally "+str(working_dict[key2]['matches'])
                            value2['dead'] = True

    mismatch_total = 0
    for key, value in output_dict.items():
        mismatch_total = mismatch_total + value['mismatch']

    return {
        'parsing_details': output_dict,
        'output_list': output_list,
        'input_list': ip_list,
        'original_network_entries': input_ip_list_len,
        'final_network_entries': len(output_dict.keys()),
        'total_mismatch_ip': mismatch_total,
        'total_ip': len(ip_list),
        'input_loss': ip_mismatch_limit,
        'input_quality': match_quality
    }
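
# Usage sketch: four /32s inside 198.51.100.0/29 collapse to the /29 once
# the thresholds tolerate the four unused addresses in that block.
result = lossy_supernet(
    ['198.51.100.0/32', '198.51.100.1/32',
     '198.51.100.2/32', '198.51.100.3/32'],
    match_quality=40, ip_mismatch_limit=4)
print(result['output_list'])  # ['198.51.100.0/29'] with these thresholds
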
Beispiel #52
0
def test_ipset_pickling():
    ip_data = IPSet(['10.0.0.0/16', 'fe80::/64'])
    buf = pickle.dumps(ip_data)
    ip_data_unpickled = pickle.loads(buf)
    assert ip_data == ip_data_unpickled
Beispiel #53
0
def test_ipset_operations_with_combined_ipv4_and_ipv6():
    s1 = IPSet(['192.0.2.0', '::192.0.2.0', '192.0.2.2', '::192.0.2.2'])
    s2 = IPSet(['192.0.2.2', '::192.0.2.2', '192.0.2.4', '::192.0.2.4'])
    s3 = IPSet(['0.0.0.1', '10.0.0.64/30', '255.255.255.1'])
    s4 = IPSet(['10.0.0.64', '10.0.0.66'])
    s4b = IPSet(['10.0.0.64', '10.0.0.66', '111.111.111.111'])
    s5 = IPSet(['10.0.0.65', '10.0.0.67'])
    s6 = IPSet(['2405:8100::/32'])

    assert bool(s6)
    assert not bool(IPSet())

    #   set intersection
    assert s2 & s1 == IPSet(['192.0.2.2/32', '::192.0.2.2/128'])
    assert s3 & s4 == IPSet(['10.0.0.64/32', '10.0.0.66/32'])
    assert s4 & s3 == IPSet(['10.0.0.64/32', '10.0.0.66/32'])
    assert s3 & s5 == IPSet(['10.0.0.65/32', '10.0.0.67/32'])
    assert s5 & s3 == IPSet(['10.0.0.65/32', '10.0.0.67/32'])

    #   set difference
    assert s3 - s4 == IPSet(
        ['0.0.0.1/32', '10.0.0.65/32', '10.0.0.67/32', '255.255.255.1/32'])
    assert s4 - s3 == IPSet([])
    assert s3 - s4b == IPSet(
        ['0.0.0.1/32', '10.0.0.65/32', '10.0.0.67/32', '255.255.255.1/32'])
    assert s3 - s5 == IPSet(
        ['0.0.0.1/32', '10.0.0.64/32', '10.0.0.66/32', '255.255.255.1/32'])
    assert s5 - s3 == IPSet([])

    #   set symmetric difference
    assert s2 ^ s1 == IPSet(
        ['192.0.2.0/32', '192.0.2.4/32', '::192.0.2.0/128', '::192.0.2.4/128'])
    assert IPSet([]) ^ IPSet([]) == IPSet([])
    assert IPSet(['0.0.0.1/32']) ^ IPSet([]) == IPSet(['0.0.0.1/32'])
    assert IPSet(['0.0.0.1/32']) ^ IPSet(['0.0.0.1/32']) == IPSet([])
    assert s3 ^ s4 == IPSet(
        ['0.0.0.1/32', '10.0.0.65/32', '10.0.0.67/32', '255.255.255.1/32'])
    assert s4 ^ s3 == IPSet(
        ['0.0.0.1/32', '10.0.0.65/32', '10.0.0.67/32', '255.255.255.1/32'])
    assert s3 ^ s4b == IPSet([
        '0.0.0.1/32', '10.0.0.65/32', '10.0.0.67/32', '111.111.111.111/32',
        '255.255.255.1/32'
    ])
    assert s3 ^ s5 == IPSet(
        ['0.0.0.1/32', '10.0.0.64/32', '10.0.0.66/32', '255.255.255.1/32'])
    assert s5 ^ s3 == IPSet(
        ['0.0.0.1/32', '10.0.0.64/32', '10.0.0.66/32', '255.255.255.1/32'])
Beispiel #54
0
    def test_parse_multi_net(self):
        data = {'whatever': ['1.2.2.127/32', '1.2.2.128/25']}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet(IPRange('1.2.2.127', '1.2.2.127')) | \
            IPSet(IPRange('1.2.2.128', '1.2.2.255'))
        self.assertEqual(expected, actual)
Beispiel #55
0
def scan_fn_ip():
    country_code = {}
    for line in open('input/country_code', 'r'):
        code, name = line.split(" ")
        country_code[code] = name.strip().decode("utf-8")
        logger.info(code + ' ' + country_code[code])

    rtree = ipRadixDB()
    ip_area_list = ["input/delegated-arin-latest", "input/delegated-ripencc-latest", "input/delegated-lacnic-latest", "input/delegated-afrinic-latest", "input/delegated-apnic-latest"]
    dft = defaultdict(list)
    availableIPs = []
    for f in ip_area_list:
        seed_file = open(f,'r')
        for l in seed_file.readlines():
            params = l.split('|')
            if len(params) >= 4 and params[2] == "ipv4" and params[3] != "*" and params[1] != "CN":
                startIP = params[3]
                endIP = ip_integer_to_string(ip_integer_from_string(startIP) + int(params[4]) - 1)
                logger.info(startIP + ' ' + endIP + ' ' + params[4])
                iprange = IPRange(startIP, endIP)
                if params[1] == '':
                    availableIPs += map(str, iprange.cidrs())
                else:
                    dft[params[1]] += map(str, iprange.cidrs())
    for key in dft:
        prefix = dft[key][-1]
        network,masklen = prefix.split('/')
        masklen = int(masklen)
        ip = generate_random_ip(network,masklen)
        ipset = IPSet(dft[key])
        for prefix in ipset.iter_cidrs():
            network,masklen = str(prefix).split('/')
            masklen = int(masklen)
            rtree.addPrefix(network,masklen)
            data = rtree.rnode.data
            country = country_code[key]
            logger.info(str(prefix) + ' ' + country)
            data['country'] = country
            data['ip'] = ip
            data['ip_amount'] = prefix.size
            data['province'] = ''
            data['city'] = ''
            data['isp'] = ''
    for prefix in availableIPs:
        network,masklen = prefix.split("/")
        masklen = int(masklen)
        ip = generate_random_ip(network,masklen)
        jsonData = None
        while jsonData == None:
            try:
                jsonData = query_ip(ip)
            except Exception, e:
                logger.error(e)
                time.sleep(0.5)
        rtree.addPrefix(network,masklen)
        data = rtree.rnode.data
        data['country'] = jsonData.get('country','')
        data['ip'] = ip
        data['ip_amount'] = IPNetwork(prefix).size
        data['province'] = ''
        data['city'] = ''
        data['isp'] = ''
        logger.info(prefix + ' ' + data['country'])
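
# Sketch of the IPRange -> CIDR step above: a delegated-file row gives a
# start address plus an address count, which rarely maps to one prefix.
from netaddr import IPRange

r = IPRange('192.0.2.8', '192.0.2.15')    # count of 8: exactly one /29
print([str(c) for c in r.cidrs()])        # ['192.0.2.8/29']
r = IPRange('192.0.2.0', '192.0.2.11')    # count of 12: needs two prefixes
print([str(c) for c in r.cidrs()])        # ['192.0.2.0/29', '192.0.2.8/30']
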
Beispiel #56
0
    def test_parse_spaces(self):
        data = {'whatever': ['0.0.0.0 - 10.10.10.10', '10.20.20.0 / 24']}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet(IPRange('0.0.0.0', '10.10.10.10')) | \
            IPSet(IPRange('10.20.20.0', '10.20.20.255'))
        self.assertEqual(expected, actual)
Beispiel #57
0
#! /usr/bin/env python
# Calculates the difference of two sets of IP ranges (include minus exclude)
from sys import argv
from netaddr import IPSet

if len(argv) != 3:
    print('Usage: {0} include.txt exclude.txt'.format(argv[0]))
    exit()

net = IPSet()

with open(argv[1], 'r') as incfile:
    for line in incfile:
        line = line.strip()
        if line:
            net = net | IPSet([line])

with open(argv[2], 'r') as exfile:
    for line in exfile:
        line = line.strip()
        if line:
            net.remove(line)

for cidr in net.iter_cidrs():
    print(cidr)
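
# Example: with include.txt containing "192.0.2.0/24" and exclude.txt
# containing "192.0.2.128/25", the script prints the remaining half:
#   192.0.2.0/25
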
Beispiel #58
0
    def test_parse_single_ip(self):
        data = {'whatever': ['1.2.2.127']}
        actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
        expected = IPSet(IPRange('1.2.2.127', '1.2.2.127'))
        self.assertEqual(expected, actual)
Beispiel #59
0
    def _query_rules(self, resource):
        rules = IPSet()
        for r in resource['rules']:
            rules.add(r)

        return rules
Beispiel #60
0
def test_ipset_comparison_with_int_is_invalid():
    s1 = IPSet(['10.0.0.1'])
    assert not s1 == 42
    assert s1 != 42