Example #1
 def test_parse_spaces(self):
     data = {'whatever': ['0.0.0.0 - 10.10.10.10', '10.20.20.0 / 24']}
     actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
     expected = IPSet(IPRange('0.0.0.0', '10.10.10.10')) | \
         IPSet(IPRange('10.20.20.0', '10.20.20.255'))
     self.assertEqual(expected, actual)
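The helper under test accepts both explicit 'start - end' ranges and 'net / prefix' CIDRs, tolerating stray spaces. A rough sketch of how such a parser can be built on netaddr (an illustration only, not the actual IpRangeHelper source):

from netaddr import IPNetwork, IPRange, IPSet

def parse_ip_ranges_sketch(data, key):
    # Accepts 'a.b.c.d - e.f.g.h' ranges and 'net/prefix' CIDRs,
    # tolerating whitespace around the separators.
    result = IPSet()
    for entry in data.get(key, []):
        if '-' in entry:
            start, end = [part.strip() for part in entry.split('-')]
            result |= IPSet(IPRange(start, end))
        else:
            result.add(IPNetwork(entry.replace(' ', '')))
    return result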
Example #2
import argparse
from netaddr import IPAddress, IPNetwork, IPSet, IPRange


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--range', nargs=1)
    parser.add_argument('--exclude', nargs='*')
    args = parser.parse_args()
    return args.range, args.exclude


def list_scan(ip_range, exclude):
    # IPSet supports set difference directly; avoid mutating
    # the set while iterating over it
    return ip_range - exclude


if __name__ == '__main__':
    r, e = parse_arguments()
    ip_range = IPSet(r)
    exclude = IPSet(e)
    print(ip_range)
    print(exclude)
    final_list = list_scan(ip_range, exclude)
    for ip in final_list:
        print(ip)
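Since netaddr's IPSet implements full set algebra, the corrected list_scan above is just a set difference; a quick check:

from netaddr import IPSet

ips = IPSet(['10.0.0.0/30'])       # 10.0.0.0 .. 10.0.0.3
excl = IPSet(['10.0.0.1/32'])
print(ips - excl)                  # everything except 10.0.0.1
print(ips & excl)                  # only 10.0.0.1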
Example #3
        'NAME': 'nsupdate.sqlite',               # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',             # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': ''              # Set to empty string for default.
    }
}

# these user agents are unacceptable for the /nic/update service
BAD_AGENTS = set([])  # list can have str elements

# these IPAddresses and/or IPNetworks are unacceptable for the /nic/update
# service, e.g. IPs of servers related to illegal activities
from netaddr import IPSet, IPAddress, IPNetwork
BAD_IPS_HOST = IPSet([])  # inner list can have IPAddress and IPNetwork elements
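# For illustration, such a blocklist could mix single addresses and networks
# (documentation addresses used as placeholders), e.g.:
# BAD_IPS_HOST = IPSet([IPAddress('192.0.2.1'), IPNetwork('198.51.100.0/24')])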

# when encountering these hostnames (fqdn), block them early/silently from
# api usage. avoid any database access, so if someone tries to update
# every 5s, the database won't be locked all the time and we can at least
# delete the host from django admin.
BAD_HOSTS = set([])

# nameservers used e.g. for MX lookups in the registration email validation.
# google / cloudflare DNS IPs are only given as example / fallback -
# please configure your own nameservers in your local settings file.
NAMESERVERS = ['8.8.8.8', '1.1.1.1', ]

# registration email validation: disallow specific email domains,
# e.g. domains that have a non-working mx / that are frequently abused.
# we use a multiline string here with one regex per line (used with re.search).
Example #4
 def test_query_default_allow(self):
     resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
     expected = IPSet(['0.0.0.0/0'])
     self.assertEqual(expected, self._get_filter()._query_rules(resource))
Example #5
 def test_query_empty_rules(self):
     rules = []
     expected = IPSet()
     self.assertEqual(expected, self._get_filter(rules)._query_rules(self.resource))
Example #6
 def test_close_one(self, database, ip_ticket_manager2):
     assert database.tickets.find({"open": True}).count() == 2
     ip_ticket_manager2.ips = IPSet([IPS[0]])
     ip_ticket_manager2.close_tickets()
     assert database.tickets.find({"open": True}).count() == 1
     assert database.tickets.find({"open": False}).count() == 1
Example #7
def _load_appservice(hostname, as_info, config_filename):
    required_string_fields = ["id", "as_token", "hs_token", "sender_localpart"]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), str):
            raise KeyError("Required string field: '%s' (%s)" %
                           (field, config_filename))

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
    if not isinstance(as_info.get("url"), str) and as_info.get("url",
                                                               "") is not None:
        raise KeyError("Required string field or explicit null: 'url' (%s)" %
                       (config_filename, ))

    localpart = as_info["sender_localpart"]
    if urlparse.quote(localpart) != localpart:
        raise ValueError(
            "sender_localpart needs characters which are not URL encoded.")
    user = UserID(localpart, hostname)
    user_id = user.to_string()

    # Rate limiting for users of this AS is on by default (excludes sender)
    rate_limited = True
    if isinstance(as_info.get("rate_limited"), bool):
        rate_limited = as_info.get("rate_limited")

    # namespace checks
    if not isinstance(as_info.get("namespaces"), dict):
        raise KeyError("Requires 'namespaces' object.")
    for ns in ApplicationService.NS_LIST:
        # specific namespaces are optional
        if ns in as_info["namespaces"]:
            # expect a list of dicts with exclusive and regex keys
            for regex_obj in as_info["namespaces"][ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError(
                        "Expected namespace entry in %s to be an object, but got %s"
                        % (ns, regex_obj)
                    )
                if not isinstance(regex_obj.get("regex"), str):
                    raise ValueError(
                        "Missing/bad type 'regex' key in %s" % (regex_obj,))
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Missing/bad type 'exclusive' key in %s" % (regex_obj,))
    # protocols check
    protocols = as_info.get("protocols")
    if protocols:
        # Strings are iterable in Python, so reject them explicitly
        if isinstance(protocols, str) or not isinstance(protocols, list):
            raise KeyError("Optional 'protocols' must be a list if present.")
        for p in protocols:
            if not isinstance(p, str):
                raise KeyError("Bad value for 'protocols' item")

    if as_info["url"] is None:
        logger.info(
            "(%s) Explicitly empty 'url' provided. This application service"
            " will not receive events or queries.",
            config_filename,
        )

    ip_range_whitelist = None
    if as_info.get("ip_range_whitelist"):
        ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist"))

    return ApplicationService(
        token=as_info["as_token"],
        hostname=hostname,
        url=as_info["url"],
        namespaces=as_info["namespaces"],
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
        protocols=protocols,
        rate_limited=rate_limited,
        ip_range_whitelist=ip_range_whitelist,
    )
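A hypothetical minimal registration dict that passes all of the checks above (names invented for illustration; assumes the surrounding Synapse imports):

as_info = {
    "id": "irc_bridge",
    "as_token": "a1b2c3",
    "hs_token": "d4e5f6",
    "sender_localpart": "ircbot",
    "url": "http://localhost:9999",
    "namespaces": {"users": [{"exclusive": True, "regex": "@irc_.*"}]},
}
appservice = _load_appservice("example.org", as_info, "registration.yaml")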
Example #8
    def _query_rules(self, resource):
        rules = IPSet()
        for r in resource['rules']:
            rules.add(r)

        return rules
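IPSet.add as used here takes bare addresses as well as CIDR strings, so rule entries can be mixed; for example:

from netaddr import IPSet

rules = IPSet()
rules.add('8.8.8.8')
rules.add('10.0.0.0/16')
print('10.0.1.2' in rules)  # True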
Example #9
def main():
    '''
    Our main application
    '''

    parser = op("usage ipblisted.py --ip [ip]")
    parser.add_option('--proxy',
                      action="store",
                      dest="proxy",
                      help="Useful for when behind a proxy")
    parser.add_option('--proxy_user', action="store", dest="proxy_user")
    parser.add_option('--proxy_pass', action="store", dest="proxy_pass")
    parser.add_option('--good',
                      default=False,
                      action="store_true",
                      dest="show_good",
                      help="Displays lists that the IP did NOT show up on.")
    parser.add_option('--skip-dnsbl',
                      default=False,
                      action="store_true",
                      dest="skip_dnsbl",
                      help="Skips the checking DNS Blacklists")
    parser.add_option('--skip-bl',
                      default=False,
                      action="store_true",
                      dest="skip_bl",
                      help="Skips the checking of text based blacklists")
    parser.add_option(
        '--no-cache',
        default=False,
        action="store_true",
        dest="no_cache",
        help="This will prevent caching of text based blacklists")
    parser.add_option('--clear-cache',
                      default=False,
                      action="store_true",
                      dest="clear_cache",
                      help="This will clear the existing cache")
    parser.add_option(
        '--cache-timeout',
        default=60 * 60 * 12,
        action="store",
        dest="cache_timeout",
        help=
        "Number of seconds before cache results are to expire (Default: 12 hours)"
    )
    parser.add_option('--threads',
                      default=5,
                      type="int",
                      action="store",
                      dest="threads",
                      help="Sets the number of feed search threads")
    parser.add_option('--infile',
                      default=None,
                      action="store",
                      dest="infile",
                      help="A newline separated list of IP addresses")
    parser.add_option('--ip', action="store", dest="ip")
    parser.add_option(
        '-w',
        '--wan',
        action="store_true",
        dest="wan",
        default=False,
        help="Will add your WAN ip to the list of IP addresses being checked.")
    parser.add_option('-f',
                      '--format',
                      action="store",
                      dest="format",
                      help="Set the output format for an outfile",
                      default="csv")
    parser.add_option('-o',
                      '--outfile',
                      action="store",
                      dest="outfile",
                      help="Where to write the results",
                      default=None)
    (options, args) = parser.parse_args()

    if options.format:
        allowed_formats = ['csv', 'xls', 'xlsx', 'txt']
        if options.format not in allowed_formats:
            cprint(
                "[!] Invalid format \"{}\".  Please select a valid format {}".
                format(options.format, ', '.join(allowed_formats)), RED)
            sys.exit(1)

    if options.outfile:
        print("[*] Results will be saved to {} in {} format".format(
            options.outfile, options.format))

    # Check if the user supplied an IP address or IP block
    if options.ip is None and options.infile is None and options.wan is False:
        print(
            "[!] You must supply an IP address, the WAN flag or a file containing IP addresses."
        )
        sys.exit(1)

    # Set our list of IPs to an empty list
    ips = []

    # Load up the IP in the --ip flag
    if options.ip:
        if '\\' in options.ip or '/' in options.ip:
            cprint(
                "[!] Detected CIDR notation, adding all IP addresses in this range",
                BLUE)
            for ip in IPSet([options.ip]):
                ips += [str(ip)]
        elif len(options.ip.split(',')) > 1:
            ips += [ip for ip in options.ip.split(',')
                    if ip != '']  # Handles when user does ,%20
        else:
            ips += [options.ip]

    # If the user supplied a file load these as well
    if options.infile:
        ips += [
            ip for ip in open(options.infile).read().split('\n') if ip != ''
        ]

    if options.wan:
        ip = wan_ip()
        if ip:
            ips += [ip]
        else:
            cprint(
                "[!] There was an issue trying to gather the WAN IP address.",
                RED)

    # Check if the user set their credentials when using a proxy
    if options.proxy:
        if options.proxy_user is None or options.proxy_pass is None:
            cprint(
                "[!] Warning, no proxy credentials supplied.  Authenticated proxies may not work.",
                BLUE)
        else:
            options.proxy_pass = urllib.quote(options.proxy_pass)

    # Initialize a queue for the feeds to go in
    fq = Queue()

    # Load in all the feeds from the feed configuration file
    feeds = load_feeds({
        "skip_bl": options.skip_bl,
        "skip_dnsbl": options.skip_dnsbl
    })

    # Establish the requests cache
    if not options.no_cache:
        requests_cache.install_cache('ipblisted',
                                     expire_after=int(options.cache_timeout))

        # If the user wants to manually clear the cache, do it now
        if options.clear_cache:
            requests_cache.clear()

    # If there are no feeds set, just exit the program
    if len(feeds) == 0:
        cprint(
            "[!] No feeds were defined, please define them in feeds.json or don't skip them all.",
            RED)
        sys.exit(1)

    # Final check to make sure we actually have a list of IP addresses to check
    if len(ips) == 0:
        cprint(
            "[!] No IP addresses were listed to check.  Please check your syntax and try again.",
            RED)
        sys.exit(1)

    feed_results = []

    # Loop through each IP and find it
    print("[*] Checking {} IP addresses against {} lists".format(
        len(ips), len(feeds)))
    for ip in ips:

        print("[*] Searching Blacklist feeds for IP {ip}".format(ip=ip))

        # Build the feed requests queue
        oq = Queue()

        # Create a queue of all the feeds we want to check
        for f in feeds:
            fq.put(f)
        qsize = fq.qsize()

        # Start up our threads and start checking the feeds
        threads = [
            FeedThread(ip, options, fq, oq) for i in range(0, options.threads)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # Set the number of lists we have found to 0
        find_count = 0

        # Go through each feed and see if we find the IP or block
        results = [r for r in oq.queue]

        if options.outfile:
            convert_results(results, ip, options.outfile)

        # Print out if the IP was found in any of the feeds
        for result in results:

            output = "[*] {name}: {found}".format(**result)

            if result["found"] == "Found":
                find_count += 1
                cprint(output, RED)
                continue

            if options.show_good:
                cprint(output)

        if find_count == 0:
            cprint("[*] Not found on any defined lists.", GREEN)
        else:
            cprint("[*] Found on {}/{} lists.".format(find_count, qsize), RED)
        print("[-]")
Example #10
 def blank_account(cls, env, address, initial_nonce=0):
     env.db.put(BLANK_HASH, b'')
     balance = Balance(IPSet())
     o = cls(initial_nonce, object_to_bin(balance), env, address)
     o.existent_at_start = False
     return o
Example #11
def main():
    db_ssl_ca_cert = os.getenv("SECRET_DB_CA_CERT")
    db_ssl_ca_file = os.getenv("DB_CA_FILE")

    if pem_is_valid(db_ssl_ca_cert) and db_ssl_ca_file:
        try:
            with open(db_ssl_ca_file, 'w') as f:
                f.write(db_ssl_ca_cert)
        except Exception as err:
            print >> sys.stderr, "Problem writing MySQL CA certficate file", err
            sys.exit(1)
    else:
        # Reset db_ssl_ca_file if we're missing or can't validate one or more
        # environment variables for SSL, to avoid adding an 'ssl_ca' option
        # pointing at a file that was never written to the local.ini config.
        if not db_ssl_ca_file:
            print >> sys.stderr, "DB_CA_FILE env variable undefined, skipping MySQL SSL config."
        if not db_ssl_ca_cert:
            print >> sys.stderr, "SECRET_DB_CA_CERT env variable undefined, skipping MySQL SSL config."
        db_ssl_ca_file = None

    # Verify that we can parse the TRUSTED_IPS list
    trusted_ips = os.getenv("TRUSTED_IPS", default="127.0.0.1")
    trusted_ips = "".join(trusted_ips.split())  # remove whitespace
    trusted_list = trusted_ips.split(',')

    try:
        trusted_ip_range = IPSet(trusted_list)
    except Exception as e:
        print >> sys.stderr, trusted_list, e
        print >> sys.stderr, "Problem parsing TRUSTED_IPS environment variable"
        sys.exit(1)

    try:
        config = {
            "db_host":
            os.getenv("DB_HOST", default="localhost"),
            "db_user":
            os.getenv("DB_USER", default="vegadns"),
            "db_pass":
            os.getenv("SECRET_DB_PASS", default="secret"),
            "db_db":
            os.getenv("DB_DB", default="vegadns"),
            "db_ssl_ca":
            db_ssl_ca_file,
            "vegadns_generation":
            os.getenv("VEGADNS_GENERATION", default=""),
            "vegadns":
            os.getenv("VEGADNS",
                      default="http://127.0.0.1/1.0/export/tinydns"),
            "trusted_ips":
            trusted_ips,
            "ui_url":
            os.getenv("UI_URL", default="http://*****:*****@example.com"),
        }
    except Exception as err:
        print >> sys.stderr, "Problem reading environment", err
        sys.exit(1)

    # optionally use first argument as template, path is still
    # relative to this script
    template_file = "./local.ini.template"
    if len(sys.argv) > 1:
        template_file = sys.argv[1]

    try:
        with open(directory + "/" + template_file) as template:
            print pystache.render(template.read(), config)
    except Exception as err:
        print >> sys.stderr, "Problem rendering template", err
        sys.exit(1)
Example #12
 def test_empty(self):
     data = {'whatever': []}
     actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
     expected = IPSet()
     self.assertEqual(expected, actual)
Example #13
 def test_parse_alias(self):
     data = {'whatever': ['ServiceTags.ApiManagement.WestUS']}
     actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
     expected = IPSet(
         ['13.64.39.16/32', '40.112.242.148/31', '40.112.243.240/28'])
     self.assertEqual(expected, actual)
Example #14
 def test_parse_single_ip(self):
     data = {'whatever': ['1.2.2.127']}
     actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
     expected = IPSet(IPRange('1.2.2.127', '1.2.2.127'))
     self.assertEqual(expected, actual)
Example #15
 def test_close_none_2(self, database, ip_ticket_manager2):
     assert database.tickets.find({"open": True}).count() == 2
     ip_ticket_manager2.ips = IPSet(IPS[2:])
     ip_ticket_manager2.close_tickets()
     assert database.tickets.find({"open": True}).count() == 2
Example #16
    def print_org_format(self, cvss_min='4.0', cvss_max='10.0'):
        """
        Print to standard output extracted information in '.org' format (Emacs)
        """

        # Print reports parsed
        print "* Nessus files parsed"
        for report in self._xml_source:
            print "\t%s" % report

        # Print scan's information
        print "* Parsing info"
        print "\tResults filtered by: %s" % cvss_min
        print "\tTotal targets analized: %s" % len(self._results.keys())

        # Print targets
        print "* Targets"
        for host in IPSet(self._results.keys()):
            print "\t%s" % str(host)

        print "* Results"
        for host in self._results.keys():
            print "** %s" % host
            # Print specific system's information
            print "\tScan started at: %s" % self._results[host][0]['scan_start']
            print "\tScan stopped at: %s" % self._results[host][0]['scan_stop']
            hostname = self._results[host][0]['hostname']
            if hostname != '':
                print "\tHostname: %s" % hostname
            netbios = self._results[host][0]['netbios_name']
            if netbios != '':
                print "\tNetbios Name: %s" % netbios
            os = self._results[host][0]['os']
            if os != '':
                print "\tOperating System: %s" % os
            mac = self._results[host][0]['mac_address']
            if mac != '':
                print "\tMAC: %s" % mac

            # Sort vulnerabilities by CVSS score
            for vuln in sorted(self._results[host][1:],
                               key=lambda cvss: float(cvss['cvss_base_score']),
                               reverse=True):
                cvss = vuln['cvss_base_score']
                if cvss != "":
                    # Apply CVSS filter
                    if float(cvss) >= float(cvss_min) and float(cvss) <= float(
                            cvss_max):
                        # CVSS - Plugin name - Plugin ID
                        print "*** TODO [CVSS %04s][%s] %s [ID: %s]" % (
                            cvss, vuln['service_name'], vuln['plugin_name'],
                            vuln['plugin_id'])
                        # Port , Protocol
                        print "\tPort: %s/%s" % (vuln['port'],
                                                 vuln['protocol'])

                        # Service name
                        # service = vuln['service_name']
                        # if service is not '':
                        #     print "\tService: %s" % service

                        # Description
                        # print "\tDescription: %s" % vuln['description']

                        # Public exploits available
                        exploit = vuln['exploit_available']
                        metasploit = vuln['metasploit']
                        if exploit == 'true':
                            print "\tExploit available!"
                        if metasploit == 'true':
                            print "\tMetasploit module available!"

                        # CVSS Vector
                        cvss_vector = vuln['cvss_vector']
                        if cvss_vector != '':
                            print "\tCVSS Vector %s" % cvss_vector.split(
                                "#")[1]

                        # CVE
                        cve = vuln['cve']
                        if cve != '':
                            print "\tCVE %s" % cve
Example #17
 def test_close_all(self, database, ip_ticket_manager2):
     assert database.tickets.find({"open": True}).count() == 2
     ip_ticket_manager2.ips = IPSet(IPS)
     ip_ticket_manager2.close_tickets()
     assert database.tickets.find({"open": True}).count() == 0
     assert database.tickets.find({"open": False}).count() == 2
Example #18
 def update(self):
     self.list = [
         IPNetwork(item.target, False) for item in self.scope_source()
     ]
     self.set = IPSet(self.list)
     self.size = self.set.size
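IPSet.size used above counts the individual addresses covered by all members; for example:

from netaddr import IPNetwork, IPSet

s = IPSet([IPNetwork('10.0.0.0/24'), IPNetwork('10.0.1.0/24')])
print(s.size)  # 512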
Example #19
def _to_enos_networks(networks):
    """Transform the networks returned by deploy5k.

    Args:
        networks (dict): networks returned by
            :py:func:`enoslib.infra.provider.Provider.init`
    """
    nets = []
    for network in networks:
        net = {
            "cidr": str(network["network"]),
            "gateway": str(network["gateway"]),
            # NOTE(msimonin): This will point to the nameserver of the site
            # where the deployment is launched regardless the actual site in
            # the network description. Until now we used the global DNS IP
            # here. Finally this information couldn't be found in the API (dec.
            # 18) otherwise we'd move this logic in utils.concretize_networks
            # (like network and gateway)
            "dns": socket.gethostbyname(NAMESERVER),
            "roles": get_roles_as_list(network)
        }
        if network["type"] in KAVLAN_TYPE:
            # On the network, the first IPs are reserved for g5k machines.
            # For a routed VLAN I don't know exactly how many IPs are
            # reserved. However, the specification is clear about global
            # VLANs: "A global VLAN is a /18 subnet (16382 IP addresses).
            # It is split -- contiguously -- so that every site gets one
            # /23 (510 ip) in the global VLAN address space". There are 12
            # sites, which means we can take IPs from the 13th subnetwork
            # onwards. Let's assume the strategy is the same for routed
            # VLANs. See
            # https://www.grid5000.fr/mediawiki/index.php/Grid5000:Network#KaVLAN
            #
            # First, split the network into /23s; this yields 32 subnetworks.
            # Then (i) drop the first 12 subnetworks because they are
            # dedicated to g5k machines, and (ii) drop the last one
            # because some of its IPs are used for specific things such as
            # the gateway, the kavlan server...
            subnets = IPNetwork(network["network"])
            if network["vlan_id"] < 4:
                # vlan local
                subnets = list(subnets.subnet(24))
                subnets = subnets[4:7]
            else:
                subnets = list(subnets.subnet(23))
                subnets = subnets[13:31]

            # Finally, compute the range of available ips
            ips = IPSet(subnets).iprange()

            net.update({
                "start": str(IPAddress(ips.first)),
                "end": str(IPAddress(ips.last))
            })
        elif network["type"] in SUBNET_TYPE:
            start_ip, start_mac = network["ipmac"][0]
            end_ip, end_mac = network["ipmac"][-1]
            net.update({
                "start": start_ip,
                "end": end_ip,
                "mac_start": start_mac,
                "mac_end": end_mac
            })

        net.update({"roles": get_roles_as_list(network)})
        nets.append(net)
    logger.debug(nets)
    return nets
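The subnet arithmetic described in the KaVLAN comment is easy to verify with netaddr (the address block here is just a stand-in):

from netaddr import IPAddress, IPNetwork, IPSet

subnets = list(IPNetwork('10.16.0.0/18').subnet(23))
print(len(subnets))          # 32 blocks of /23
usable = subnets[13:31]      # same slice as the routed-vlan branch above
ips = IPSet(usable).iprange()
print(IPAddress(ips.first), IPAddress(ips.last))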
Example #20
def resolve_service_tag_alias(rule):
    if rule.lower().startswith('servicetags'):
        p = rule.split('.')
        resource_name = p[1] if 1 < len(p) else None
        resource_region = p[2] if 2 < len(p) else None
        return IPSet(get_service_tag_ip_space(resource_name, resource_region))
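A call matches the alias format seen in Example #13; anything that is not a service tag alias falls through and returns None:

ip_space = resolve_service_tag_alias('ServiceTags.ApiManagement.WestUS')
# -> IPSet of that service tag's prefixes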
Example #21
    def read_config(self, config, **kwargs):

        # Only enable the media repo if either the media repo is enabled or the
        # current worker app is the media repo.
        if (
            self.enable_media_repo is False
            and config.get("worker_app") != "synapse.app.media_repository"
        ):
            self.can_load_media_repo = False
            return
        else:
            self.can_load_media_repo = True

        # Whether this instance should be the one to run the background jobs to
        # e.g clean up old URL previews.
        self.media_instance_running_background_jobs = config.get(
            "media_instance_running_background_jobs",
        )

        self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
        self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(
            config.get("media_store_path", "media_store")
        )

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False
        )

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [
                {
                    "module": "file_system",
                    "store_local": True,
                    "store_synchronous": synchronous_backup_media_store,
                    "store_remote": True,
                    "config": {"directory": backup_media_store_path},
                }
            ]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # the class_config the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []  # type: List[tuple]

        for provider_config in storage_providers:
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend"
                )

            provider_class, parsed_config = load_module(provider_config)

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append(
                (provider_class, parsed_config, wrapper_config)
            )

        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES)
        )
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                check_requirements("url_preview")

            except DependencyException as e:
                raise ConfigError(e.message)

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work"
                )

            # netaddr is a dependency for url_preview
            from netaddr import IPSet

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"]
            )

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(["0.0.0.0", "::"])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ())
            )

            self.url_preview_url_blacklist = config.get("url_preview_url_blacklist", ())

            self.url_preview_accept_language = config.get(
                "url_preview_accept_language"
            ) or ["en"]
Example #22
 def test_query_empty_network_acl(self):
     resource = {'properties': {}}
     expected = IPSet(['0.0.0.0/0'])
     self.assertEqual(expected, self._get_filter()._query_rules(resource))
Example #23
 def load_data(key):
     cmd = "git cat-file -p " + key
     data = subprocess.check_output(cmd.split(' '))
     data = json.loads(data)
     return IPSet(
         [IPNetwork(x["ip_prefix"]) for x in data["prefixes"]])
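The JSON consumed here is assumed to follow the shape of AWS's published ip-ranges.json: a top-level "prefixes" array whose entries carry an "ip_prefix" CIDR. For example:

data = {"prefixes": [{"ip_prefix": "192.0.2.0/24"},
                     {"ip_prefix": "198.51.100.0/24"}]}
ip_set = IPSet([IPNetwork(x["ip_prefix"]) for x in data["prefixes"]])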
Example #24
    def read_config(self, config):
        self.max_upload_size = self.parse_size(
            config.get("max_upload_size", "10M"))
        self.max_image_pixels = self.parse_size(
            config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(
            config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(
            config["media_store_path"])

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False)

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [{
                "module": "file_system",
                "store_local": True,
                "store_synchronous": synchronous_backup_media_store,
                "store_remote": True,
                "config": {
                    "directory": backup_media_store_path,
                }
            }]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # the class_config the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []

        for provider_config in storage_providers:
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend")

            provider_class, parsed_config = load_module(provider_config)

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append((
                provider_class,
                parsed_config,
                wrapper_config,
            ))

        self.uploads_path = self.ensure_directory(config["uploads_path"])
        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES), )
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                import lxml
                lxml  # To stop unused lint.
            except ImportError:
                raise ConfigError(MISSING_LXML)

            try:
                from netaddr import IPSet
            except ImportError:
                raise ConfigError(MISSING_NETADDR)

            if "url_preview_ip_range_blacklist" in config:
                self.url_preview_ip_range_blacklist = IPSet(
                    config["url_preview_ip_range_blacklist"])
            else:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work")

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ()))

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ())
Example #25
 def test_query_regular_rules_with_magic(self):
     rules = [IpRange(start_ip_address='10.0.0.0', end_ip_address='10.0.255.255'),
              IpRange(start_ip_address='8.8.8.8', end_ip_address='8.8.8.8'),
              IpRange(start_ip_address='0.0.0.0', end_ip_address='0.0.0.0')]
     expected = IPSet(['8.8.8.8', '10.0.0.0/16'])
     self.assertEqual(expected, self._get_filter(rules)._query_rules(self.resource))
Example #26
def main(username, device, ipaddr):
    '''Get password and create device
    '''
    pwd = getpass('Password: ')
    dev = {
        'device_type': 'a10',
        'ip': device,
        'username': username,
        'password': pwd,
        'global_delay_factor': 3,
        'verbose': True,
        'secret': pwd,
    }
    '''Select the partition based on the IP address.
       Please adjust to your network.
    '''

    net1 = IPSet(['172.29.1.0/24']) | IPSet(['172.29.2.0/24'])
    net2 = IPSet(['172.31.1.0/24'])
    net3 = IPSet(['172.29.3.0/24']) | IPSet(['172.29.4.0/24'])
    net4 = IPSet(['172.31.3.0/24'])
    '''We use ip address for partition selection
       Adjust to your needs.
    '''

    if ipaddr in net1:
        network = 'First_Partition'

    if ipaddr in net2:
        network = 'Second_partition'

    if ipaddr in net3:
        network = 'Third_Partition'

    if ipaddr in net4:
        network = 'Fourth_Partition'
    '''Connect to device
    '''

    net_connect = ConnectHandler(**dev)

    print("\nStart time: {}".format(str(datetime.now())))
    print("---------------------------------")
    print('Connected to: {}'.format(device))

    net = 'active-partition %s' % network
    net_connect.send_command_timing(net)
    '''Look for ip
    '''

    cmdip = 'show run | section %s' % (ipaddr)
    out = net_connect.send_command(cmdip)
    ports = re.findall(r'port\s(.*)', out)
    vip = re.findall(r'service-group\s(.*)', out)
    print(' ')
    print('Searching {} ports {}'.format(vip, ports))

    for m in vip:
        members = 'show slb service-group %s config | include Member' % (m)
        cmdmembers = net_connect.send_command(members)
        regex = re.findall(r'Member[0-9]:([^\t][^:]+)', cmdmembers)

        print(' ')
        print('Hosts for {} are:'.format(m))
        print(' ')
        for host in regex:
            host = host.strip()
            iphosts = 'show running-config | include %s' % (host)
            findhosts = net_connect.send_command(iphosts)
            hosts = re.findall(r'slb\sserver\s(.*)', findhosts)
            if hosts:
                print(hosts)

    print("---------------------------------")
Example #27
    def test_client_ip_range_blacklist(self):
        """Ensure that Synapse does not try to connect to blacklisted IPs"""

        # Set up the ip_range blacklist
        self.hs.config.federation_ip_range_blacklist = IPSet(
            ["127.0.0.0/8", "fe80::/64"])
        self.reactor.lookups["internal"] = "127.0.0.1"
        self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337"
        self.reactor.lookups["fine"] = "10.20.30.40"
        cl = MatrixFederationHttpClient(self.hs, None)

        # Try making a GET request to a blacklisted IPv4 address
        # ------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.get_json("internal:8008", "foo/bar", timeout=10000))

        # Nothing happened yet
        self.assertNoResult(d)

        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        f = self.failureResultOf(d)
        self.assertIsInstance(f.value, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a POST request to a blacklisted IPv6 address
        # -------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.post_json("internalv6:8008", "foo/bar", timeout=10000))

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was unable to resolve the address
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 0)

        # Check that it was due to a blacklisted DNS lookup
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception, DNSLookupError)

        # Try making a GET request to a non-blacklisted IPv4 address
        # ----------------------------------------------------------
        # Make the request
        d = defer.ensureDeferred(
            cl.post_json("fine:8008", "foo/bar", timeout=10000))

        # Nothing has happened yet
        self.assertNoResult(d)

        # Move the reactor forwards
        self.pump(1)

        # Check that it was able to resolve the address
        clients = self.reactor.tcpClients
        self.assertNotEqual(len(clients), 0)

        # Connection will still fail as this IP address does not resolve to anything
        f = self.failureResultOf(d, RequestSendFailed)
        self.assertIsInstance(f.value.inner_exception,
                              ConnectingCancelledError)
Example #28
 def test_init(self, ip_ticket_manager1):
     ptm = ip_ticket_manager1
     assert ptm.ips == IPSet(), "ips should be an empty IPSet at init"
Example #29
def generate_carbon_black(feed, start, num, desc, value, **kwargs):
    zrange = SR.zrange
    ilist = zrange(feed, 0, (1 << 32) - 1)
    mm_to_cb = {"IPv4": "ipv4", "domain": "dns", "md5": "md5"}
    ind_by_type = {"dns": [], "md5": []}

    # Let's stream the information as soon as we have it
    yield "{\n\"feedinfo\": {\n"
    cb_feed_info = CbFeedInfo(name=feed)
    for cb_info_parts in cb_feed_info.iterate():
        yield "  " + cb_info_parts
    yield "\n},\n\"reports\": [{"

    report_args = dict()
    report_args["id"] = feed + "_report"

    report_title = kwargs.get('rt', ["MineMeld Generated Report"])
    if report_title is not None:
        report_title = report_title[0]
    report_args["title"] = report_title

    report_score = kwargs.get('rs', None)
    if report_score is not None:
        try:
            report_score = int(report_score[0])
        except ValueError:
            report_score = None
    report_args["score"] = report_score

    cb_report = CbReport(**report_args)

    for cb_report_parts in cb_report.iterate():
        yield "  " + cb_report_parts
    yield ",    \"iocs\": {"
    yield "        \"ipv4\": ["

    # Loop though all indicators
    # Only indicators of type IPv4, domain and md5 can be exported to Carbon Black
    ipv4_line = None
    for i in ilist:
        sleep(0)
        v = SR.hget(feed + '.value', i)
        v = None if v is None else json.loads(v)
        if v is None:
            continue
        v_type = v.get("type", None)
        if v_type not in mm_to_cb:
            continue
        if v_type in ("domain", "md5"):
            ind_by_type[mm_to_cb[v_type]].append(i.lower())
            continue

        # Carbon Black does not support IPv4 networks or ranges, so expand
        # them into individual addresses.
        if _IPV4_MASK_RE.match(i):
            ip_range = IPSet(IPNetwork(i))
        elif _IPV4_RANGE_RE.match(i):
            range_parts = i.split("-")
            ip_range = IPRange(range_parts[0], range_parts[1])
        else:
            # plain single IPv4 indicator
            ip_range = [i]
        for ip_addr in ip_range:
            if ipv4_line is not None:
                yield ipv4_line + ","
            ipv4_line = "\"{}\"".format(str(ip_addr))
    yield ("" if ipv4_line is None else ipv4_line) + "],"
    yield "\"dns\": {},".format(json.dumps(ind_by_type["dns"]))
    yield "\"md5\": {}".format(json.dumps(ind_by_type["md5"]))
    yield "}}]}"
Example #30
 def test_parse_multi_net(self):
     data = {'whatever': ['1.2.2.127/32', '1.2.2.128/25']}
     actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
     expected = IPSet(IPRange('1.2.2.127', '1.2.2.127')) | \
         IPSet(IPRange('1.2.2.128', '1.2.2.255'))
     self.assertEqual(expected, actual)
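netaddr can confirm the range-to-CIDR equivalences these expectations rely on:

from netaddr import IPRange

print(IPRange('1.2.2.128', '1.2.2.255').cidrs())  # [IPNetwork('1.2.2.128/25')]
print(IPRange('1.2.2.127', '1.2.2.127').cidrs())  # [IPNetwork('1.2.2.127/32')]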