Example No. 1
def scan(domain, options):
    logging.debug("[%s][pshtt]" % domain)

    # cache output from pshtt
    cache_pshtt = utils.cache_path(domain, "pshtt", ext="json")

    force = options.get("force", False)
    data = None

    if (force is False) and (os.path.exists(cache_pshtt)):
        logging.debug("\tCached.")
        raw = open(cache_pshtt).read()
        data = json.loads(raw)
        if isinstance(data, dict) and data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s" % (command, domain))

        flags = "--json --user-agent \"%s\" --timeout %i --preload-cache %s" % (
            user_agent, timeout, preload_cache)

        # Only useful when debugging interaction between projects.
        # flags = "%s --debug" % flags

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version
        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute("%s && %s %s %s" %
                                   (pyenv_init, command, domain, flags))

        if not raw:
            utils.write(utils.invalid({}), cache_pshtt)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "pshtt"))

    # pshtt scanner uses JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]

        # TODO: Fix this upstream
        if (field != "HSTS Header") and (field != "HSTS Max Age") and (
                field != "Redirect To"):
            if value is None:
                value = False

        row.append(value)

    yield row
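
Each of these pshtt scan() functions leans on module-level names (command, headers, user_agent, timeout, preload_cache, pyenv_version) defined elsewhere in the scanner module. A minimal sketch of that scaffolding follows; the import path and every concrete value here are assumptions for illustration, not the project's actual configuration.

import json
import logging
import os

from scanners import utils  # hypothetical import path for the shared utils module

command = os.environ.get("PSHTT_PATH", "pshtt")  # assumed path to the pshtt CLI
user_agent = "example-scanner (https://example.com)"  # hypothetical User-Agent
timeout = 30  # seconds before pshtt gives up on a domain (assumed default)
preload_cache = "cache/preload.json"  # hypothetical HSTS preload cache path
pyenv_version = "2.7.11"  # hypothetical Python 2 environment pshtt runs under

# CSV columns; these must match the field names read out of pshtt's JSON.
# Abridged and illustrative; the real list mirrors pshtt's output fields.
headers = [
    "Canonical URL", "Live", "Redirect", "Redirect To",
    "Valid HTTPS", "HSTS", "HSTS Header", "HSTS Max Age"
]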
Example No. 2
def scan(domain, options):
    logging.debug("[%s][pshtt]" % domain)

    # cache output from pshtt
    cache_pshtt = utils.cache_path(domain, "pshtt", ext="json")

    force = options.get("force", False)
    data = None

    if (force is False) and (os.path.exists(cache_pshtt)):
        logging.debug("\tCached.")
        raw = open(cache_pshtt).read()
        data = json.loads(raw)
        if isinstance(data, dict) and data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s" % (command, domain))

        flags = "--json --user-agent \"%s\" --timeout %i --preload-cache %s" % (user_agent, timeout, preload_cache)

        # Only useful when debugging interaction between projects.
        # flags = "%s --debug" % flags

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version
        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute("%s && %s %s %s" % (pyenv_init, command, domain, flags))

        if not raw:
            utils.write(utils.invalid({}), cache_pshtt)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "pshtt"))

    # pshtt scanner uses JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]

        # TODO: Fix this upstream
        if (field != "HSTS Header") and (field != "HSTS Max Age") and (field != "Redirect To"):
            if value is None:
                value = False

        row.append(value)

    yield row
Example No. 3
def scan(domain, options):
    logging.debug("[%s][pshtt]" % domain)

    # cache output from pshtt
    cache_pshtt = utils.cache_path(domain, "pshtt", ext="json")

    force = options.get("force", False)
    data = None

    if (force is False) and (os.path.exists(cache_pshtt)):
        logging.debug("\tCached.")
        raw = open(cache_pshtt).read()
        data = json.loads(raw)
        if isinstance(data, dict) and data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s" % (command, domain))

        raw = utils.scan([
            command,
            domain,
            '--json',
            '--user-agent', user_agent,  # one argv entry; assumes utils.scan execs without a shell, so no quoting
            '--timeout', str(timeout),
            '--preload-cache', preload_cache
        ])

        if not raw:
            utils.write(utils.invalid({}), cache_pshtt)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "pshtt"))

    # pshtt scanner uses JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]

        # TODO: Fix this upstream
        if (field != "HSTS Header") and (field != "HSTS Max Age") and (field != "Redirect To"):
            if value is None:
                value = False

        row.append(value)

    yield row
Example No. 4
def get_from_pshtt_cache(domain):
    pshtt_cache = utils.cache_path(domain, "pshtt")
    pshtt_raw = open(pshtt_cache).read()
    pshtt_data = json.loads(pshtt_raw)
    if type(pshtt_data) == list:
        pshtt_data = pshtt_data[0]
    return pshtt_data
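
get_from_pshtt_cache relies on utils.cache_path to locate per-domain result files. A plausible sketch of that helper, assuming a cache/<scanner>/<domain>.<ext> layout (the real layout may differ):

import os

def cache_path(domain, operation, ext="json", cache_dir="./cache"):
    # Hypothetical: each scanner's results live under
    # <cache_dir>/<operation>/<domain>.<ext>,
    # e.g. ./cache/pshtt/example.com.json
    return os.path.join(cache_dir, operation, "%s.%s" % (domain, ext))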
Example No. 5
def scan(domain, options):
    logging.debug("[%s][trustymail]" % domain)

    # cache output from trustymail
    cache_trustymail = utils.cache_path(domain, "trustymail", ext="json")

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_trustymail)):
        logging.debug("\tCached.")
        raw = open(cache_trustymail).read()
        data = json.loads(raw)
        if isinstance(data, dict) and data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s" % (command, domain))

        raw = utils.scan([
            command,
            domain,
            '--json',
            '--timeout',
            str(timeout),
        ])

        if not raw:
            utils.write(utils.invalid({}), cache_trustymail)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data),
                    utils.cache_path(domain, "trustymail"))

    # trustymail scanner follows pshtt in using JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]

        row.append(value)

    yield row
Example No. 6
def scan(domain, options):
    logging.debug("[%s][analytics]" % domain)
    logging.debug("\tChecking file.")

    data = {'participating': (domain in analytics_domains)}

    cache = utils.cache_path(domain, "analytics")
    utils.write(utils.json_for(data), cache)

    yield [data['participating']]
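
analytics_domains is assumed to be a pre-loaded set of participating hostnames, which keeps the `domain in analytics_domains` membership test cheap. A hedged sketch of how it might be initialized from a one-hostname-per-line file (the loader and path are assumptions):

def load_analytics_domains(path):
    # Hypothetical loader: one hostname per line, lowercased to match
    # the membership test in scan() above.
    with open(path) as f:
        return set(line.strip().lower() for line in f if line.strip())

analytics_domains = load_analytics_domains("cache/analytics/participating.csv")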
Example No. 7
def scan(domain, options):
    logging.debug("[%s][pageload]" % domain)

    inspection = utils.data_for(domain, "inspect")

    # If we have data from inspect, skip if it's not a live domain.
    if inspection and (not inspection.get("up")):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return None

    # If we have data from inspect, skip if it's just a redirector.
    if inspection and (inspection.get("redirect") is True):
        logging.debug(
            "\tSkipping, domain seen as just a redirector during inspection.")
        return None

    # phantomas needs a URL, not just a domain.
    if not (domain.startswith('http://') or domain.startswith('https://')):

        # If we have data from inspect, use the canonical endpoint.
        if inspection and inspection.get("canonical"):
            url = inspection.get("canonical")

        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain

    # We'll cache prettified JSON from the output.
    cache = utils.cache_path(domain, "pageload")

    # If we've got it cached, use that.
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    # If no cache, or we should run anyway, do the scan.
    else:
        logging.debug("\t %s %s --reporter=json --ignore-ssl-errors" %
                      (command, url))
        raw = utils.scan(
            [command, url, "--reporter=json", "--ignore-ssl-errors"])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None

        # It had better be JSON, which we can cache in prettified form.
        data = json.loads(raw)
        utils.write(utils.json_for(data), cache)

    yield [data['metrics'][metric] for metric in interesting_metrics]
Example No. 8
def scan(domain, options):
    logging.debug("[%s][analytics]" % domain)
    logging.debug("\tChecking file.")

    data = {
        'participating': (domain in analytics_domains)
    }

    cache = utils.cache_path(domain, "analytics")
    utils.write(utils.json_for(data), cache)

    yield [data['participating']]
Example No. 9
def scan(domain, options):
    logging.debug("[%s][pageload]" % domain)

    inspection = utils.data_for(domain, "inspect")

    # If we have data from inspect, skip if it's not a live domain.
    if inspection and (not inspection.get("up")):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return None

    # If we have data from inspect, skip if it's just a redirector.
    if inspection and (inspection.get("redirect") is True):
        logging.debug("\tSkipping, domain seen as just a redirector during inspection.")
        return None

    # phantomas needs a URL, not just a domain.
    if not (domain.startswith('http://') or domain.startswith('https://')):

        # If we have data from inspect, use the canonical endpoint.
        if inspection and inspection.get("canonical"):
            url = inspection.get("canonical")

        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain

    # We'll cache prettified JSON from the output.
    cache = utils.cache_path(domain, "pageload")

    # If we've got it cached, use that.
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    # If no cache, or we should run anyway, do the scan.
    else:
        logging.debug("\t %s %s --reporter=json --ignore-ssl-errors" % (command, url))
        raw = utils.scan([command, url, "--reporter=json", "--ignore-ssl-errors"])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None

        # It had better be JSON, which we can cache in prettified form.
        data = json.loads(raw)
        utils.write(utils.json_for(data), cache)

    yield [data['metrics'][metric] for metric in interesting_metrics]
Example No. 10
def scan(domain, options):
    logging.debug("[%s][inspect]" % domain)

    # cache JSON as it comes back from site-inspector
    cache = utils.cache_path(domain, "inspect")
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s --http" % (command, domain))
        raw = utils.scan([command, domain, "--http"])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None
        utils.write(raw, cache)
        data = json.loads(raw)

    # TODO: get this from a site-inspector field directly
    canonical_https = data['endpoints']['https'][data['canonical_endpoint']]
    # TODO: guarantee these as present in site-inspector
    https_valid = canonical_https.get('https_valid', False)
    https_bad_chain = canonical_https.get('https_bad_chain', False)
    https_bad_name = canonical_https.get('https_bad_name', False)
    # TODO: site-inspector should float this up
    hsts_details = canonical_https.get('hsts_details', {})
    max_age = hsts_details.get('max_age', None)

    yield [
        data['canonical'], data['up'],
        data['redirect'], data['redirect_to'],
        https_valid, data['default_https'], data['downgrade_https'],
        data['enforce_https'],
        https_bad_chain, https_bad_name,
        data['hsts'], data['hsts_header'],
        max_age,
        data['hsts_entire_domain'],
        data['hsts_entire_domain_preload'],
        domain in chrome_preload_list,
        data['broken_root'], data['broken_www']
    ]
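
The 18 values yielded above have to line up positionally with the module's headers list. A sketch of plausible column names in the same order (illustrative names, not necessarily the project's exact strings):

headers = [
    "Canonical", "Live", "Redirect", "Redirect To",
    "Valid HTTPS", "Defaults to HTTPS", "Downgrades HTTPS", "Enforces HTTPS",
    "Bad Chain", "Bad Hostname",
    "HSTS", "HSTS Header", "HSTS Max Age",
    "HSTS All Subdomains", "HSTS Preload Ready", "HSTS Preloaded",
    "Broken Root", "Broken WWW"
]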
Example No. 11
def check_wildcard(subdomain, options):

    wildcard = wildcard_for(subdomain)

    cache = utils.cache_path(subdomain, "subdomains")
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS info cached.")
        raw = open(cache).read()
        data = json.loads(raw)

    else:
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        data = {'response': {'wild': parsed_wild, 'itself': parsed_self}}
        utils.write(
            utils.json_for(data),
            cache
        )

    return data['response']
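
check_wildcard compares a subdomain's DNS answers against those of its wildcard sibling. wildcard_for is assumed to swap the leftmost label for `*`; a minimal sketch:

def wildcard_for(subdomain):
    # e.g. "foo.bar.example.com" -> "*.bar.example.com"
    # Assumes the input has at least two labels.
    return "*." + subdomain.split(".", 1)[1]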
Example No. 12
def scan(domain, options):
    logging.debug("[%s][inspect]" % domain)

    # cache JSON as it comes back from site-inspector
    cache = utils.cache_path(domain, "inspect")
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    else:
        logging.debug("\t %s %s --http" % (command, domain))
        raw = utils.scan([command, domain, "--http"])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None
        utils.write(raw, cache)
        data = json.loads(raw)

    # TODO: get this from a site-inspector field directly
    canonical_https = data['endpoints']['https'][data['canonical_endpoint']]
    # TODO: guarantee these as present in site-inspector
    https_valid = canonical_https.get('https_valid', False)
    https_bad_chain = canonical_https.get('https_bad_chain', False)
    https_bad_name = canonical_https.get('https_bad_name', False)
    # TODO: site-inspector should float this up
    hsts_details = canonical_https.get('hsts_details', {})
    max_age = hsts_details.get('max_age', None)

    yield [
        data['canonical'], data['up'], data['redirect'], data['redirect_to'],
        https_valid, data['default_https'], data['downgrade_https'],
        data['enforce_https'], https_bad_chain, https_bad_name, data['hsts'],
        data['hsts_header'], max_age, data['hsts_entire_domain'],
        data['hsts_entire_domain_preload'], domain in chrome_preload_list,
        data['broken_root'], data['broken_www']
    ]
Example No. 13
def check_wildcard(subdomain, options):

    wildcard = wildcard_for(subdomain)

    cache = utils.cache_path(subdomain, "subdomains")
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS info cached.")
        raw = open(cache).read()
        data = json.loads(raw)

    else:
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        data = {'response': {'wild': parsed_wild, 'itself': parsed_self}}
        utils.write(utils.json_for(data), cache)

    return data['response']
Example No. 14
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS in prior inspection
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    # Optional: if inspect data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if inspection and (inspection.get("canonical_endpoint") == "www") and (not domain.startswith("www.")):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache XML from sslyze
    cache_xml = utils.cache_path(domain, "sslyze", ext="xml")
    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_xml))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_xml)):
        logging.debug("\tCached.")
        xml = open(cache_xml).read()

    else:
        logging.debug("\t %s %s" % (command, scan_domain))
        # use scan_domain (possibly www-prefixed) to do actual scan
        raw = utils.scan([command, "--regular", "--quiet", scan_domain, "--xml_out=%s" % cache_xml], env=command_env)

        if raw is None:
            # TODO: save standard invalid XML data...?
            logging.warn("\tBad news scanning, sorry!")
            return None

        xml = utils.scan(["cat", cache_xml])
        if not xml:
            logging.warn("\tBad news reading XML, sorry!")
            return None

        utils.write(xml, cache_xml)

    data = parse_sslyze(xml)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    utils.write(utils.json_for(data), utils.cache_path(domain, "sslyze"))

    yield [
        data['protocols']['sslv2'], data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'], data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'],

        data['config'].get('any_dhe'), data['config'].get('all_dhe'),
        data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'), data['config'].get('all_rc4'),

        data['config'].get('ocsp_stapling'),

        data['certs'].get('key_type'), data['certs'].get('key_length'),
        data['certs'].get('leaf_signature'), data['certs'].get('any_sha1'),
        data['certs'].get('not_before'), data['certs'].get('not_after'),
        data['certs'].get('served_issuer'),

        data.get('errors')
    ]
Example No. 15
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS in pshtt scan.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported.")
        return None

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache JSON from sslyze
    cache_json = utils.cache_path(domain, "sslyze")
    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_json))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_json)):
        logging.debug("\tCached.")
        raw_json = open(cache_json).read()
        try:
            data = json.loads(raw_json)
            if isinstance(data, dict) and data.get('invalid'):
                return None
        except json.decoder.JSONDecodeError:
            logging.warn("Error decoding JSON. Cache probably corrupted.")
            return None

    else:
        # use scan_domain (possibly www-prefixed) to do actual scan
        logging.debug("\t %s %s" % (command, scan_domain))

        # This is --regular minus --heartbleed
        # See: https://github.com/nabla-c0d3/sslyze/issues/217
        raw_response = utils.scan([
            command,
            "--sslv2", "--sslv3", "--tlsv1", "--tlsv1_1", "--tlsv1_2",
            "--reneg", "--resum", "--certinfo",
            "--http_get", "--hide_rejected_ciphers",
            "--compression", "--openssl_ccs",
            "--fallback", "--quiet",
            scan_domain, "--json_out=%s" % cache_json
        ])

        if raw_response is None:
            # TODO: save standard invalid JSON data...?
            utils.write(utils.invalid({}), cache_json)
            logging.warn("\tBad news scanning, sorry!")
            return None

        raw_json = utils.scan(["cat", cache_json])
        if not raw_json:
            logging.warn("\tBad news reading JSON, sorry!")
            return None

        utils.write(raw_json, cache_json)

    data = parse_sslyze(raw_json)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    yield [
        scan_domain,
        data['protocols']['sslv2'], data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'], data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'],

        data['config'].get('any_dhe'), data['config'].get('all_dhe'),
        data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'), data['config'].get('all_rc4'),

        data['certs'].get('key_type'), data['certs'].get('key_length'),
        data['certs'].get('leaf_signature'),
        data['certs'].get('any_sha1_served'),
        data['certs'].get('any_sha1_constructed'),
        data['certs'].get('not_before'), data['certs'].get('not_after'),
        data['certs'].get('served_issuer'), data['certs'].get('constructed_issuer'),

        data.get('errors')
    ]
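
The sslyze variants here all hand their raw output (XML or JSON) to parse_sslyze and then index the result the same way, so the parser's contract can be inferred even though its body isn't shown. A stub of the assumed return shape, following this JSON variant's keys, with placeholder values:

def parse_sslyze(raw):
    # Inferred contract: None when sslyze couldn't connect; otherwise a
    # dict indexed as in the yield blocks above. Values are placeholders.
    return {
        'protocols': {'sslv2': False, 'sslv3': False,
                      'tlsv1.0': True, 'tlsv1.1': True, 'tlsv1.2': True},
        'config': {'any_dhe': None, 'all_dhe': None, 'weakest_dh': None,
                   'any_rc4': None, 'all_rc4': None, 'ocsp_stapling': None},
        'certs': {'key_type': None, 'key_length': None,
                  'leaf_signature': None,
                  'any_sha1_served': None, 'any_sha1_constructed': None,
                  'not_before': None, 'not_after': None,
                  'served_issuer': None, 'constructed_issuer': None},
        'errors': None
    }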
Example No. 16
def gather(suffix, options):
    # Register a (free) Censys.io account to get a UID and API key.
    uid = options.get("censys_id", None)
    api_key = options.get("censys_key", None)

    if (uid is None) or (api_key is None):
        uid = os.environ.get("CENSYS_UID", None)
        api_key = os.environ.get("CENSYS_API_KEY", None)

    if (uid is None) or (api_key is None):
        logging.warn(
            "No Censys credentials set. API key required to use the Censys API."
        )
        exit(1)

    certificate_api = certificates.CensysCertificates(uid, api_key)

    query = "parsed.subject.common_name:\"%s\" or parsed.extensions.subject_alt_name.dns_names:\"%s\"" % (
        suffix, suffix)
    logging.debug("Censys query:\n%s\n" % query)

    # Hostnames beginning with a wildcard prefix will have the prefix stripped.
    wildcard_pattern = re.compile(r"^\*\.")
    redacted_pattern = re.compile(r"^(\?\.)+")

    # time to sleep between requests (defaults to 5s)
    delay = int(options.get("delay", 5))

    # Censys page size, fixed
    page_size = 100

    # Start page defaults to 1.
    start_page = int(options.get("start", 1))

    # End page defaults to whatever the API says is the last one.
    end_page = options.get("end", None)
    if end_page is None:
        end_page = get_end_page(query, certificate_api)
        if end_page is None:
            logging.warn("Error looking up number of pages.")
            exit(1)
    else:
        end_page = int(end_page)

    max_records = ((end_page - start_page) + 1) * page_size

    # Cache hostnames in a dict for de-duping.
    hostnames_map = {}

    fields = [
        "parsed.subject.common_name",
        "parsed.extensions.subject_alt_name.dns_names"
    ]

    current_page = start_page

    logging.warn("Fetching up to %i records, starting at page %i." %
                 (max_records, start_page))
    last_cached = False
    force = options.get("force", False)

    while current_page <= end_page:
        if (not last_cached) and (current_page > start_page):
            logging.debug("(Waiting %is before fetching page %i.)" %
                          (delay, current_page))
            last_cached = False
            time.sleep(delay)

        logging.debug("Fetching page %i." % current_page)

        cache_page = utils.cache_path(str(current_page), "censys")
        if (force is False) and (os.path.exists(cache_page)):
            logging.warn("\t[%i] Cached page." % current_page)
            last_cached = True

            certs_raw = open(cache_page).read()
            certs = json.loads(certs_raw)
            if isinstance(certs, dict) and certs.get('invalid'):
                # Advance past the invalid page, so the cached marker
                # doesn't make this loop spin forever.
                current_page += 1
                continue
        else:
            try:
                certs = list(
                    certificate_api.search(query,
                                           fields=fields,
                                           page=current_page,
                                           max_records=page_size))
                utils.write(utils.json_for(certs), cache_page)
            except censys.base.CensysException:
                logging.warn(utils.format_last_exception())
                logging.warn("Censys error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                current_page += 1
                continue
            except:
                logging.warn(utils.format_last_exception())
                logging.warn("Unexpected error, skipping page %i." %
                             current_page)
                utils.write(utils.invalid({}), cache_page)
                exit(1)

        for cert in certs:
            # Common name + SANs
            names = cert.get('parsed.subject.common_name', []) + cert.get(
                'parsed.extensions.subject_alt_name.dns_names', [])
            logging.debug(names)

            for name in names:
                # Strip off any wildcard prefix.
                name = re.sub(wildcard_pattern, '', name).lower().strip()
                # Strip off any redacted ? prefixes. (Ugh.)
                name = re.sub(redacted_pattern, '', name).lower().strip()
                hostnames_map[name] = None

        current_page += 1

    logging.debug("Done fetching from API.")

    # Iterator doesn't buy much efficiency, since we paginated already.
    # Necessary evil to de-dupe before returning hostnames, though.
    for hostname in hostnames_map.keys():
        yield hostname
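
get_end_page is assumed to ask the Censys API how many pages the query spans. A sketch against the old censys Python client; treat the metadata() call and its 'pages' key as assumptions about that client's API:

import censys.base

def get_end_page(query, api):
    # Hypothetical: search metadata reports the total page count.
    try:
        return api.metadata(query).get('pages')
    except censys.base.CensysException:
        return None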
Example No. 17
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If pshtt data exists, check to see if we can skip.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported.")
        return None

    # cache reformatted JSON from ssllabs
    cache = utils.cache_path(domain, "tls")

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)

        if data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, scan_domain))

        usecache = str(not force).lower()

        if options.get("debug"):
            cmd = [command, "--usecache=%s" % usecache,
                   "--verbosity=debug", scan_domain]
        else:
            cmd = [command, "--usecache=%s" % usecache,
                   "--quiet", scan_domain]

        raw = utils.scan(cmd)
        if raw:
            data = json.loads(raw)

            # if SSL Labs gave us back an error response, cache this
            # as an invalid entry.
            if len(data) < 1:
                utils.write(utils.invalid({'response': data}), cache)
                return None

            # we only give ssllabs-scan one at a time,
            # so we can de-pluralize this
            data = data[0]

            # if SSL Labs had an error hitting the site, cache this
            # as an invalid entry.
            if data["status"] == "ERROR":
                utils.write(utils.invalid(data), cache)
                return None

            utils.write(utils.json_for(data), cache)
        else:
            return None
            # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

    # can return multiple rows, one for each 'endpoint'
    for endpoint in data['endpoints']:

        # this meant it couldn't connect to the endpoint
        if not endpoint.get("grade"):
            continue

        sslv3 = False
        tlsv12 = False
        for protocol in endpoint['details']['protocols']:
            if ((protocol['name'] == "SSL") and
                    (protocol['version'] == '3.0')):
                sslv3 = True
            if ((protocol['name'] == "TLS") and
                    (protocol['version'] == '1.2')):
                tlsv12 = True

        spdy = False
        h2 = False
        npn = endpoint['details'].get('npnProtocols', None)
        if npn:
            spdy = ("spdy" in npn)
            h2 = ("h2" in npn)

        yield [
            endpoint['grade'],
            endpoint['details']['cert']['sigAlg'],
            endpoint['details']['key']['alg'],
            endpoint['details']['key']['size'],
            endpoint['details']['forwardSecrecy'],
            endpoint['details']['ocspStapling'],
            endpoint['details'].get('fallbackScsv', "N/A"),
            endpoint['details']['supportsRc4'],
            sslv3,
            tlsv12,
            spdy,
            endpoint['details']['sniRequired'],
            h2
        ]
Example No. 18
def paginated_mode(suffix, options, uid, api_key):
    # Cache hostnames in a dict for de-duping.
    hostnames_map = {}

    certificate_api = certificates.CensysCertificates(uid, api_key)

    if 'query' in options and options['query']:
        query = options['query']
    else:
        query = "parsed.subject.common_name:\"%s\" or parsed.extensions.subject_alt_name.dns_names:\"%s\"" % (
            suffix, suffix)
    logging.debug("Censys query:\n%s\n" % query)

    # time to sleep between requests (defaults to 5s)
    delay = int(options.get("delay", 5))

    # Censys page size, fixed
    page_size = 100

    # Start page defaults to 1.
    start_page = int(options.get("start", 1))

    # End page defaults to whatever the API says is the last one.
    end_page = options.get("end", None)
    if end_page is None:
        end_page = get_end_page(query, certificate_api)
        if end_page is None:
            logging.warn("Error looking up number of pages.")
            exit(1)
    else:
        end_page = int(end_page)

    max_records = ((end_page - start_page) + 1) * page_size

    fields = [
        "parsed.subject.common_name",
        "parsed.extensions.subject_alt_name.dns_names"
    ]

    current_page = start_page

    logging.warn("Fetching up to %i records, starting at page %i." %
                 (max_records, start_page))
    last_cached = False
    force = options.get("force", False)

    while current_page <= end_page:
        if (not last_cached) and (current_page > start_page):
            logging.debug("(Waiting %is before fetching page %i.)" %
                          (delay, current_page))
            last_cached = False
            time.sleep(delay)

        logging.debug("Fetching page %i." % current_page)

        cache_page = utils.cache_path(str(current_page), "censys")
        if (force is False) and (os.path.exists(cache_page)):
            logging.warn("\t[%i] Cached page." % current_page)
            last_cached = True

            certs_raw = open(cache_page).read()
            certs = json.loads(certs_raw)
            if isinstance(certs, dict) and certs.get('invalid'):
                # Advance past the invalid page, so the cached marker
                # doesn't make this loop spin forever.
                current_page += 1
                continue
        else:
            try:
                certs = list(
                    certificate_api.search(query,
                                           fields=fields,
                                           page=current_page,
                                           max_records=page_size))
                utils.write(utils.json_for(certs), cache_page)
            except censys.base.CensysException:
                logging.warn(utils.format_last_exception())
                logging.warn("Censys error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                current_page += 1
                continue
            except:
                logging.warn(utils.format_last_exception())
                logging.warn("Unexpected error, skipping page %i." %
                             current_page)
                utils.write(utils.invalid({}), cache_page)
                exit(1)

        for cert in certs:
            # Common name + SANs
            names = cert.get('parsed.subject.common_name', []) + cert.get(
                'parsed.extensions.subject_alt_name.dns_names', [])
            logging.debug(names)

            for name in names:
                hostnames_map[sanitize_name(name)] = None

        current_page += 1

    logging.debug("Done fetching from API.")

    return hostnames_map
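
sanitize_name is called here where Example No. 16 inlined the same cleanup, so it can be reconstructed almost verbatim from that version:

import re

wildcard_pattern = re.compile(r"^\*\.")
redacted_pattern = re.compile(r"^(\?\.)+")

def sanitize_name(name):
    # Strip any wildcard prefix, then any redacted "?." prefixes,
    # lowercasing and trimming as in the inline version in Example No. 16.
    name = re.sub(wildcard_pattern, '', name).lower().strip()
    return re.sub(redacted_pattern, '', name).lower().strip()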
Example No. 19
def export_mode(suffix, options, uid, api_key):
    # Cache hostnames in a dict for de-duping.
    hostnames_map = {}

    # Default timeout: 20 hours (60 * 60 * 20 seconds).
    timeout = int(options.get("timeout", (60 * 60 * 20)))

    # Wait 5 seconds between checking on the job.
    between_jobs = 5

    try:
        export_api = export.CensysExport(uid, api_key)
    except censys.base.CensysUnauthorizedException:
        logging.warn(
            "The Censys.io Export API rejected the provided Censys credentials. The credentials may be inaccurate, or you may need to request access from the Censys.io team."
        )
        exit(1)

    # Uses a FLATTEN command in order to work around a BigQuery
    # error around multiple "repeated" fields. *shrug*
    query = "SELECT parsed.subject.common_name, parsed.extensions.subject_alt_name.dns_names from FLATTEN([certificates.certificates], parsed.extensions.subject_alt_name.dns_names) where parsed.subject.common_name LIKE \"%%%s\" OR parsed.extensions.subject_alt_name.dns_names LIKE \"%%%s\";" % (
        suffix, suffix)
    logging.debug("Censys query:\n%s\n" % query)

    download_file = utils.cache_path("export", "censys", ext="csv")

    force = options.get("force", False)

    if (force is False) and os.path.exists(download_file):
        logging.warn("Using cached download data.")
    else:
        logging.warn("Kicking off SQL query job.")
        results_url = None

        try:
            job = export_api.new_job(query, format='csv', flatten=True)
            job_id = job['job_id']

            started = datetime.datetime.now()
            while True:
                elapsed = (datetime.datetime.now() - started).seconds

                status = export_api.check_job(job_id)
                if status['status'] == 'error':
                    logging.warn("Error from Censys: %s" % status['error'])
                    exit(1)

                # Not expected, but better to explicitly handle.
                elif status['status'] == 'expired':
                    logging.warn("Results are somehow expired, bailing.")
                    exit(1)

                elif status['status'] == 'pending':
                    logging.debug("[%is] Job still pending." % elapsed)
                    time.sleep(between_jobs)

                elif status['status'] == 'success':
                    logging.warn("[%is] Job complete!" % elapsed)
                    results_url = status['download_paths'][0]
                    break

                if (elapsed > timeout):
                    logging.warn("Timeout waiting for job to complete.")
                    exit(1)

        except censys.base.CensysException:
            logging.warn(utils.format_last_exception())
            logging.warn("Censys error, aborting.")
            exit(1)

        # At this point, the job is complete and we need to download
        # the resulting CSV URL in results_url.
        logging.warn("Downloading results of SQL query.")
        utils.download(results_url, download_file)

    # Read in downloaded CSV file, run any hostnames in each line
    # through the sanitizer, and de-dupe using the map.
    with open(download_file, newline='') as csvfile:
        for row in csv.reader(csvfile):
            if (not row[0]) or (
                    row[0].lower().startswith("parsed_subject_common_name")):
                continue

            names = [row[0].lower(), row[1].lower()]
            # logging.debug(names)

            for name in names:
                if name:
                    hostnames_map[sanitize_name(name)] = None

    return hostnames_map
Example No. 20
def network_check(subdomain, endpoint, options):
    cache = utils.cache_path(subdomain, "subdomains")

    wildcard = wildcard_for(subdomain)

    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS and content cached.")
        raw = open(cache).read()
        data = json.loads(raw)

    # Hit DNS and HTTP.
    else:
        # HTTP content: just use curl.
        #
        # Turn on --insecure because we want to see the content even at sites
        # where the certificate isn't right or proper.
        logging.debug("\t curl --silent --insecure %s" % endpoint)
        content = utils.scan(["curl", "--silent", "--insecure", endpoint])

        # DNS content: just use dig.
        #
        # Not awesome - uses an unsafe shell execution of `dig` to look up DNS,
        # as I couldn't figure out a way to get "+short" to play nice with
        # the more secure execution methods available to me. Since this system
        # isn't expected to process untrusted input, this should be okay.
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        # Cache HTTP and DNS data to disk.
        data = {
            'response': {
                'content': content,
                'wildcard_dns': parsed_wild,
                'self_dns': parsed_self
            }
        }

        if (parsed_wild) and (parsed_wild == parsed_self):
            data['response']['matched_wild'] = True
        else:
            data['response']['matched_wild'] = False

        utils.write(utils.json_for(data), cache)

    return data['response']
Example No. 21
def get_from_inspect_cache(domain):
    inspect_cache = utils.cache_path(domain, "inspect")
    inspect_raw = open(inspect_cache).read()
    inspect_data = json.loads(inspect_raw)
    return inspect_data
Example No. 22
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS, per pshtt data.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported.")
        return None

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache XML from sslyze
    cache_xml = utils.cache_path(domain, "sslyze", ext="xml")
    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_xml))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_xml)):
        logging.debug("\tCached.")
        xml = open(cache_xml).read()

    else:
        logging.debug("\t %s %s" % (command, scan_domain))
        # use scan_domain (possibly www-prefixed) to do actual scan

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version
        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute(
            "%s && %s --regular --quiet %s --xml_out=%s" %
            (pyenv_init, command, scan_domain, cache_xml))

        if raw is None:
            # TODO: save standard invalid XML data...?
            logging.warn("\tBad news scanning, sorry!")
            return None

        xml = utils.scan(["cat", cache_xml])
        if not xml:
            logging.warn("\tBad news reading XML, sorry!")
            return None

        utils.write(xml, cache_xml)

    data = parse_sslyze(xml)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    utils.write(utils.json_for(data), utils.cache_path(domain, "sslyze"))

    yield [
        data['protocols']['sslv2'], data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'], data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'], data['config'].get('any_dhe'),
        data['config'].get('all_dhe'), data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'), data['config'].get('all_rc4'),
        data['config'].get('ocsp_stapling'), data['certs'].get('key_type'),
        data['certs'].get('key_length'), data['certs'].get('leaf_signature'),
        data['certs'].get('any_sha1'), data['certs'].get('not_before'),
        data['certs'].get('not_after'), data['certs'].get('served_issuer'),
        data.get('errors')
    ]
Example No. 23
def get_a11y_cache(domain):
    return utils.cache_path(domain, "a11y")
Example No. 24
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS, per pshtt data.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported.")
        return None

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache XML from sslyze
    cache_xml = utils.cache_path(domain, "sslyze", ext="xml")
    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_xml))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_xml)):
        logging.debug("\tCached.")
        xml = open(cache_xml).read()

    else:
        logging.debug("\t %s %s" % (command, scan_domain))
        # use scan_domain (possibly www-prefixed) to do actual scan

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version
        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute("%s && %s --regular --quiet %s --xml_out=%s" % (pyenv_init, command, scan_domain, cache_xml))

        if raw is None:
            # TODO: save standard invalid XML data...?
            logging.warn("\tBad news scanning, sorry!")
            return None

        xml = utils.scan(["cat", cache_xml])
        if not xml:
            logging.warn("\tBad news reading XML, sorry!")
            return None

        utils.write(xml, cache_xml)

    data = parse_sslyze(xml)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    utils.write(utils.json_for(data), utils.cache_path(domain, "sslyze"))

    yield [
        data['protocols']['sslv2'], data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'], data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'],

        data['config'].get('any_dhe'), data['config'].get('all_dhe'),
        data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'), data['config'].get('all_rc4'),

        data['config'].get('ocsp_stapling'),

        data['certs'].get('key_type'), data['certs'].get('key_length'),
        data['certs'].get('leaf_signature'), data['certs'].get('any_sha1'),
        data['certs'].get('not_before'), data['certs'].get('not_after'),
        data['certs'].get('served_issuer'),

        data.get('errors')
    ]
Example No. 25
def scan(domain, options):
    logging.debug("[%s][third_parties]" % domain)

    # Default timeout is 15s, too little.
    timeout = int(options.get("timeout", 60))

    # If we have data from pshtt, skip if it's not a live domain.
    if utils.domain_not_live(domain):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return None

    # If we have data from pshtt, skip if it's just a redirector.
    if utils.domain_is_redirect(domain):
        logging.debug(
            "\tSkipping, domain seen as just an external redirector during inspection."
        )
        return None

    # phantomas needs a URL, not just a domain.
    if not (domain.startswith('http://') or domain.startswith('https://')):

        # If we have data from pshtt, use the canonical endpoint.
        if utils.domain_canonical(domain):
            url = utils.domain_canonical(domain)

        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain

    # calculated_domain = re.sub("https?:\/\/", "", url)

    # We'll cache prettified JSON from the output.
    cache = utils.cache_path(domain, "third_parties")

    # If we've got it cached, use that.
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    # If no cache, or we should run anyway, do the scan.
    else:
        logging.debug(
            "\t %s %s --modules=domains --reporter=json --timeout=%i --ignore-ssl-errors"
            % (command, url, timeout))
        raw = utils.scan([
            command, url, "--modules=domains", "--reporter=json",
            "--timeout=%i" % timeout, "--ignore-ssl-errors"
        ],
                         allowed_return_codes=[252])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None

        # It had better be JSON, which we can cache in prettified form.
        data = json.loads(raw)
        utils.write(utils.json_for(data), cache)

    services = services_for(data, domain, options)

    # Convert to CSV row
    known_names = list(known_services.keys())
    known_names.sort()
    known_matches = [
        'Yes' if host in services['known'] else 'No' for host in known_names
    ]

    yield [
        len(services['external']),
        len(services['internal']),
        services['external_requests'],
        services['internal_requests'],
        serialize(services['external']),
        serialize(services['internal']),
        # services['affiliated'],
        # services['unknown']
    ] + known_matches
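
services_for, known_services, and serialize are defined elsewhere in this scanner. A hedged sketch of serialize, consistent with stuffing a hostname list into one CSV cell (the separator is an assumption):

def serialize(names):
    # Hypothetical: flatten a collection of hostnames into a single,
    # stable CSV cell value.
    return " | ".join(sorted(names))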
Example No. 26
def network_check(subdomain, endpoint, options):
    cache = utils.cache_path(subdomain, "subdomains")

    wildcard = wildcard_for(subdomain)

    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS and content cached.")
        raw = open(cache).read()
        data = json.loads(raw)

    # Hit DNS and HTTP.
    else:
        # HTTP content: just use curl.
        #
        # Turn on --insecure because we want to see the content even at sites
        # where the certificate isn't right or proper.
        logging.debug("\t curl --silent --insecure %s" % endpoint)
        content = utils.scan(["curl", "--silent", "--insecure", endpoint])

        # DNS content: just use dig.
        #
        # Not awesome - uses an unsafe shell execution of `dig` to look up DNS,
        # as I couldn't figure out a way to get "+short" to play nice with
        # the more secure execution methods available to me. Since this system
        # isn't expected to process untrusted input, this should be okay.
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        # Cache HTTP and DNS data to disk.
        data = {'response': {
            'content': content,
            'wildcard_dns': parsed_wild,
            'self_dns': parsed_self
        }}

        if (parsed_wild) and (parsed_wild == parsed_self):
            data['response']['matched_wild'] = True
        else:
            data['response']['matched_wild'] = False

        utils.write(utils.json_for(data), cache)

    return data['response']
Example No. 27
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If pshtt data exists, check to see if we can skip.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported.")
        return None

    # cache reformatted JSON from ssllabs
    cache = utils.cache_path(domain, "tls")

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)

        if data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, scan_domain))

        usecache = str(not force).lower()

        if options.get("debug"):
            cmd = [
                command,
                "--usecache=%s" % usecache, "--verbosity=debug", scan_domain
            ]
        else:
            cmd = [command, "--usecache=%s" % usecache, "--quiet", scan_domain]

        raw = utils.scan(cmd)
        if raw:
            data = json.loads(raw)

            # if SSL Labs gave us back an error response, cache this
            # as an invalid entry.
            if len(data) < 1:
                utils.write(utils.invalid({'response': data}), cache)
                return None

            # we only give ssllabs-scan one at a time,
            # so we can de-pluralize this
            data = data[0]

            # if SSL Labs had an error hitting the site, cache this
            # as an invalid entry.
            if data["status"] == "ERROR":
                utils.write(utils.invalid(data), cache)
                return None

            utils.write(utils.json_for(data), cache)
        else:
            return None
            # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

    # can return multiple rows, one for each 'endpoint'
    for endpoint in data['endpoints']:

        # this meant it couldn't connect to the endpoint
        if not endpoint.get("grade"):
            continue

        sslv3 = False
        tlsv12 = False
        for protocol in endpoint['details']['protocols']:
            if ((protocol['name'] == "SSL")
                    and (protocol['version'] == '3.0')):
                sslv3 = True
            if ((protocol['name'] == "TLS")
                    and (protocol['version'] == '1.2')):
                tlsv12 = True

        spdy = False
        h2 = False
        npn = endpoint['details'].get('npnProtocols', None)
        if npn:
            spdy = ("spdy" in npn)
            h2 = ("h2" in npn)

        yield [
            endpoint['grade'], endpoint['details']['cert']['sigAlg'],
            endpoint['details']['key']['alg'],
            endpoint['details']['key']['size'],
            endpoint['details']['forwardSecrecy'],
            endpoint['details']['ocspStapling'],
            endpoint['details'].get('fallbackScsv', "N/A"),
            endpoint['details']['supportsRc4'],
            sslv3, tlsv12, spdy, endpoint['details']['sniRequired'], h2
        ]
Example No. 28
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If inspection data exists, check to see if we can skip.
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    else:
        # cache reformatted JSON from ssllabs
        cache = utils.cache_path(domain, "tls")

        force = options.get("force", False)

        if (force is False) and (os.path.exists(cache)):
            logging.debug("\tCached.")
            raw = open(cache).read()
            data = json.loads(raw)

            if data.get('invalid'):
                return None
        else:
            logging.debug("\t %s %s" % (command, domain))

            usecache = str(not force).lower()

            if options.get("debug"):
                cmd = [command, "--usecache=%s" % usecache,
                       "--verbosity=debug", domain]
            else:
                cmd = [command, "--usecache=%s" % usecache,
                       "--quiet", domain]
            raw = utils.scan(cmd)
            if raw:
                data = json.loads(raw)

                # we only give ssllabs-scan one at a time,
                # so we can de-pluralize this
                data = data[0]

                # if SSL Labs had an error hitting the site, cache this
                # as an invalid entry.
                if data["status"] == "ERROR":
                    utils.write(utils.invalid(data), cache)
                    return None

                utils.write(utils.json_for(data), cache)
            else:
                return None
                # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

        # can return multiple rows, one for each 'endpoint'
        for endpoint in data['endpoints']:

            # this meant it couldn't connect to the endpoint
            if not endpoint.get("grade"):
                continue

            sslv3 = False
            tlsv12 = False
            for protocol in endpoint['details']['protocols']:
                if ((protocol['name'] == "SSL") and
                        (protocol['version'] == '3.0')):
                    sslv3 = True
                if ((protocol['name'] == "TLS") and
                        (protocol['version'] == '1.2')):
                    tlsv12 = True

            spdy = False
            h2 = False
            npn = endpoint['details'].get('npnProtocols', None)
            if npn:
                spdy = ("spdy" in npn)
                h2 = ("h2-" in npn)

            def ccs_map(n):
                return {
                    -1: "N/A (Error)",
                    0: "N/A (Unknown)",
                    1: "No (not vulnerable)",
                    2: "No (not exploitable)",
                    3: "Yes"
                }[n]

            def fs_map(n):
                return {
                    0: "0 - No",
                    1: "1 - Some",
                    2: "2 - Modern",
                    4: "3 - Robust"
                }[n]

            yield [
                endpoint['grade'],
                endpoint['details']['cert']['sigAlg'],
                endpoint['details']['key']['alg'],
                endpoint['details']['key']['size'],
                fs_map(endpoint['details']['forwardSecrecy']),
                endpoint['details']['ocspStapling'],
                endpoint['details'].get('fallbackScsv', "N/A"),
                endpoint['details'].get('freak'),
                ccs_map(endpoint['details']['openSslCcs']),
                sslv3,
                tlsv12,
                spdy,
                endpoint['details']['sniRequired'],
                h2
            ]
Example No. 29
def gather(suffixes, options, extra={}):

    # Returns a parsed, processed Google service credentials object.
    credentials = load_credentials()

    if credentials is None:
        logging.warn("No BigQuery credentials provided.")
        logging.warn(
            "Set BIGQUERY_CREDENTIALS or BIGQUERY_CREDENTIALS_PATH environment variables."
        )
        exit(1)

    # When using this form of instantiation, the client won't pull
    # the project_id out of the creds, has to be set explicitly.
    client = bigquery.Client(project=credentials.project_id,
                             credentials=credentials)

    # Allow override of default timeout (in seconds).
    timeout = int(options.get("timeout", default_timeout))

    # Construct the query.
    query = query_for(suffixes)
    logging.debug("Censys query:\n%s\n" % query)

    # Plan to store in cache/censys/export.csv.
    download_path = utils.cache_path("export", "censys", ext="csv")

    # Reuse of cached data can be turned on with --cache.
    cache = options.get("cache", False)
    if (cache is True) and os.path.exists(download_path):
        logging.warn("Using cached download data.")

    # But by default, fetch new data from the BigQuery API,
    # and write it to the expected download location.
    else:
        logging.warn("Kicking off SQL query job.")

        rows = None

        # Actually execute the query.
        try:
            # Executes query and loads all results into memory.
            query_job = client.query(query)
            iterator = query_job.result(timeout=timeout)
            rows = list(iterator)
        except google.api_core.exceptions.Forbidden:
            logging.warn("Access denied to Censys' BigQuery tables.")
            exit(1)
        except:
            logging.warn(utils.format_last_exception())
            logging.warn("Error talking to BigQuery, aborting.")
            exit(1)

        # At this point, the job is complete and we need to download
        # the resulting CSV URL in results_url.
        logging.warn("Caching results of SQL query.")

        download_file = open(download_path, 'w', newline='')
        download_writer = csv.writer(download_file)
        download_writer.writerow(["Domain"])  # will be skipped on read

        # Parse the rows and write them out as they were returned (dupes
        # and all), to be de-duped by the central gathering script.
        for row in rows:
            domains = row['common_name'] + row['dns_names']
            for domain in domains:
                download_writer.writerow([domain])

        # End CSV writing.
        download_file.close()

    # Whether we downloaded it fresh or not, read from the cached data.
    for domain in utils.load_domains(download_path):
        if domain:
            yield domain
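
query_for is assumed to generalize the single-suffix SQL of Example No. 19 to several suffixes. In the sketch below, the column and table names are copied from that example, while the multi-suffix handling and the column aliases (which match the row['common_name'] and row['dns_names'] reads above) are assumptions:

def query_for(suffixes):
    # One LIKE clause per suffix, against common names and SANs alike.
    clauses = " OR ".join(
        "parsed.subject.common_name LIKE \"%%%s\" OR "
        "parsed.extensions.subject_alt_name.dns_names LIKE \"%%%s\""
        % (suffix, suffix) for suffix in suffixes
    )
    return (
        "SELECT parsed.subject.common_name AS common_name, "
        "parsed.extensions.subject_alt_name.dns_names AS dns_names "
        "FROM FLATTEN([certificates.certificates], "
        "parsed.extensions.subject_alt_name.dns_names) "
        "WHERE %s;" % clauses
    )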
Example No. 30
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If inspection data exists, check to see if we can skip.
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    else:
        # cache reformatted JSON from ssllabs
        cache = utils.cache_path(domain, "tls")

        force = options.get("force", False)

        if (force is False) and (os.path.exists(cache)):
            logging.debug("\tCached.")
            raw = open(cache).read()
            data = json.loads(raw)

            if data.get("invalid"):
                return None
        else:
            logging.debug("\t %s %s" % (command, domain))

            usecache = str(not force).lower()

            if options.get("debug"):
                cmd = [command, "--usecache=%s" % usecache, "--verbosity=debug", domain]
            else:
                cmd = [command, "--usecache=%s" % usecache, "--quiet", domain]
            raw = utils.scan(cmd)
            if raw:
                data = json.loads(raw)

                # we only give ssllabs-scan one at a time,
                # so we can de-pluralize this
                data = data[0]

                # if SSL Labs had an error hitting the site, cache this
                # as an invalid entry.
                if data["status"] == "ERROR":
                    utils.write(utils.invalid(data), cache)
                    return None

                utils.write(utils.json_for(data), cache)
            else:
                return None
                # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

        # can return multiple rows, one for each 'endpoint'
        for endpoint in data["endpoints"]:

            # this meant it couldn't connect to the endpoint
            if not endpoint.get("grade"):
                continue

            sslv3 = False
            tlsv12 = False
            for protocol in endpoint["details"]["protocols"]:
                if (protocol["name"] == "SSL") and (protocol["version"] == "3.0"):
                    sslv3 = True
                if (protocol["name"] == "TLS") and (protocol["version"] == "1.2"):
                    tlsv12 = True

            spdy = False
            h2 = False
            npn = endpoint["details"].get("npnProtocols", None)
            if npn:
                spdy = "spdy" in npn
                h2 = "h2-" in npn

            yield [
                endpoint["grade"],
                endpoint["details"]["cert"]["sigAlg"],
                endpoint["details"]["key"]["alg"],
                endpoint["details"]["key"]["size"],
                endpoint["details"]["forwardSecrecy"],
                endpoint["details"]["ocspStapling"],
                endpoint["details"].get("fallbackScsv", "N/A"),
                endpoint["details"]["supportsRc4"],
                sslv3,
                tlsv12,
                spdy,
                endpoint["details"]["sniRequired"],
                h2,
            ]
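
A closing note on the shape of these scanners: every scan() above is a generator, so a bare `return None` inside it simply ends iteration without producing a row, and callers can iterate the result unconditionally. A hypothetical driver loop in that style, assuming a scanner module exposing headers and scan():

import csv

def run_scanner(scanner, domains, options, out_path):
    # Hypothetical driver: one CSV per scanner, one row per yielded result.
    with open(out_path, 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(["Domain"] + scanner.headers)
        for domain in domains:
            for row in scanner.scan(domain, options):
                writer.writerow([domain] + row)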