def get_chrome_preload_list(options):
    preload_cache = utils.cache_single("preload-list.json")
    preload_json = None

    if (not options.get("force", False)) and os.path.exists(preload_cache):
        logging.debug("Using cached Chrome preload list.")
        preload_json = json.loads(open(preload_cache).read())
    else:
        logging.debug("Fetching Chrome preload list from source...")

        preload_list_url = 'https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json'
        preload_list_url_as_text = preload_list_url + '?format=text'

        with urllib.request.urlopen(preload_list_url_as_text) as response:
            raw = response.read()

        # To avoid parsing the contents of the file out of the source tree viewer's
        # HTML, we download it as a raw file. googlesource.com Base64-encodes the
        # file to avoid potential content injection issues, so we need to decode it
        # before using it. https://code.google.com/p/gitiles/issues/detail?id=7
        raw = base64.b64decode(raw).decode('utf-8')

        # The .json file contains '//' comments, which are not actually valid JSON,
        # and confuse Python's JSON decoder. Begone, foul comments!
        raw = ''.join([re.sub(r'^\s*//.*$', '', line) for line in raw.splitlines()])

        preload_json = json.loads(raw)

        utils.write(utils.json_for(preload_json), preload_cache)

    return {entry['name'] for entry in preload_json['entries']}
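# A minimal, self-contained sketch (not part of the scanner above) showing the
# same '//'-comment-stripping approach on a tiny inline sample, since that's the
# non-obvious step: joining the scrubbed lines back together yields plain JSON
# that json.loads accepts.
import json
import re

sample = """{
  // entries are kept, comments are dropped
  "entries": [
    {"name": "example.gov"}
  ]
}"""
scrubbed = ''.join(re.sub(r'^\s*//.*$', '', line) for line in sample.splitlines())
print(json.loads(scrubbed)['entries'][0]['name'])  # -> example.gov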
def scan(domain, options):
    logging.debug("[%s][pshtt]" % domain)

    # cache output from pshtt
    cache_pshtt = utils.cache_path(domain, "pshtt", ext="json")

    force = options.get("force", False)
    data = None

    if (force is False) and (os.path.exists(cache_pshtt)):
        logging.debug("\tCached.")
        raw = open(cache_pshtt).read()
        data = json.loads(raw)
        if (data.__class__ is dict) and data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, domain))

        flags = "--json --user-agent \"%s\" --timeout %i --preload-cache %s" % (user_agent, timeout, preload_cache)

        # Only useful when debugging interaction between projects.
        # flags = "%s --debug" % flags

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version

        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute("%s && %s %s %s" % (pyenv_init, command, domain, flags))

        if not raw:
            utils.write(utils.invalid({}), cache_pshtt)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "pshtt"))

    # pshtt scanner uses JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]
        # TODO: Fix this upstream
        if (field != "HSTS Header") and (field != "HSTS Max Age") and (field != "Redirect To"):
            if value is None:
                value = False
        row.append(value)

    yield row
def scan(domain, options):
    logging.debug("[%s][analytics]" % domain)

    logging.debug("\tChecking file.")
    data = {'participating': (domain in analytics_domains)}

    cache = utils.cache_path(domain, "analytics")
    utils.write(utils.json_for(data), cache)

    yield [data['participating']]
def scan(domain, options):
    logging.debug("[%s][pageload]" % domain)

    inspection = utils.data_for(domain, "inspect")

    # If we have data from inspect, skip if it's not a live domain.
    if inspection and (not inspection.get("up")):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return None

    # If we have data from inspect, skip if it's just a redirector.
    if inspection and (inspection.get("redirect") is True):
        logging.debug("\tSkipping, domain seen as just a redirector during inspection.")
        return None

    # phantomas needs a URL, not just a domain.
    if not (domain.startswith('http://') or domain.startswith('https://')):
        # If we have data from inspect, use the canonical endpoint.
        if inspection and inspection.get("canonical"):
            url = inspection.get("canonical")
        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain

    # We'll cache prettified JSON from the output.
    cache = utils.cache_path(domain, "pageload")

    # If we've got it cached, use that.
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    # If no cache, or we should run anyway, do the scan.
    else:
        logging.debug("\t %s %s --reporter=json --ignore-ssl-errors" % (command, url))
        raw = utils.scan([command, url, "--reporter=json", "--ignore-ssl-errors"])
        if not raw:
            utils.write(utils.invalid({}), cache)
            return None

        # It had better be JSON, which we can cache in prettified form.
        data = json.loads(raw)
        utils.write(utils.json_for(data), cache)

    yield [data['metrics'][metric] for metric in interesting_metrics]
def scan(domain, options):
    logging.debug("[%s][pshtt]" % domain)

    # cache output from pshtt
    cache_pshtt = utils.cache_path(domain, "pshtt", ext="json")

    force = options.get("force", False)
    data = None

    if (force is False) and (os.path.exists(cache_pshtt)):
        logging.debug("\tCached.")
        raw = open(cache_pshtt).read()
        data = json.loads(raw)
        if (data.__class__ is dict) and data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, domain))

        raw = utils.scan([
            command, domain,
            '--json',
            '--user-agent', '\"%s\"' % user_agent,
            '--timeout', str(timeout),
            '--preload-cache', preload_cache
        ])

        if not raw:
            utils.write(utils.invalid({}), cache_pshtt)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "pshtt"))

    # pshtt scanner uses JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]
        # TODO: Fix this upstream
        if (field != "HSTS Header") and (field != "HSTS Max Age") and (field != "Redirect To"):
            if value is None:
                value = False
        row.append(value)

    yield row
def handler(event, context):
    start_time = utils.local_now()

    domain = event.get('domain')
    options = event.get('options')
    name = event.get('scanner')
    environment = event.get('environment')

    # Log all sent events, for the record.
    utils.configure_logging(options)
    logging.warn(event)

    # Might be acceptable to let this crash the module, in Lambda.
    try:
        scanner = importlib.import_module("scanners.%s" % name)
    except ImportError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.error("[%s] Scanner not found, or had an error during loading.\n\tERROR: %s\n\t%s" % (name, exc_type, exc_value))
        exit(1)  # ?

    # Same method call as when run locally.
    data = scanner.scan(domain, environment, options)

    # We capture start and end times locally as well, but it's
    # useful to know the start/end from Lambda's vantage point.
    end_time = utils.local_now()
    duration = end_time - start_time

    response = {
        'lambda': {
            'log_group_name': context.log_group_name,
            'log_stream_name': context.log_stream_name,
            'request_id': context.aws_request_id,
            'memory_limit': context.memory_limit_in_mb,
            'start_time': start_time,
            'end_time': end_time,
            'measured_duration': duration
        },
        'data': data
    }

    # Serialize and re-parse the JSON, so that we run our own
    # date transform functions in one place, before Amazon's built-in
    # JSON serialization prepares the data for transport.
    return utils.from_json(utils.json_for(response))
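# Hedged illustration (not from the source): the event shape this Lambda handler
# expects, inferred only from the event.get() calls above. The key names are the
# real ones; the values are made-up placeholders.
example_event = {
    'domain': 'example.gov',
    'scanner': 'pshtt',            # loaded as scanners.pshtt via importlib
    'options': {'force': False},   # passed to utils.configure_logging()
    'environment': {}              # passed straight through to scanner.scan()
}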
def scan(domain, options):
    logging.debug("[%s][trustymail]" % domain)

    # cache output from trustymail
    cache_trustymail = utils.cache_path(domain, "trustymail", ext="json")

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_trustymail)):
        logging.debug("\tCached.")
        raw = open(cache_trustymail).read()
        data = json.loads(raw)
        if (data.__class__ is dict) and data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, domain))

        raw = utils.scan([
            command, domain,
            '--json',
            '--timeout', str(timeout),
        ])

        if not raw:
            utils.write(utils.invalid({}), cache_trustymail)
            logging.warn("\tBad news scanning, sorry!")
            return None

        data = json.loads(raw)
        utils.write(utils.json_for(data), utils.cache_path(domain, "trustymail"))

    # trustymail scanner follows pshtt in using JSON arrays, even for single items
    data = data[0]

    row = []
    for field in headers:
        value = data[field]
        row.append(value)

    yield row
def check_wildcard(subdomain, options):
    wildcard = wildcard_for(subdomain)

    cache = utils.cache_path(subdomain, "subdomains")
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS info cached.")
        raw = open(cache).read()
        data = json.loads(raw)
    else:
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        data = {'response': {'wild': parsed_wild, 'itself': parsed_self}}
        utils.write(utils.json_for(data), cache)

    return data['response']
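# `wildcard_for` is defined elsewhere in this gatherer. A plausible sketch (an
# assumption, not the project's actual helper), consistent with how
# check_wildcard uses it to dig the wildcard sibling of a subdomain:
def wildcard_for(subdomain):
    # "api.example.gov" -> "*.example.gov"
    return "*." + ".".join(subdomain.split(".")[1:])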
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS in prior inspection
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    # Optional: if inspect data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if inspection and (inspection.get("canonical_endpoint") == "www") and (not domain.startswith("www.")):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache XML from sslyze
    cache_xml = utils.cache_path(domain, "sslyze", ext="xml")

    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_xml))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_xml)):
        logging.debug("\tCached.")
        xml = open(cache_xml).read()
    else:
        logging.debug("\t %s %s" % (command, domain))

        # use scan_domain (possibly www-prefixed) to do actual scan
        raw = utils.scan([command, "--regular", "--quiet", scan_domain, "--xml_out=%s" % cache_xml], env=command_env)

        if raw is None:
            # TODO: save standard invalid XML data...?
            logging.warn("\tBad news scanning, sorry!")
            return None

        xml = utils.scan(["cat", cache_xml])
        if not xml:
            logging.warn("\tBad news reading XML, sorry!")
            return None

        utils.write(xml, cache_xml)

    data = parse_sslyze(xml)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    utils.write(utils.json_for(data), utils.cache_path(domain, "sslyze"))

    yield [
        data['protocols']['sslv2'],
        data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'],
        data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'],

        data['config'].get('any_dhe'),
        data['config'].get('all_dhe'),
        data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'),
        data['config'].get('all_rc4'),
        data['config'].get('ocsp_stapling'),

        data['certs'].get('key_type'),
        data['certs'].get('key_length'),
        data['certs'].get('leaf_signature'),
        data['certs'].get('any_sha1'),
        data['certs'].get('not_before'),
        data['certs'].get('not_after'),
        data['certs'].get('served_issuer'),

        data.get('errors')
    ]
def network_check(subdomain, endpoint, options):
    cache = utils.cache_path(subdomain, "subdomains")

    wildcard = wildcard_for(subdomain)

    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tDNS and content cached.")
        raw = open(cache).read()
        data = json.loads(raw)

    # Hit DNS and HTTP.
    else:
        # HTTP content: just use curl.
        #
        # Turn on --insecure because we want to see the content even at sites
        # where the certificate isn't right or proper.
        logging.debug("\t curl --silent --insecure %s" % endpoint)
        content = utils.scan(["curl", "--silent", "--insecure", endpoint])

        # DNS content: just use dig.
        #
        # Not awesome - uses an unsafe shell execution of `dig` to look up DNS,
        # as I couldn't figure out a way to get "+short" to play nice with
        # the more secure execution methods available to me. Since this system
        # isn't expected to process untrusted input, this should be okay.
        logging.debug("\t dig +short '%s'" % wildcard)
        raw_wild = utils.unsafe_execute("dig +short '%s'" % wildcard)

        if raw_wild == "":
            raw_wild = None
            raw_self = None
        else:
            logging.debug("\t dig +short '%s'" % subdomain)
            raw_self = utils.unsafe_execute("dig +short '%s'" % subdomain)

        if raw_wild:
            parsed_wild = raw_wild.split("\n")
            parsed_wild.sort()
        else:
            parsed_wild = None

        if raw_self:
            parsed_self = raw_self.split("\n")
            parsed_self.sort()
        else:
            parsed_self = None

        # Cache HTTP and DNS data to disk.
        data = {
            'response': {
                'content': content,
                'wildcard_dns': parsed_wild,
                'self_dns': parsed_self
            }
        }

        if (parsed_wild) and (parsed_wild == parsed_self):
            data['response']['matched_wild'] = True
        else:
            data['response']['matched_wild'] = False

        utils.write(utils.json_for(data), cache)

    return data['response']
def paginated_mode(suffix, options, uid, api_key):
    # Cache hostnames in a dict for de-duping.
    hostnames_map = {}

    certificate_api = certificates.CensysCertificates(uid, api_key)

    if 'query' in options and options['query']:
        query = options['query']
    else:
        query = "parsed.subject.common_name:\"%s\" or parsed.extensions.subject_alt_name.dns_names:\"%s\"" % (suffix, suffix)
    logging.debug("Censys query:\n%s\n" % query)

    # time to sleep between requests (defaults to 5s)
    delay = int(options.get("delay", 5))

    # Censys page size, fixed
    page_size = 100

    # Start page defaults to 1.
    start_page = int(options.get("start", 1))

    # End page defaults to whatever the API says is the last one.
    end_page = options.get("end", None)
    if end_page is None:
        end_page = get_end_page(query, certificate_api)
        if end_page is None:
            logging.warn("Error looking up number of pages.")
            exit(1)
    else:
        end_page = int(end_page)

    max_records = ((end_page - start_page) + 1) * page_size

    fields = [
        "parsed.subject.common_name",
        "parsed.extensions.subject_alt_name.dns_names"
    ]

    current_page = start_page

    logging.warn("Fetching up to %i records, starting at page %i." % (max_records, start_page))

    last_cached = False
    force = options.get("force", False)

    while current_page <= end_page:
        if (not last_cached) and (current_page > start_page):
            logging.debug("(Waiting %is before fetching page %i.)" % (delay, current_page))
            last_cached = False
            time.sleep(delay)

        logging.debug("Fetching page %i." % current_page)

        cache_page = utils.cache_path(str(current_page), "censys")
        if (force is False) and (os.path.exists(cache_page)):
            logging.warn("\t[%i] Cached page." % current_page)
            last_cached = True

            certs_raw = open(cache_page).read()
            certs = json.loads(certs_raw)
            if (certs.__class__ is dict) and certs.get('invalid'):
                continue
        else:
            try:
                certs = list(certificate_api.search(query, fields=fields, page=current_page, max_records=page_size))
                utils.write(utils.json_for(certs), cache_page)
            except censys.base.CensysException:
                logging.warn(utils.format_last_exception())
                logging.warn("Censys error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                continue
            except:
                logging.warn(utils.format_last_exception())
                logging.warn("Unexpected error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                exit(1)

        for cert in certs:
            # Common name + SANs
            names = cert.get('parsed.subject.common_name', []) + cert.get('parsed.extensions.subject_alt_name.dns_names', [])
            logging.debug(names)

            for name in names:
                hostnames_map[sanitize_name(name)] = None

        current_page += 1

    logging.debug("Done fetching from API.")

    return hostnames_map
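# `sanitize_name` is used above but defined elsewhere. The non-paginated gather()
# variant further down inlines the same cleanup (strip wildcard and redacted "?."
# prefixes, lowercase, trim), so a sketch consistent with it might look like this
# (an assumption about the exact helper):
def sanitize_name(name):
    name = re.sub(r"^\*\.", '', name)     # strip wildcard prefix
    name = re.sub(r"^(\?\.)+", '', name)  # strip redacted "?." prefixes
    return name.lower().strip()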
def scan(domain, options):
    logging.debug("[%s][sslyze]" % domain)

    # Optional: skip domains which don't support HTTPS in prior inspection
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    # cache XML from sslyze
    cache_xml = utils.cache_path(domain, "sslyze", ext="xml")

    # because sslyze manages its own output (can't yet print to stdout),
    # we have to mkdir_p the path ourselves
    utils.mkdir_p(os.path.dirname(cache_xml))

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache_xml)):
        logging.debug("\tCached.")
        xml = open(cache_xml).read()
    else:
        logging.debug("\t %s %s" % (command, scan_domain))

        # use scan_domain (possibly www-prefixed) to do actual scan

        # Give the Python shell environment a pyenv environment.
        pyenv_init = "eval \"$(pyenv init -)\" && pyenv shell %s" % pyenv_version

        # Really un-ideal, but calling out to Python2 from Python 3 is a nightmare.
        # I don't think this tool's threat model includes untrusted CSV, either.
        raw = utils.unsafe_execute("%s && %s --regular --quiet %s --xml_out=%s" % (pyenv_init, command, scan_domain, cache_xml))

        if raw is None:
            # TODO: save standard invalid XML data...?
            logging.warn("\tBad news scanning, sorry!")
            return None

        xml = utils.scan(["cat", cache_xml])
        if not xml:
            logging.warn("\tBad news reading XML, sorry!")
            return None

        utils.write(xml, cache_xml)

    data = parse_sslyze(xml)

    if data is None:
        logging.warn("\tNo valid target for scanning, couldn't connect.")
        return None

    utils.write(utils.json_for(data), utils.cache_path(domain, "sslyze"))

    yield [
        data['protocols']['sslv2'],
        data['protocols']['sslv3'],
        data['protocols']['tlsv1.0'],
        data['protocols']['tlsv1.1'],
        data['protocols']['tlsv1.2'],

        data['config'].get('any_dhe'),
        data['config'].get('all_dhe'),
        data['config'].get('weakest_dh'),
        data['config'].get('any_rc4'),
        data['config'].get('all_rc4'),
        data['config'].get('ocsp_stapling'),

        data['certs'].get('key_type'),
        data['certs'].get('key_length'),
        data['certs'].get('leaf_signature'),
        data['certs'].get('any_sha1'),
        data['certs'].get('not_before'),
        data['certs'].get('not_after'),
        data['certs'].get('served_issuer'),

        data.get('errors')
    ]
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If inspection data exists, check to see if we can skip.
    if utils.domain_doesnt_support_https(domain):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    # cache reformatted JSON from ssllabs
    cache = utils.cache_path(domain, "tls")

    # Optional: if pshtt data says canonical endpoint uses www and this domain
    # doesn't have it, add it.
    if utils.domain_uses_www(domain):
        scan_domain = "www.%s" % domain
    else:
        scan_domain = domain

    force = options.get("force", False)

    if (force is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None
    else:
        logging.debug("\t %s %s" % (command, scan_domain))

        usecache = str(not force).lower()

        if options.get("debug"):
            cmd = [command, "--usecache=%s" % usecache, "--verbosity=debug", scan_domain]
        else:
            cmd = [command, "--usecache=%s" % usecache, "--quiet", scan_domain]
        raw = utils.scan(cmd)

        if raw:
            data = json.loads(raw)

            # if SSL Labs gave us back an error response, cache this
            # as an invalid entry.
            if len(data) < 1:
                utils.write(utils.invalid({'response': data}), cache)
                return None

            # we only give ssllabs-scan one at a time,
            # so we can de-pluralize this
            data = data[0]

            # if SSL Labs had an error hitting the site, cache this
            # as an invalid entry.
            if data["status"] == "ERROR":
                utils.write(utils.invalid(data), cache)
                return None

            utils.write(utils.json_for(data), cache)
        else:
            return None
            # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

    # can return multiple rows, one for each 'endpoint'
    for endpoint in data['endpoints']:

        # this meant it couldn't connect to the endpoint
        if not endpoint.get("grade"):
            continue

        sslv3 = False
        tlsv12 = False
        for protocol in endpoint['details']['protocols']:
            if ((protocol['name'] == "SSL") and (protocol['version'] == '3.0')):
                sslv3 = True
            if ((protocol['name'] == "TLS") and (protocol['version'] == '1.2')):
                tlsv12 = True

        spdy = False
        h2 = False
        npn = endpoint['details'].get('npnProtocols', None)
        if npn:
            spdy = ("spdy" in npn)
            h2 = ("h2" in npn)

        yield [
            endpoint['grade'],
            endpoint['details']['cert']['sigAlg'],
            endpoint['details']['key']['alg'],
            endpoint['details']['key']['size'],
            endpoint['details']['forwardSecrecy'],
            endpoint['details']['ocspStapling'],
            endpoint['details'].get('fallbackScsv', "N/A"),
            endpoint['details']['supportsRc4'],
            sslv3,
            tlsv12,
            spdy,
            endpoint['details']['sniRequired'],
            h2
        ]
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If inspection data exists, check to see if we can skip.
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        yield None

    else:
        # cache reformatted JSON from ssllabs
        cache = utils.cache_path(domain, "tls")

        force = options.get("force", False)

        if (force is False) and (os.path.exists(cache)):
            logging.debug("\tCached.")
            raw = open(cache).read()
            data = json.loads(raw)
            if data.get('invalid'):
                return None
        else:
            logging.debug("\t %s %s" % (command, domain))

            usecache = str(not force).lower()

            if options.get("debug"):
                cmd = [command, "--usecache=%s" % usecache, "--verbosity=debug", domain]
            else:
                cmd = [command, "--usecache=%s" % usecache, "--quiet", domain]
            raw = utils.scan(cmd)

            if raw:
                data = json.loads(raw)

                # we only give ssllabs-scan one at a time,
                # so we can de-pluralize this
                data = data[0]

                # if SSL Labs had an error hitting the site, cache this
                # as an invalid entry.
                if data["status"] == "ERROR":
                    utils.write(utils.invalid(data), cache)
                    return None

                utils.write(utils.json_for(data), cache)
            else:
                return None
                # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

        # can return multiple rows, one for each 'endpoint'
        for endpoint in data['endpoints']:

            # this meant it couldn't connect to the endpoint
            if not endpoint.get("grade"):
                continue

            sslv3 = False
            tlsv12 = False
            for protocol in endpoint['details']['protocols']:
                if ((protocol['name'] == "SSL") and (protocol['version'] == '3.0')):
                    sslv3 = True
                if ((protocol['name'] == "TLS") and (protocol['version'] == '1.2')):
                    tlsv12 = True

            spdy = False
            h2 = False
            npn = endpoint['details'].get('npnProtocols', None)
            if npn:
                spdy = ("spdy" in npn)
                h2 = ("h2-" in npn)

            def ccs_map(n):
                return {
                    -1: "N/A (Error)",
                    0: "N/A (Unknown)",
                    1: "No (not vulnerable)",
                    2: "No (not exploitable)",
                    3: "Yes"
                }[n]

            def fs_map(n):
                return {
                    0: "0 - No",
                    1: "1 - Some",
                    2: "2 - Modern",
                    4: "3 - Robust"
                }[n]

            yield [
                endpoint['grade'],
                endpoint['details']['cert']['sigAlg'],
                endpoint['details']['key']['alg'],
                endpoint['details']['key']['size'],
                fs_map(endpoint['details']['forwardSecrecy']),
                endpoint['details']['ocspStapling'],
                endpoint['details'].get('fallbackScsv', "N/A"),
                endpoint['details'].get('freak'),
                ccs_map(endpoint['details']['openSslCcs']),
                sslv3,
                tlsv12,
                spdy,
                endpoint['details']['sniRequired'],
                h2
            ]
def scan(domain, options):
    logging.debug("[%s][third_parties]" % domain)

    # Default timeout is 15s, too little.
    timeout = int(options.get("timeout", 60))

    # If we have data from pshtt, skip if it's not a live domain.
    if utils.domain_not_live(domain):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return None

    # If we have data from pshtt, skip if it's just a redirector.
    if utils.domain_is_redirect(domain):
        logging.debug("\tSkipping, domain seen as just an external redirector during inspection.")
        return None

    # phantomas needs a URL, not just a domain.
    if not (domain.startswith('http://') or domain.startswith('https://')):
        # If we have data from pshtt, use the canonical endpoint.
        if utils.domain_canonical(domain):
            url = utils.domain_canonical(domain)
        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain

    # calculated_domain = re.sub("https?:\/\/", "", url)

    # We'll cache prettified JSON from the output.
    cache = utils.cache_path(domain, "third_parties")

    # If we've got it cached, use that.
    if (options.get("force", False) is False) and (os.path.exists(cache)):
        logging.debug("\tCached.")
        raw = open(cache).read()
        data = json.loads(raw)
        if data.get('invalid'):
            return None

    # If no cache, or we should run anyway, do the scan.
    else:
        logging.debug("\t %s %s --modules=domains --reporter=json --timeout=%i --ignore-ssl-errors" % (command, url, timeout))
        raw = utils.scan([
            command, url,
            "--modules=domains",
            "--reporter=json",
            "--timeout=%i" % timeout,
            "--ignore-ssl-errors"
        ], allowed_return_codes=[252])

        if not raw:
            utils.write(utils.invalid({}), cache)
            return None

        # It had better be JSON, which we can cache in prettified form.
        data = json.loads(raw)
        utils.write(utils.json_for(data), cache)

    services = services_for(data, domain, options)

    # Convert to CSV row
    known_names = list(known_services.keys())
    known_names.sort()
    known_matches = ['Yes' if host in services['known'] else 'No' for host in known_names]

    yield [
        len(services['external']),
        len(services['internal']),

        services['external_requests'],
        services['internal_requests'],

        serialize(services['external']),
        serialize(services['internal']),
        # services['affiliated'],
        # services['unknown']
    ] + known_matches
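# `serialize`, `services_for`, and `known_services` are module-level helpers not
# shown here. A hedged sketch of serialize only (an assumption, not the project's
# actual helper): it just needs to flatten a set of hostnames into one stable
# CSV cell value.
def serialize(hosts):
    return " | ".join(sorted(hosts))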
def gather(suffix, options):
    # Register a (free) Censys.io account to get a UID and API key.
    uid = options.get("censys_id", None)
    api_key = options.get("censys_key", None)

    if (uid is None) or (api_key is None):
        uid = os.environ.get("CENSYS_UID", None)
        api_key = os.environ.get("CENSYS_API_KEY", None)

    if (uid is None) or (api_key is None):
        logging.warn("No Censys credentials set. API key required to use the Censys API.")
        exit(1)

    certificate_api = certificates.CensysCertificates(uid, api_key)

    query = "parsed.subject.common_name:\"%s\" or parsed.extensions.subject_alt_name.dns_names:\"%s\"" % (suffix, suffix)
    logging.debug("Censys query:\n%s\n" % query)

    # Hostnames beginning with a wildcard prefix will have the prefix stripped.
    wildcard_pattern = re.compile("^\*\.")
    redacted_pattern = re.compile("^(\?\.)+")

    # time to sleep between requests (defaults to 5s)
    delay = int(options.get("delay", 5))

    # Censys page size, fixed
    page_size = 100

    # Start page defaults to 1.
    start_page = int(options.get("start", 1))

    # End page defaults to whatever the API says is the last one.
    end_page = options.get("end", None)
    if end_page is None:
        end_page = get_end_page(query, certificate_api)
        if end_page is None:
            logging.warn("Error looking up number of pages.")
            exit(1)
    else:
        end_page = int(end_page)

    max_records = ((end_page - start_page) + 1) * page_size

    # Cache hostnames in a dict for de-duping.
    hostnames_map = {}

    fields = [
        "parsed.subject.common_name",
        "parsed.extensions.subject_alt_name.dns_names"
    ]

    current_page = start_page

    logging.warn("Fetching up to %i records, starting at page %i." % (max_records, start_page))

    last_cached = False
    force = options.get("force", False)

    while current_page <= end_page:
        if (not last_cached) and (current_page > start_page):
            logging.debug("(Waiting %is before fetching page %i.)" % (delay, current_page))
            last_cached = False
            time.sleep(delay)

        logging.debug("Fetching page %i." % current_page)

        cache_page = utils.cache_path(str(current_page), "censys")
        if (force is False) and (os.path.exists(cache_page)):
            logging.warn("\t[%i] Cached page." % current_page)
            last_cached = True

            certs_raw = open(cache_page).read()
            certs = json.loads(certs_raw)
            if (certs.__class__ is dict) and certs.get('invalid'):
                continue
        else:
            try:
                certs = list(certificate_api.search(query, fields=fields, page=current_page, max_records=page_size))
                utils.write(utils.json_for(certs), cache_page)
            except censys.base.CensysException:
                logging.warn(utils.format_last_exception())
                logging.warn("Censys error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                continue
            except:
                logging.warn(utils.format_last_exception())
                logging.warn("Unexpected error, skipping page %i." % current_page)
                utils.write(utils.invalid({}), cache_page)
                exit(1)

        for cert in certs:
            # Common name + SANs
            names = cert.get('parsed.subject.common_name', []) + cert.get('parsed.extensions.subject_alt_name.dns_names', [])
            logging.debug(names)

            for name in names:
                # Strip off any wildcard prefix.
                name = re.sub(wildcard_pattern, '', name).lower().strip()
                # Strip off any redacted ? prefixes. (Ugh.)
                name = re.sub(redacted_pattern, '', name).lower().strip()
                hostnames_map[name] = None

        current_page += 1

    logging.debug("Done fetching from API.")

    # Iterator doesn't buy much efficiency, since we paginated already.
    # Necessary evil to de-dupe before returning hostnames, though.
    for hostname in hostnames_map.keys():
        yield hostname
def scan(domain, options):
    logging.debug("[%s][tls]" % domain)

    # If inspection data exists, check to see if we can skip.
    inspection = utils.data_for(domain, "inspect")
    if inspection and (not inspection.get("support_https")):
        logging.debug("\tSkipping, HTTPS not supported in inspection.")
        return None

    else:
        # cache reformatted JSON from ssllabs
        cache = utils.cache_path(domain, "tls")

        force = options.get("force", False)

        if (force is False) and (os.path.exists(cache)):
            logging.debug("\tCached.")
            raw = open(cache).read()
            data = json.loads(raw)
            if data.get("invalid"):
                return None
        else:
            logging.debug("\t %s %s" % (command, domain))

            usecache = str(not force).lower()

            if options.get("debug"):
                cmd = [command, "--usecache=%s" % usecache, "--verbosity=debug", domain]
            else:
                cmd = [command, "--usecache=%s" % usecache, "--quiet", domain]
            raw = utils.scan(cmd)

            if raw:
                data = json.loads(raw)

                # we only give ssllabs-scan one at a time,
                # so we can de-pluralize this
                data = data[0]

                # if SSL Labs had an error hitting the site, cache this
                # as an invalid entry.
                if data["status"] == "ERROR":
                    utils.write(utils.invalid(data), cache)
                    return None

                utils.write(utils.json_for(data), cache)
            else:
                return None
                # raise Exception("Invalid data from ssllabs-scan: %s" % raw)

        # can return multiple rows, one for each 'endpoint'
        for endpoint in data["endpoints"]:

            # this meant it couldn't connect to the endpoint
            if not endpoint.get("grade"):
                continue

            sslv3 = False
            tlsv12 = False
            for protocol in endpoint["details"]["protocols"]:
                if (protocol["name"] == "SSL") and (protocol["version"] == "3.0"):
                    sslv3 = True
                if (protocol["name"] == "TLS") and (protocol["version"] == "1.2"):
                    tlsv12 = True

            spdy = False
            h2 = False
            npn = endpoint["details"].get("npnProtocols", None)
            if npn:
                spdy = "spdy" in npn
                h2 = "h2-" in npn

            yield [
                endpoint["grade"],
                endpoint["details"]["cert"]["sigAlg"],
                endpoint["details"]["key"]["alg"],
                endpoint["details"]["key"]["size"],
                endpoint["details"]["forwardSecrecy"],
                endpoint["details"]["ocspStapling"],
                endpoint["details"].get("fallbackScsv", "N/A"),
                endpoint["details"]["supportsRc4"],
                sslv3,
                tlsv12,
                spdy,
                endpoint["details"]["sniRequired"],
                h2,
            ]