def delete_record_set(self, name, record_type):
    """
    Deletes a record set

    Args:
        name (str): The DNS name (i.e. the fully-qualified domain name)
        record_type (str): The DNS record type

    Raises:
        gcpdns.RecordSetNotFound
    """
    logger.info("Deleting record set: {0} {1}".format(name, record_type))
    # The registrable domain doubles as the zone cache key
    tld = publicsuffix2.get_public_suffix(name).lower()
    if tld in ZONE_CACHE:
        zone = ZONE_CACHE[tld]
    else:
        zone = self.get_zone(tld)
        ZONE_CACHE[tld] = zone
    # Normalize the name to the zone's fully-qualified DNS name
    name = "{0}{1}".format(
        name.lower().rstrip(".").replace(zone.dns_name.rstrip("."), ""),
        zone.dns_name).lstrip(".")
    record_type = record_type.upper()
    record_to_delete = None
    change = zone.changes()
    records = zone.list_resource_record_sets()
    for record in records:
        if record.name == name and record.record_type == record_type:
            record_to_delete = record
    if record_to_delete is not None:
        change.delete_record_set(record_to_delete)
        change.create()
    else:
        raise RecordSetNotFound("Record set not found: {0} {1}".format(
            name, record_type))
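# A minimal self-contained sketch of the zone-cache key used above (an
# inference from the call, not part of the original file): the registrable
# domain returned by publicsuffix2.get_public_suffix serves as the key, so
# record sets under the same zone reuse a single Cloud DNS zone lookup.
import publicsuffix2

for fqdn in ("www.example.com", "mail.example.co.uk"):
    print(fqdn, "->", publicsuffix2.get_public_suffix(fqdn).lower())
# www.example.com -> example.com
# mail.example.co.uk -> example.co.uk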
from typing import Optional

from publicsuffix2 import get_public_suffix


def handleValue(value: str) -> Optional[str]:
    # Returns None when parsing fails, so the return type is
    # Optional[str] rather than str
    out = None
    try:
        out = get_public_suffix(value.strip())
    except Exception as e:
        print(f'Failed to parse value: {value} > {e}')
    return out
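# A hedged usage sketch for handleValue: inputs are trimmed and reduced to
# their registrable domain, with None returned on failure, so callers
# should filter out Nones. The sample values below are illustrative only.
values = ["  www.example.com\n", "sub.example.co.uk"]
domains = [d for d in (handleValue(v) for v in values) if d is not None]
print(domains)  # ['example.com', 'example.co.uk']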
def get_domain(hostname: str) -> str:
    """
    Get the first private part of a hostname after the public suffix,
    but with exceptions where it's known that different hosts have
    different owners. There's one current exception to the PSL: masto.host.
    """
    # Note: `publicsuffix2.get_public_suffix` really should have been called
    # `get_private_suffix`. `get_public_suffix('example.com')` sounds like
    # it'd return `com`, but actually returns `example.com`.
    private_suffix = publicsuffix2.get_public_suffix(hostname)
    if private_suffix in _multi_user_domains:
        return hostname
    return private_suffix
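# A usage sketch for get_domain, assuming `import publicsuffix2` at module
# level and a module-level set like the one below. The exact contents of
# _multi_user_domains are an assumption; the docstring only names masto.host.
_multi_user_domains = {"masto.host"}

print(get_domain("social.example.com"))  # example.com
print(get_domain("alice.masto.host"))    # alice.masto.host (per-user host kept)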
def get_base_domain(domain, use_fresh_psl=False):
    """
    Gets the base domain name for the given domain

    .. note::
        Results are based on a list of public domain suffixes at
        https://publicsuffix.org/list/public_suffix_list.dat.

    Args:
        domain (str): A domain or subdomain
        use_fresh_psl (bool): Download a fresh Public Suffix List

    Returns:
        str: The base domain of the given domain
    """
    psl_path = os.path.join(tempdir, "public_suffix_list.dat")

    def download_psl():
        url = "https://publicsuffix.org/list/public_suffix_list.dat"
        # Use a browser-like user agent string to bypass some proxy blocks
        headers = {"User-Agent": USER_AGENT}
        try:
            fresh_psl = requests.get(url, headers=headers).text
            with open(psl_path, "w", encoding="utf-8") as fresh_psl_file:
                fresh_psl_file.write(fresh_psl)
        except Exception as error:
            raise DownloadError(
                "Failed to download an updated PSL {0}".format(error))

    if use_fresh_psl:
        if not os.path.exists(psl_path):
            download_psl()
        else:
            psl_age = datetime.now() - datetime.fromtimestamp(
                os.stat(psl_path).st_mtime)
            if psl_age > timedelta(hours=24):
                download_psl()

        with open(psl_path, encoding="utf-8") as psl_file:
            psl = publicsuffix2.PublicSuffixList(psl_file)
            return psl.get_public_suffix(domain)
    else:
        return publicsuffix2.get_public_suffix(domain)
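# A hedged usage sketch for get_base_domain: the default path uses the PSL
# bundled with publicsuffix2, while use_fresh_psl=True downloads the live
# list into tempdir (and reuses the cached copy for up to 24 hours).
print(get_base_domain("www.example.co.uk"))                 # example.co.uk
print(get_base_domain("example.com", use_fresh_psl=True))   # example.com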
import collections

import publicsuffix2


def main():
    with open('mx_records.txt', 'r') as f:
        domains = dict()
        for line in f:
            # Lines look like "{host1,host2,...}"; strip the braces and
            # split on commas before reducing each host to its
            # registrable domain
            line = line.replace("{", '').replace('}', '').strip()
            name_servers = line.split(',')
            for name_server in name_servers:
                dom = publicsuffix2.get_public_suffix(name_server)
                # "dom in domains" is the idiomatic form of
                # domains.__contains__(dom)
                if dom in domains:
                    domains[dom] += 1
                else:
                    domains[dom] = 1
    od = collections.OrderedDict(sorted(domains.items()))
    with open('dnstats2.csv', 'w') as out:
        for dom in od:
            out.write("{},{},\n".format(dom, od.get(dom)))
            print(dom, od.get(dom))
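# A self-contained sketch of the per-line parsing above, using a made-up
# line in the same "{a,b,...}" shape the script expects in mx_records.txt.
import collections
import publicsuffix2

line = "{mx1.example.com,mx2.example.net}\n"
counts = collections.Counter(
    publicsuffix2.get_public_suffix(ns)
    for ns in line.replace("{", "").replace("}", "").strip().split(","))
print(dict(counts))  # {'example.com': 1, 'example.net': 1}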
def test_get_public_suffix_from_builtin_full_publicsuffix_org_using_func(self):
    assert 'com' == publicsuffix.get_public_suffix('COM')
    assert 'example.com' == publicsuffix.get_public_suffix('example.COM')
    assert 'example.com' == publicsuffix.get_public_suffix('WwW.example.COM')
def request(self, **kwargs):
    """The main request processor. This code implements all rendering of metadata.
    """
    if not self.ready:
        raise HTTPError(503, _("Service Unavailable (repository loading)"))

    pfx = kwargs.get('pfx', None)
    path = kwargs.get('path', None)
    content_type = kwargs.get('content_type', None)
    request_type = kwargs.get('request_type', "negotiate")

    log.debug("MDServer pfx=%s, path=%s, content_type=%s" % (pfx, path, content_type))

    def _d(x, do_split=True):
        dot = six.u('.')
        if x is not None:
            x = x.strip()
        # log.debug("_d(%s,%s)" % (x, do_split))
        if x is None or len(x) == 0:
            return None, None

        if x.startswith("{base64}"):
            x = safe_b64d(x[8:])
            if isinstance(x, six.binary_type):
                x = x.decode()

        if do_split and dot in x:
            (pth, _, extn) = x.rpartition(dot)
            if extn in _ctypes:
                return pth, extn

        return x, None

    _ctypes = {
        'xml': 'application/xml',
        'json': 'application/json',
        'htm': 'text/html',
        'html': 'text/html',
        'ds': 'text/html',
        's': 'application/json'
    }

    alias = None
    if pfx:
        alias = pfx
        pfx = self.aliases.get(alias, None)
        if pfx is None:
            raise NotFound()

    path, ext = _d(path, content_type is None)
    if pfx and path:
        q = "{%s}%s" % (pfx, path)
        path = "/%s/%s" % (alias, path)
    else:
        q = path

    if ext is not None:
        log.debug("request path: %s.%s, headers: %s" % (path, ext, cherrypy.request.headers))
    else:
        log.debug("request path: %s, headers: %s" % (path, cherrypy.request.headers))

    accept = {}
    if content_type is None:
        if ext is not None and ext in _ctypes:
            accept = {_ctypes[ext]: True}
        else:
            accept = MDServer.MediaAccept()
            if ext is not None:
                path = "%s.%s" % (path, ext)
    else:
        accept = {content_type: True}

    with self.lock.readlock:
        if ext == 'ds':
            pdict = dict()
            entity_id = kwargs.get('entityID', None)
            if entity_id is None:
                raise HTTPError(400, _("400 Bad Request - missing entityID"))

            e = self.md.store.lookup(entity_id)
            if e is None or len(e) == 0:
                raise HTTPError(404)

            if len(e) > 1:
                raise HTTPError(
                    400, _("Bad Request - multiple matches for") + " %s" % entity_id)

            pdict['entity'] = entity_simple_summary(e[0])
            if not path:
                pdict['search'] = "/search/"
                pdict['list'] = "/role/idp.json"
            else:
                pdict['search'] = "{}.s".format(escape(path, quote=True))
                pdict['list'] = "{}.json".format(escape(path, quote=True))

            pdict['storage'] = "/storage/"
            cherrypy.response.headers['Content-Type'] = 'text/html'
            return render_template(config.ds_template, **pdict)
        elif ext == 's':
            query = kwargs.get('query', None)
            entity_filter = kwargs.get('entity_filter', None)
            related = kwargs.get('related', None)

            cherrypy.response.headers['Content-Type'] = 'application/json'
            cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'

            if query is None:
                log.debug("empty query - creating one")
                query = [cherrypy.request.remote.ip]
                # The HTTP header is spelled "Referer" (the original looked
                # up 'referrer', which browsers never send)
                referrer = cherrypy.request.headers.get('Referer', None)
                if referrer is not None:
                    log.debug("including referrer: %s" % referrer)
                    url = urlparse(referrer)
                    host = url.netloc
                    if ':' in url.netloc:
                        (host, port) = url.netloc.split(':')
                    # Strip the registrable domain so only the host-specific
                    # labels become search terms. (The original used
                    # host.rstrip(get_public_suffix(host)), but str.rstrip
                    # treats its argument as a character set and can strip
                    # too much.)
                    suffix = get_public_suffix(host)
                    if suffix and host.endswith(suffix):
                        host = host[:-len(suffix)]
                    for host_part in host.rstrip('.').split('.'):
                        if host_part is not None and len(host_part) > 0:
                            query.append(host_part)
                    log.debug("created query: %s" % ",".join(query))

            return dumps(
                self.md.store.search(query,
                                     path=q,
                                     entity_filter=entity_filter,
                                     related=related))
        elif accept.get('text/html'):
            if not q:
                if pfx:
                    title = pfx
                else:
                    title = _("Metadata By Attributes")
                return render_template("index.html",
                                       md=self.md,
                                       samlmd=samlmd,
                                       alias=alias,
                                       aliases=self.aliases,
                                       title=title)
            else:
                entities = self.md.lookup(q)
                if not entities:
                    raise NotFound()
                if len(entities) > 1:
                    return render_template("metadata.html",
                                           md=self.md,
                                           samlmd=samlmd,
                                           subheading=q,
                                           entities=entities)
                else:
                    entity = entities[0]
                    return render_template(
                        "entity.html",
                        headline=entity_display_name(entity),
                        subheading=entity.get('entityID'),
                        entity_id=entity.get('entityID'),
                        samlmd=samlmd,
                        entity=entity_info(entity))
        else:
            for p in self.plumbings:
                state = {
                    'request': request_type,
                    'headers': {
                        'Content-Type': 'text/xml'
                    },
                    'accept': accept,
                    'url': cherrypy.url(relative=False),
                    'select': q,
                    'path': path,
                    'stats': {}
                }
                r = p.process(self.md, state=state)
                if r is not None:
                    cache_ttl = state.get('cache', 0)
                    log.debug("caching for %d seconds" % cache_ttl)
                    for k, v in list(state.get('headers', {}).items()):
                        cherrypy.response.headers[k] = v
                    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
                    caching.expires(secs=cache_ttl)
                    return r

    raise NotFound()
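# A standalone sketch of the referrer-based query building in the 's'
# branch above: the registrable domain is removed from the referrer host
# and the remaining labels become search terms. The URL is a hypothetical
# example, not taken from the source.
from urllib.parse import urlparse
from publicsuffix2 import get_public_suffix

referrer = "https://idp.campus.example.ac.uk/sso"
host = urlparse(referrer).netloc.split(":")[0]
suffix = get_public_suffix(host)
if suffix and host.endswith(suffix):
    host = host[:-len(suffix)]
terms = [part for part in host.rstrip(".").split(".") if part]
print(terms)  # ['idp', 'campus'] -- 'example.ac.uk' is the registrable domain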
def create_or_replace_record_set(self, name, record_type, data,
                                 ttl=DEFAULT_TTL, replace=False):
    """
    Adds or replaces a DNS resource record set

    Args:
        name (str): The DNS name (i.e. the fully-qualified domain name)
        record_type (str): The DNS record type
        data: A list of resource record data strings, or a string of one
        or more resource records separated by |
        ttl (int): DNS time to live (in seconds)
        replace (bool): Replace existing record set if needed

    Raises:
        gcpdns.ExistingRecordSetFound
    """
    # The registrable domain doubles as the zone cache key
    tld = publicsuffix2.get_public_suffix(name).lower()
    if tld in ZONE_CACHE:
        zone = ZONE_CACHE[tld]
    else:
        zone = self.get_zone(tld)
        ZONE_CACHE[tld] = zone
    # Normalize the name to the zone's fully-qualified DNS name
    name = "{0}{1}".format(
        name.lower().rstrip(".").replace(zone.dns_name.rstrip("."), ""),
        zone.dns_name).lstrip(".")
    record_type = record_type.upper()
    if ttl is None:
        ttl = DEFAULT_TTL
    ttl = int(ttl)
    old_record_set = None
    change = zone.changes()
    for r_set in zone.list_resource_record_sets():
        if r_set.name == name and r_set.record_type == record_type:
            old_record_set = r_set
            if not replace:
                raise ExistingRecordSetFound(
                    "Existing record set found: {0} {1} {2} {3}".format(
                        r_set.name, r_set.record_type, r_set.ttl,
                        r_set.rrdatas))
            change.delete_record_set(r_set)
    # isinstance is the idiomatic form of type(data) == str
    if isinstance(data, str):
        if record_type == "CNAME":
            data = "{0}.".format(data.rstrip("."))
            data = [data]
        elif record_type == "TXT":
            new_data = []
            data = data.split("|")
            for r_set in data:
                r_set = QUOTE_SPACE.sub("", r_set)
                # A single TXT character-string holds at most 255 octets,
                # so longer values are split into multiple quoted chunks
                split_txt = textwrap.wrap(r_set, 253)
                for i in range(len(split_txt)):
                    split_txt[i] = '"{0}"'.format(split_txt[i])
                new_data.append("".join(split_txt))
            data = new_data.copy()
        else:
            data = data.split("|")
    if record_type in ["CNAME", "MX", "NS", "PTR", "SRV"]:
        for i in range(len(data)):
            data[i] = "{0}.".format(data[i].rstrip("."))
    if old_record_set is None:
        logging.info("Adding record set: {0} {1} {2} {3}".format(
            name, record_type, ttl, data))
    else:
        logging.info("Replacing record set: {0} {1} {2} {3} "
                     "with: {4} {5} {6} {7}".format(
                         old_record_set.name, old_record_set.record_type,
                         old_record_set.ttl, old_record_set.rrdatas,
                         name, record_type, ttl, data))
    r_set = zone.resource_record_set(name, record_type, ttl, rrdatas=data)
    change.add_record_set(r_set)
    change.create()
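# A self-contained sketch of the TXT splitting above: values longer than
# 253 characters are wrapped into multiple quoted chunks, because a single
# character-string inside a TXT record may hold at most 255 octets. The
# sample value is illustrative.
import textwrap

value = "v=spf1 " + "include:spf.example.net " * 15 + "-all"
chunks = ['"{0}"'.format(part) for part in textwrap.wrap(value, 253)]
rrdata = "".join(chunks)
print(len(value), "chars ->", len(chunks), "quoted chunk(s)")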