def _RecordSetFromZoneRecord(name, rdset, origin, api_version='v1'):
  """Returns the Cloud DNS ResourceRecordSet for the given zone file record.

  Args:
    name: Name, Domain name of the zone record.
    rdset: Rdataset, The zone record object.
    origin: Name, The origin domain of the zone file.
    api_version: [str], the api version to use for creating the records.

  Returns:
    The ResourceRecordSet equivalent for the given zone record, or None for
    unsupported record types.
  """
  if GetRdataTranslation(rdset.rdtype) is None:
    return None

  messages = core_apis.GetMessagesModule('dns', api_version)
  record_set = messages.ResourceRecordSet()
  # Need to assign kind to default value for useful equals comparisons.
  record_set.kind = record_set.kind
  record_set.name = name.derelativize(origin).to_text()
  record_set.ttl = rdset.ttl
  record_set.type = rdatatype.to_text(rdset.rdtype)
  rdatas = []
  for rdata in rdset:
    rdatas.append(GetRdataTranslation(rdset.rdtype)(rdata, origin))
  record_set.rrdatas = rdatas
  return record_set
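# Usage sketch (not part of the snippet above): one way to drive the converter
# over a zone file parsed with dnspython. The file path and wrapper name are
# hypothetical; GetRdataTranslation and core_apis are assumed to be in scope
# as in the function above.
import dns.zone


def _record_sets_from_zone_file(zone_file_path, domain, api_version='v1'):
  zone = dns.zone.from_file(zone_file_path, origin=domain, check_origin=False)
  record_sets = []
  for rname, rdset in zone.iterate_rdatasets():
    record_set = _RecordSetFromZoneRecord(rname, rdset, zone.origin,
                                          api_version=api_version)
    if record_set is not None:  # None means an unsupported record type.
      record_sets.append(record_set)
  return record_sets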
def _create_records(self, context, zone_id, dnspython_zone):
    """ Creates the records """
    for record_name in dnspython_zone.nodes.keys():
        for rdataset in dnspython_zone.nodes[record_name]:
            record_type = rdatatype.to_text(rdataset.rdtype)

            if record_type == 'SOA':
                continue

            # Create the recordset
            values = {
                'domain_id': zone_id,
                'name': record_name.to_text(),
                'type': record_type,
            }

            recordset = self.central_api.create_recordset(
                context, zone_id, RecordSet(**values))

            for rdata in rdataset:
                if (record_type == 'NS'
                        and record_name == dnspython_zone.origin):
                    # Don't create NS records for the domain, they've been
                    # taken care of as servers
                    pass
                else:
                    # Everything else, including delegation NS, gets
                    # created
                    values = self._record2json(record_type, rdata)

                    self.central_api.create_record(
                        context, zone_id, recordset['id'], Record(**values))
def _create_records(self, context, zone_id, dnspython_zone):
    """ Creates the records """
    for record_name in dnspython_zone.nodes.keys():
        for rdataset in dnspython_zone.nodes[record_name]:
            record_type = rdatatype.to_text(rdataset.rdtype)

            if record_type == 'SOA':
                continue

            # Create the recordset
            values = {
                'domain_id': zone_id,
                'name': record_name.to_text(),
                'type': record_type,
            }

            recordset = central_api.create_recordset(
                context, zone_id, values)

            for rdata in rdataset:
                if (record_type == 'NS'
                        and record_name == dnspython_zone.origin):
                    # Don't create NS records for the domain, they've been
                    # taken care of as servers
                    pass
                else:
                    # Everything else, including delegation NS, gets
                    # created
                    values = self._record2json(record_type, rdata)

                    central_api.create_record(context, zone_id,
                                              recordset['id'], values)
def _create_records(self, context, zone_id, dnspython_zone):
    """Creates the records"""
    for record_name in dnspython_zone.nodes.keys():
        for rdataset in dnspython_zone.nodes[record_name]:
            record_type = rdatatype.to_text(rdataset.rdtype)

            if (record_type == 'NS') or (record_type == 'SOA'):
                # Don't create SOA or NS recordsets, as they are
                # created automatically when a domain is created
                pass
            else:
                # Create the other recordsets
                values = {
                    'domain_id': zone_id,
                    'name': record_name.to_text(),
                    'type': record_type
                }

                recordset = self.central_api.create_recordset(
                    context, zone_id, RecordSet(**values))

            for rdata in rdataset:
                if (record_type == 'NS') or (record_type == 'SOA'):
                    pass
                else:
                    # Everything else, including delegation NS, gets
                    # created
                    values = self._record2json(record_type, rdata)

                    self.central_api.create_record(
                        context, zone_id, recordset['id'], Record(**values))
def _RecordSetFromZoneRecord(name, rdset, origin):
  """Returns the Cloud DNS ResourceRecordSet for the given zone file record.

  Args:
    name: Name, Domain name of the zone record.
    rdset: Rdataset, The zone record object.
    origin: Name, The origin domain of the zone file.

  Returns:
    The ResourceRecordSet equivalent for the given zone record, or None for
    unsupported record types.
  """
  if rdset.rdtype not in RDATA_TRANSLATIONS:
    return None

  record_set = messages.ResourceRecordSet()
  # Need to assign kind to default value for useful equals comparisons.
  record_set.kind = record_set.kind
  record_set.name = name.derelativize(origin).to_text()
  record_set.ttl = rdset.ttl
  record_set.type = rdatatype.to_text(rdset.rdtype)
  rdatas = []
  for rdata in rdset:
    rdatas.append(RDATA_TRANSLATIONS[rdset.rdtype](rdata, origin))
  record_set.rrdatas = rdatas
  return record_set
def _write_resp(self, dgram, rtime, ttype, csv):
    if csv:
        print >>csv, '%s,%s,%s,%s,%s,%.3f,%s,%d,%d,%s' % (
            self.service.name, self.address,
            str(dgram.question[0].name), dt.to_text(dgram.question[0].rdtype),
            ttype, rtime * 1000, dc.to_text(dgram.rcode()), len(dgram.answer),
            dgram.answer[0].ttl if len(dgram.answer) > 0 else 0,
            '|'.join([str(rd) for rd in self._get_rdata(dgram)]))
def _create_records(self, context, zone_id, dnspython_zone):
    """ Creates the records """
    for record_name in dnspython_zone.nodes.keys():
        for rdataset in dnspython_zone.nodes[record_name]:
            record_type = rdatatype.to_text(rdataset.rdtype)

            if record_type == "SOA":
                continue

            # Create the recordset
            values = {
                "domain_id": zone_id,
                "name": record_name.to_text(),
                "type": record_type,
            }

            recordset = self.central_api.create_recordset(
                context, zone_id, values)

            for rdata in rdataset:
                if record_type == "NS" and record_name == dnspython_zone.origin:
                    # Don't create NS records for the domain, they've been
                    # taken care of as servers
                    pass
                else:
                    # Everything else, including delegation NS, gets
                    # created
                    values = self._record2json(record_type, rdata)

                    self.central_api.create_record(
                        context, zone_id, recordset["id"], values)
def __prepare_records_update_dict(self, node):
    update_dict = defaultdict(list)
    for rdataset in node:
        for rdata in rdataset:
            option_name = (record_name_format
                           % rdatatype.to_text(rdata.rdtype).lower())
            update_dict[option_name].append(unicode(rdata.to_text()))
    return update_dict
def from_rdata(rdata, ttl=300):
    if not isinstance(rdata, dns.rdata.Rdata):
        raise TypeError('Invalid rdata')
    rtype = rdt.to_text(rdata.rdtype)
    rclass = rdc.to_text(rdata.rdclass)
    rec = Record(rtype, rclass, ttl)
    rec.from_rdata(rdata, ttl)
    return rec
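# Hedged sketch: building a dnspython rdata by hand and wrapping it with the
# from_rdata() factory above. dns.rdata.from_text() is standard dnspython; the
# address and TTL are arbitrary example values.
import dns.rdata
import dns.rdataclass
import dns.rdatatype

a_rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
a_record = from_rdata(a_rdata, ttl=600)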
def format_answer(key, **kwargs):
    rtype, rdata = key
    return "[h1]{}:[/h1] {} [h3]({})[/h3]".format(
        # Record type
        rdatatype.to_text(rtype),
        # Unescape strings in TXT records etc
        rdata.decode('string_escape') if "\\" in rdata else rdata,
        # TTL, and any other value
        ", ".join(
            [str(kwargs.pop("TTL", ""))] +
            ["{}: {}".format(k, v) for k, v in kwargs.items()]))
def records_list_from_node(cls, name, node):
    records = []
    for rdataset in node:
        for rd in rdataset:
            records.append(
                '{name} {ttl} {rdclass} {rdtype} {rdata}'.format(
                    name=name.ToASCII(),
                    ttl=rdataset.ttl,
                    rdclass=rdataclass.to_text(rd.rdclass),
                    rdtype=rdatatype.to_text(rd.rdtype),
                    rdata=rd.to_text()))
    return records
def records_list_from_node(cls, name, node):
    records = []
    for rdataset in node:
        for rd in rdataset:
            records.append(
                u'{name} {ttl} {rdclass} {rdtype} {rdata}'.format(
                    name=name.ToASCII(),
                    ttl=rdataset.ttl,
                    rdclass=rdataclass.to_text(rd.rdclass),
                    rdtype=rdatatype.to_text(rd.rdtype),
                    rdata=rd.to_text()
                )
            )
    return records
def resolve_name(name):
    _log.debug("DNS resolve for %s", name)
    if libnet.is_ipaddr(name):
        name = reversename.from_address(name)
    msg = message.make_query(name, "ANY")
    resp = query.tcp(msg, _config["nameserver"])
    dnslines = []
    for rr in resp.answer:
        rrtype = rdatatype.to_text(rr.rdtype)
        if rrtype in _config["dns_rr_types"]:
            if rrtype != "TXT":
                dnslines += ["{} {} {}".format(
                    name, rrtype,
                    str(r) if rrtype != "TXT" else str(r).lower())
                    for r in rr]
    dnslines.sort()
    result = "\n".join(dnslines)
    return result
def get_rrs_from_rrsets(rrsets):
    """This works for answer, authority, and additional rrsets"""
    rr_list = []
    for rrset in rrsets:
        common_rr_dict = {
            "Name": str(rrset.name),
            "Type": rdatatype.to_text(rrset.rdtype),
            "Class": rdataclass.to_text(rrset.rdclass),
            "TTL": rrset.ttl  # TODO: doesn't each rr have its own ttl?
        }
        for rr in rrset:
            rr_dict = common_rr_dict.copy()
            rr_dict.update(get_record_specific_answer_fields(rr))
            rr_list.append(rr_dict)
    return rr_list
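# Hedged usage sketch: get_rrs_from_rrsets() takes the answer, authority, or
# additional section of a dnspython response message. The query below is
# illustrative only and assumes dnspython >= 2.0 (dns.resolver.resolve) and
# that get_record_specific_answer_fields() handles A records.
import dns.resolver


def example_answer_rrs(qname="example.com"):
    answer = dns.resolver.resolve(qname, "A")
    return get_rrs_from_rrsets(answer.response.answer)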
def dnspythonrecord_to_recordset(rname, rdataset):
    record_type = rdatatype.to_text(rdataset.rdtype)

    # Create the other recordsets
    values = {"name": rname.to_text(), "type": record_type}

    if rdataset.ttl != 0:
        values["ttl"] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()

    for rdata in rdataset:
        rr = objects.Record(data=rdata.to_text())
        rrset.records.append(rr)

    return rrset
def dnspythonrecord_to_recordset(rname, rdataset):
    record_type = rdatatype.to_text(rdataset.rdtype)

    # Create the other recordsets
    values = {'name': rname.to_text(), 'type': record_type}

    if rdataset.ttl != 0L:
        values['ttl'] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()

    for rdata in rdataset:
        rr = objects.Record(data=rdata.to_text())
        rrset.records.append(rr)

    return rrset
def compare_section(self, other, my_rrsets, other_rrsets, ttl_tolerance,
                    verbose, side_by_side):
    """ Compare the RRsets in one section to the RRsets in another section """
    # count the number of different RR's based on any difference in the
    # number of RRs in each RRset
    Same = Diff = 0

    # now compare RRs from the two RRsets
    my_cmp_rrsets = my_rrsets[:]
    other_cmp_rrsets = other_rrsets[:]
    for my_rrset in my_rrsets:
        if my_rrset in other_cmp_rrsets:
            my_cmp_rrsets.remove(my_rrset)
            other_cmp_rrsets.remove(my_rrset)
            Same += len(my_rrset)
    Diff += len(my_cmp_rrsets) + len(other_cmp_rrsets)
    N = Same + Diff

    if N == Same and Diff == 0:
        # the RRsets match...
        ttls_match = True
        if ttl_tolerance >= 0:
            for my_rrset, other_rrset in zip(my_rrsets, other_rrsets):
                if abs(my_rrset.ttl - other_rrset.ttl) > ttl_tolerance:
                    txt_rdtype = rdatatype.to_text(my_rrset.rdtype)
                    if not side_by_side:
                        self.report(
                            "Equal but TTL of %s rrset differs (%d!=%d), rtt=%d/%d"
                            % (txt_rdtype, my_rrset.ttl, other_rrset.ttl,
                               _ms(self.rtt_time), _ms(other.rtt_time), ))
                    ttls_match = False
                    break
        if side_by_side:
            if not ttls_match:
                self.report_side_by_side(my_rrsets, other_rrsets)
            else:
                if verbose:
                    self.report("Equal (Size=%d, rtt=%d/%d)" %
                                (N, _ms(self.rtt_time), _ms(other.rtt_time), ))
        else:
            if not ttls_match or verbose:
                self.report("Equal (Size=%d, rtt=%d/%d)" %
                            (N, _ms(self.rtt_time), _ms(other.rtt_time), ))
    else:
        # the RRsets differ
        if side_by_side:
            self.report_side_by_side(my_rrsets, other_rrsets)
        else:
            self.report("Differ: Size=%d, Equal: %d, Differ: %d, rtt=%d/%d" %
                        (N, Same, Diff, _ms(self.rtt_time),
                         _ms(other.rtt_time)))
def from_file(self, filename):
    z = dns.zone.from_file(filename)
    origin = z.origin.to_text()
    if origin[-1:] == '.':
        origin = origin[:-1]
    if origin != self._zname:
        raise Exception('Invalid origin')
    for name in z.nodes:
        rname = name.to_text()
        self._records[rname] = {}
        for rdataset in z.nodes[name].rdatasets:
            rtype = rdt.to_text(rdataset.rdtype)
            ttl = rdataset.ttl
            self._records[rname][rtype] = []
            for rdata in rdataset.items:
                rec = record.from_rdata(rdata, ttl)
                self.add_record(rname, rec)
    self._getSOA()
    self._filename = filename
def dnspythonrecord_to_recordset(rname, rdataset):
    record_type = rdatatype.to_text(rdataset.rdtype)

    # Create the other recordsets
    values = {
        'name': rname.to_text().decode('utf-8'),
        'type': record_type
    }

    if rdataset.ttl != 0:
        values['ttl'] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()

    for rdata in rdataset:
        rr = objects.Record(data=rdata.to_text())
        rrset.records.append(rr)

    return rrset
def _create_records(self, context, zone_id, dnspython_zone):
    """ Creates the records """
    for record_name in dnspython_zone.nodes.keys():
        for rdataset in dnspython_zone.nodes[record_name]:
            record_type = rdatatype.to_text(rdataset.rdtype)

            for rdata in rdataset:
                if record_type == 'SOA':
                    # Don't create SOA records
                    pass
                elif (record_type == 'NS'
                        and record_name == dnspython_zone.origin):
                    # Don't create NS records for the domain, they've been
                    # taken care of as servers
                    pass
                else:
                    # Everything else, including delegation NS, gets
                    # created
                    values = self._record2json(record_type, rdata)
                    values['name'] = record_name.to_text()

                    central_api.create_record(context, zone_id, values)
def dnspythonrecord_to_recordset(rname, rdataset):
    record_type = rdatatype.to_text(rdataset.rdtype)

    name = rname.to_text()
    if six.PY3 and isinstance(name, bytes):
        name = name.decode('utf-8')

    # Create the other recordsets
    values = {'name': name, 'type': record_type}

    if rdataset.ttl != 0:
        values['ttl'] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()

    for rdata in rdataset:
        rr = objects.Record(data=rdata.to_text())
        rrset.records.append(rr)

    return rrset
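# Hedged sketch of driving dnspythonrecord_to_recordset() over a zone parsed
# from text with dnspython, skipping SOA the way the import code in the
# earlier snippets does. The zone text and wrapper name are illustrative only.
import dns.zone
from dns import rdatatype


def recordsets_from_zone_text(zone_text, origin):
    zone = dns.zone.from_text(zone_text, origin=origin, check_origin=False)
    recordsets = []
    for rname, rdataset in zone.iterate_rdatasets():
        if rdataset.rdtype == rdatatype.SOA:
            continue
        recordsets.append(dnspythonrecord_to_recordset(rname, rdataset))
    return recordsets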
def get_question(query):
    return {
        "Qname": str(query.qname),
        "Qtype": rdatatype.to_text(query.rdtype),
        "Qclass": rdataclass.to_text(query.rdclass)
    }
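# Note: get_question() only needs an object exposing .qname, .rdtype and
# .rdclass; the exact query object in the original code base isn't shown here,
# but a dns.resolver.Answer carries those attributes. Hedged example:
import dns.resolver


def example_question(qname="example.com"):
    answer = dns.resolver.resolve(qname, "A")
    return get_question(answer)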
def sync_zone(domain_records_url, domain):
    # Synchronise zone
    print "\nSynchronising DNS zone for", domain, "..."

    # First get all the existing records
    existing_records = requests.get(domain_records_url + "?per_page=9999",
                                    headers=headers).json().get(
                                        'domain_records', [])

    # Create an array to hold all the updated records
    updated_records = []

    # Create an array to hold synchronised record IDs
    synced_record_ids = []

    # Get the BIND raw DNS dump
    bindfile = bindfolder + domain + bindextension
    with open(bindfile, "r") as dns_file:
        dns_dump = dns_file.read()

    dns_dump = "$ORIGIN {0}.\n{1}".format(domain, dns_dump)
    zone = dns.zone.from_text(dns_dump)

    for name, node in zone.nodes.items():
        name = str(name)
        print "\nRecord name:", name
        print "Qualified name:", qualifyName(name, domain)

        rdatasets = node.rdatasets
        for rset in rdatasets:
            print "--> TTL:", str(rset.ttl)
            print "--> Type:", rdatatype.to_text(rset.rdtype)

            for rdata in rset:
                data = None
                priority = None
                port = None
                weight = None

                if rset.rdtype == MX:
                    priority = rdata.preference
                    print "--> Priority:", priority
                    if unicode(rdata.exchange) == "@":
                        data = "%s." % (domain)
                    else:
                        data = "%s.%s." % (rdata.exchange, domain)
                elif rset.rdtype == CNAME:
                    if unicode(rdata) == "@":
                        data = "@"
                    else:
                        data = rdata.target
                elif rset.rdtype == A:
                    data = rdata.address
                elif rset.rdtype == AAAA:
                    data = rdata.address.lower()
                elif rset.rdtype == NS:
                    data = rdata.target
                elif rset.rdtype == SRV:
                    priority = rdata.priority
                    weight = rdata.weight
                    port = rdata.port
                    data = rdata.target
                elif rset.rdtype == TXT:
                    data = " ".join('"{0}"'.format(string)
                                    for string in rdata.strings)

                if data:
                    print "--> Data:", data
                    data = unicode(data)
                    type = rdatatype.to_text(rset.rdtype)

                    # Try and find an existing record
                    record_id = None
                    for record in existing_records:
                        if (type in ["CNAME", "MX", "NS", "SRV"]
                                and data[-1:] == "."):
                            check_data = data[:-1]
                        else:
                            check_data = data

                        if (record['name'] == name
                                and record['type'] == type
                                and record['data'] == check_data):
                            record_id = record['id']
                            synced_record_ids.append(record_id)
                            break

                    if record_id:
                        print "--> Already exists, skipping"
                    else:
                        if (type in ["CNAME", "MX", "NS", "SRV"]
                                and data != "@" and data[-1:] != "."):
                            data = "{0}.{1}.".format(data, domain)

                        post_data = {
                            "type": type,
                            "name": name,
                            "data": data,
                            "priority": priority,
                            "port": port,
                            "weight": weight
                        }

                        # Collect records to be updated into the
                        # updated_records array
                        print "--> Queuing to update"
                        updated_records.append(post_data)

    # Delete any records that exist with DigitalOcean that have been removed
    print "\nRemoving deleted records"
    for record in existing_records:
        if record['id'] not in synced_record_ids and record['type'] != 'SOA':
            response = requests.delete(
                "{0}/{1}".format(domain_records_url, record["id"]),
                headers=headers)
            if response.status_code == 204:
                print "--> Deleted record", record["name"], "IN", \
                    record["type"], record["data"]
            else:
                handle_error(response)
    print "--> Done"

    # Finally, post the responses for the updated records
    print "\nPosting updated records"
    for record in updated_records:
        response = requests.post(domain_records_url,
                                 data=json.dumps(record),
                                 headers=headers).json()
        if 'domain_record' in response:
            print "--> Updated record", record["name"], "IN", \
                record["type"], record["data"]
        else:
            handle_error(response)
    print "--> Done"

    print "\n--> Complete\n"
class JSONMapper(object):
    """Map Dnstap data to JSON.

    This particular implementation filters only client responses to A and AAAA
    queries, including CNAME chains. Chains are "ellipsed" in the middle if the
    estimated size of the resulting JSON blob is over MAX_BLOB.

    Since only Client Response type messages are processed you'll get better
    performance if you configure your DNS server to only send such messages.
    The expected specification for BIND in named.conf is:

        dnstap { client response; };
        dnstap-output unix "/tmp/dnstap";

    If you don't restrict the message type to client responses, a warning
    message will be printed for every new connection established.

    Subclassing to change Filtering or Output
    -----------------------------------------

    filter() -- change packet selection

        Override filter() to change the packets which get processed further.
        Some changes can be accomplished by changing MESSAGE_TYPE or
        ACCEPTED_RECORDS instead.

    MESSAGE_TYPE -- dnstap.Message.TYPE_* Dnstap message type

        Changes to this should be coordinated with your nameserver
        configuration (discussed above).

    ACCEPTED_RECORDS -- query types

        This is the set of question (question rdata type or qtype) data types
        which are accepted. The default is A and AAAA. The constants are
        defined in dns.rdatatype.

    FIELDS -- change the output data

        This list is used to populate a map which is then JSONified. Each
        entry in the list is an instance of FieldMapping, which ties a JSON
        name to a function which can extract the appropriate data.
    """

    # This should be safely below MTU, with the intent to avoid fragmentation.
    MAX_BLOB = 1024

    MESSAGE_TYPE = dnstap.Message.TYPE_CLIENT_RESPONSE
    ACCEPTED_RECORDS = {rdatatype.A, rdatatype.AAAA}

    FIELDS = (
        FieldMapping('client',
                     lambda self, p: str(p.field('query_address')[1])),
        FieldMapping('qtype',
                     lambda self, p: rdatatype.to_text(
                         p.field('response_message')[1].question[0].rdtype)),
        FieldMapping('status',
                     lambda self, p: rcode.to_text(
                         p.field('response_message')[1].rcode())),
        FieldMapping('chain',
                     lambda self, p: self.build_resolution_chain(p)))

    def build_resolution_chain(self, packet):
        """Build the (CNAME) resolution chain with ellipsization.

        CNAMEs should only have one RR each, right? CNAME chains should be
        short, right? Yeah. Right. So, each element in the chain is actually
        a list, and the total length of all of the elements in the list of
        lists cannot exceed MAX_BLOB or we start taking chunks out of the
        middle to make it smaller.
        """
        response = packet.field('response_message')[1]
        question = response.question[0].name.to_text().lower()

        # Deal with NXDOMAIN.
        if response.rcode() == rcode.NXDOMAIN:
            return [[question]]

        # Build a mapping of the rrsets.
        mapping = {
            rrset.name.to_text().lower(): rrset
            for rrset in response.answer
        }

        # Follow the question (CNAMEs) to an answer.
        names = [question]
        seen = set(names)
        chain = [[question]]
        while names:
            name = names.pop(0)
            if name in mapping:
                rr_values = [rr.to_text().lower() for rr in mapping[name]]
                if mapping[name].rdtype == rdatatype.CNAME:
                    for rr in rr_values:
                        if rr in seen:
                            continue
                        names.append(rr)
                        seen.add(rr)
                chain.append(rr_values)

        # Ellipsize if it exceeds MAX_BLOB.
        lengths = [sum((len(name) for name in e)) for e in chain]
        if sum(lengths) > self.MAX_BLOB:
            logging.warn(
                'Resolution chain for {} exceeds {}, ellipsizing.'.format(
                    question, self.MAX_BLOB))
            shortened = None
            while sum(lengths) > self.MAX_BLOB:
                if len(lengths) < 3:
                    break
                shortened = int(len(lengths) / 2)
                del lengths[shortened]
                del chain[shortened]
            if shortened:
                chain.insert(shortened, ['(...)'])

        return chain

    def filter(self, packet):
        """Return True if the packet should be processed further."""
        if packet.field('type')[1] != self.MESSAGE_TYPE:
            if self.performance_hint:
                logging.warn(
                    'PERFORMANCE HINT: Change your Dnstap config to restrict '
                    'it to client response only.')
                self.performance_hint = False
            return False
        if (packet.field('response_message')[1].question[0].rdtype
                not in self.ACCEPTED_RECORDS):
            return False
        return True

    def map_fields(self, packet):
        """Maps all of the fields to their values."""
        data = {}
        for field in self.FIELDS:
            field(data, self, packet)
        return data
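# The class above references FieldMapping without defining it. A minimal
# sketch that is consistent with how it is used (constructed with a JSON key
# plus an extractor, then invoked as field(data, self, packet)) might look
# like the following; the real implementation may differ, and in the original
# module it would have to be defined before JSONMapper, since FIELDS is
# evaluated at class-definition time.
class FieldMapping(object):
    """Ties a JSON field name to a function that extracts its value."""

    def __init__(self, json_name, extractor):
        self.json_name = json_name
        self.extractor = extractor

    def __call__(self, data, mapper, packet):
        # Store the extracted value under the mapped JSON name.
        data[self.json_name] = self.extractor(mapper, packet)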