def _iterate_records(self, zone):
    """Yield one ``(name, ttl, rdtype_text, rdata_text)`` tuple per rdata.

    Owner names are derelativized against the zone origin and rendered
    without the trailing dot; rdata is rendered fully qualified.
    """
    for owner, ttl, rdata in zone.iterate_rdatas():
        fqdn = owner.derelativize(origin=zone.origin)
        fqdn = fqdn.to_text(omit_final_dot=True)
        rdata_text = rdata.to_text(origin=zone.origin, relativize=False)
        yield fqdn, ttl, dns.rdatatype.to_text(rdata.rdtype), rdata_text
def _iterate_records(self, zone):
    """Yield one ``(name, ttl, rdtype_text, rdata_text)`` tuple per rdata.

    Owner names are derelativized against the zone origin and rendered
    without the trailing dot; rdata is rendered fully qualified.
    """
    for rname, ttl, rdata in zone.iterate_rdatas():
        name = rname.derelativize(origin=zone.origin)
        name = name.to_text(omit_final_dot=True)
        # Name.to_text() returns str on dnspython 2.x / Python 3; the old
        # unconditional .decode('utf-8') raised AttributeError there.
        # Decode only when we actually received bytes (matches the guarded
        # variant of this helper elsewhere in the project).
        if isinstance(name, bytes):
            name = name.decode('utf-8')
        data = rdata.to_text(origin=zone.origin, relativize=False)
        yield name, ttl, dns.rdatatype.to_text(rdata.rdtype), data
def _iterate_records(self, domain):
    """Generate ``(name, ttl, rdtype_text, rdata_text)`` for every rdata.

    Names are made absolute relative to the domain origin and printed
    without the final dot; rdata text is not relativized.
    """
    for owner, ttl, rdata in domain.iterate_rdatas():
        absolute = owner.derelativize(origin=domain.origin)
        text_name = absolute.to_text(omit_final_dot=True)
        text_data = rdata.to_text(origin=domain.origin, relativize=False)
        yield text_name, ttl, dns.rdatatype.to_text(rdata.rdtype), text_data
def rec_to_abbrev_text(name, ttl, klass, rdata):
    '''
    Translates a record to abbreviated text.

    For most records, this is the same as the to_text(); for others (such
    as RRSIG), it is truncated to attempt to fit on a single terminal line.

    :param str name: The owner name of the record.
    :param int ttl: The TTL for the record.
    :param int/str klass: The class of the record.
    :param obj rdata: The rdata of the record.
    :return: Text description of the record.
    '''
    if isinstance(rdata, dns.rdtypes.ANY.RRSIG.RRSIG):
        # pylint: disable=protected-access
        sig = dns.rdata._base64ify(rdata.signature)
        # Keep only the first and last 6 characters of the signature so the
        # record fits on one line.
        rdata_txt = '%s %d %d %d %s %s %d %s %s' % (
            dns.rdatatype.to_text(rdata.type_covered),
            rdata.algorithm,
            rdata.labels,
            rdata.original_ttl,
            dns.rdtypes.ANY.RRSIG.posixtime_to_sigtime(rdata.expiration),
            dns.rdtypes.ANY.RRSIG.posixtime_to_sigtime(rdata.inception),
            rdata.key_tag,
            rdata.signer,
            '%s...%s' % (sig[:6], sig[-6:]))
    else:
        rdata_txt = rdata.to_text(relativize=False)
    # Conditional expression rather than the old `cond and a or b` trick:
    # with and/or, a falsy string klass (e.g. '') was wrongly forwarded to
    # dns.rdataclass.to_text().
    klass_txt = (klass if isinstance(klass, (str, unicode))
                 else dns.rdataclass.to_text(klass))
    return '%s %d %s %s %s' % (
        name, ttl, klass_txt, rdata.__class__.__name__, rdata_txt)
def __prepare_records_update_dict(self, node):
    """Group the node's rdata text values by their per-type option name.

    Returns a ``defaultdict(list)`` mapping the formatted record-type
    option name (lower-cased rdtype) to a list of unicode rdata strings.
    """
    grouped = defaultdict(list)
    for rdataset in node:
        for entry in rdataset:
            key = record_name_format % rdatatype.to_text(entry.rdtype).lower()
            grouped[key].append(unicode(entry.to_text()))
    return grouped
def __prepare_records_update_dict(self, node):
    """Collect rdata text values keyed by the record-type option name.

    The option name is ``record_name_format`` applied to the lower-cased
    rdtype mnemonic; values are unicode renderings of each rdata.
    """
    collected = defaultdict(list)
    for rdataset in node:
        for item in rdataset:
            type_text = rdatatype.to_text(item.rdtype).lower()
            collected[record_name_format % type_text].append(
                unicode(item.to_text()))
    return collected
def _iterate_records(self, zone):
    """Yield ``(name, ttl, rdtype_text, rdata_text)`` for each zone rdata.

    On Python 3 a bytes owner name (older dnspython) is decoded to UTF-8
    so callers always receive text.
    """
    for owner, ttl, rdata in zone.iterate_rdatas():
        fqdn = owner.derelativize(origin=zone.origin)
        name = fqdn.to_text(omit_final_dot=True)
        if six.PY3 and isinstance(name, bytes):
            name = name.decode('utf-8')
        payload = rdata.to_text(origin=zone.origin, relativize=False)
        yield name, ttl, dns.rdatatype.to_text(rdata.rdtype), payload
def testInvalidDigestLength(self):
    # type: () -> None
    """A DS digest whose length disagrees with its digest type must fail."""
    bad_records = []
    for rdata in (example_ds_sha1, example_ds_sha256, example_ds_sha384):
        flags, digest = rdata.to_text().rsplit(' ', 1)
        # Sanity check: the unmodified text must still parse.
        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
                            f'{flags} {digest}')
        bad_records.append(f'{flags} {digest[:len(digest)//2]}')  # too short
        bad_records.append(f'{flags} {digest*2}')  # too long
    for record in bad_records:
        with self.assertRaises(dns.exception.SyntaxError) as cm:
            dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS, record)
        self.assertEqual('digest length inconsistent with digest type',
                         str(cm.exception))
def test_misc_escape(self):
    """Decimal escapes in SVCB alpn values: valid ones round-trip, malformed
    or out-of-range ones raise SyntaxError."""
    parsed = dns.rdata.from_text('in', 'svcb', '1 . alpn=\\010\\010')
    self.assertEqual(parsed.to_text(), '1 . alpn="\\010\\010"')
    for malformed in ('1 . alpn=\\0',
                      '1 . alpn=\\00',
                      '1 . alpn=\\00q',
                      '1 . alpn=\\256'):
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rdata.from_text('in', 'svcb', malformed)
    # This doesn't usually get exercised, so we do it directly.
    gp = dns.rdtypes.svcbbase.GenericParam.from_value('\\001\\002')
    self.assertEqual(gp.to_text(), '"\\001\\002"')
def generate_ptr(zone, relativize=False, zone_factory=Zone):
    """Generate PTR Zones for a given zone file and returns a dict with
    origin as key and the zone object as value.

    @param zone: the original zone object
    @type zone: dns.zone.Zone
    @param relativize: whether PTR names/targets in the generated zones
        are stored relative to their reverse origin
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @raises KeyError: if zone parameter is not a Zone object.
    @rtype: dict
    """
    if not isinstance(zone, Zone):
        # NOTE(review): KeyError is unusual for a type check (TypeError would
        # be conventional), but it is the documented contract — kept as-is.
        raise KeyError("zone parameter must be a Zone object")
    # Deep copy to be able to change the encapsulated rdata object without
    # modifying the original Zone.
    soa = copy.deepcopy(zone.find_rdataset(zone.origin, dns.rdatatype.SOA))
    # As SOA is a singleton type we can simply choose relativity
    # for the encapsulated rdata object for the first object in the set.
    # NOTE(review): the SOA is always derelativized here regardless of the
    # *relativize* parameter — presumably so the single SOA rdataset can be
    # shared by every generated reverse zone below; confirm.
    soa.items[0].choose_relativity(origin=zone.origin, relativize=False)
    ptr_zones = {}
    # Walk every A and AAAA record and file a PTR for its address.
    for rdt in (dns.rdatatype.A, dns.rdatatype.AAAA):
        for name, rdataset in zone.iterate_rdatasets(rdtype=rdt):
            # Absolute target name for the PTR rdata.
            c_name = copy.copy(name).derelativize(zone.origin)
            c_rdataset = copy.deepcopy(rdataset)
            for rdata in c_rdataset:
                # e.g. 192.0.2.1 -> 1.2.0.192.in-addr.arpa.
                ptr_addr = dns.reversename.from_address(rdata.to_text())
                # The reverse zone origin is the parent of the full
                # reverse name (drops the host label).
                ptr_origin = ptr_addr.parent()
                # One reverse zone per distinct origin, created lazily.
                z = ptr_zones.setdefault(
                    ptr_origin,
                    zone_factory(ptr_origin, soa.rdclass,
                                 relativize=relativize))
                # Every generated zone gets a copy of the forward SOA.
                z.replace_rdataset(ptr_origin, soa)
                n = z.get_node(ptr_addr, create=True)
                rd = dns.rdtypes.ANY.PTR.PTR(soa.rdclass,
                                             dns.rdatatype.PTR,
                                             c_name)
                rd.choose_relativity(ptr_origin, relativize)
                rds = n.get_rdataset(soa.rdclass, dns.rdatatype.PTR,
                                     rd.covers(), True)
                rds.add(rd)
    return ptr_zones
def threadCheck(self):
    # Worker-thread body: reads candidate subdomains from the shared
    # dictionary file and resolves each one, recording hits.
    while(True):
        subdomain = None
        try:
            # Try to read the next line from the wordlist.
            subdomain = self.dictionary['file-handler'].readline().strip().lower()
            # Advance the current-line counter (used for progress output).
            self.dictionary['current-line'] += 1
        except Exception as e:
            # Best-effort: a read failure is treated like end-of-file below.
            pass
        if(not subdomain):
            # No more lines.
            break
        # Compose the full domain name to look up.
        hostname = subdomain.strip() + '.' + self.dictionary['hostname-base']
        # Retry count for this hostname.
        retries = 0
        # Attempt loop (repeats only on timeout).
        while(True):
            # Report current progress.
            self.context.out(
                message=(
                    self.context.strings['methods']['dictionary']['progress-clear'] +
                    '\n'.join(self.context.strings['methods']['dictionary']['progress'])
                ),
                parseDict={
                    'hostname'      : hostname,
                    'current-line'  : "{:,}".format(self.dictionary['current-line']),
                    'total-lines'   : "{:,}".format(self.dictionary['n-subdomains-in-file']),
                    'percent-lines' : "{:,.2f}".format(((self.dictionary['current-line'] * 100) / self.dictionary['n-subdomains-in-file'])) + '%',
                    'total-threads' : self.dictionary['max-threads'],
                    'total-retries' : self.dictionary['retries']
                },
                end=''
            )
            # If the domain name resolves to an IP address, it exists.
            nsAnswer = None
            try:
                resolv = dns.resolver.Resolver()
                if(self.dictionary['nameservers']):
                    resolv.nameservers = self.dictionary['nameservers']
                # Issue a DNS query for the A record.
                nsAnswer = resolv.query(hostname, 'A', tcp=True)
                if(not nsAnswer):
                    # No IP address associated with the domain name.
                    break
                # Inspect each answer of the A record.
                for rdata in nsAnswer:
                    # Try to extract the IP address from the answer.
                    ip = rdata.to_text().strip('"')
                    if(not ip):
                        # No IP address in this answer.
                        continue
                    # Add the found subdomain to the global result stack.
                    self.context.addHostName(
                        hostname=hostname,
                        messageFormat=(
                            # Clear space of the buffer progress
                            self.context.strings['methods']['dictionary']['progress-clear'] +
                            # Show the subdomain found
                            self.context.strings['methods']['dictionary']['item-found'] +
                            # Make space for the buffer progress
                            self.context.strings['methods']['dictionary']['progress-pre']
                        )
                    )
                    # Stop scanning the answers of this DNS query.
                    break
                # Stop the retry loop: no retries needed, all is well.
                break
            except dns.resolver.NXDOMAIN:
                # The domain name does not exist.
                break
            except dns.resolver.Timeout:
                # Need to retry.
                # Update the retry counter for the current domain name.
                retries += 1
                # Update the global retry counter.
                self.dictionary['retries'] += 1
                # Reached the retry limit?
                if(retries > self.dictionary['max-retries']):
                    break
            except dns.exception.DNSException:
                # Unknown error.
                break
            except Exception as e:
                # Unknown error.
                break
def run(self): ''' Runs the HAxfr instance. @return tuple of (num_recs, soa_rdata, zone_hashvalue). @note A variety of exceptions can be thrown; caller should be prepared! ''' # Create master hasher. We'll use this to copy() individual record # hashers from, for speed (since hashlib.new() is not as fast as # directly hard-coding a specific algorithm, we'll do it just once): # master_hasher = hashlib.new(self._algorithm) bin_size = master_hasher.digest_size hex_size = bin_size * 2 # Initialize the hash accumulator: accum = '\x00' * bin_size # Initiate zone AXFR stream: xfr = dns.query.xfr(where=self._server, port=self._port, zone=self._zone, relativize=False, timeout=self._timeout, lifetime=self._lifetime) # Hash the zone. We'll pull messages from the xfr generator and # extract rrset's, rdataset's and ultimately rdata's from them. # We need to drill-down to individual rdata's because the higher-level # dnspython structures (such as rrset's and rdataset's) are collections # of records, whose orderings are random, thus making hashes of such # collections indeterminate: # soa_rdata = None num_recs = 0 for msg in xfr: for rrset in msg.answer: # Extract owner name from rrset: name = rrset.name # Convert rrset to rdataset: rdataset = rrset.to_rdataset() # Extract class, type and ttl from rdataset: rdclass = rdataset.rdclass rdtype = rdataset.rdtype if self._skipttl == True: ttl = 0 else: ttl = rdataset.ttl # For each rdata in the rdataset: for rdata in rdataset: # If this is the first record, it is the SOA, so save it # off, and don't process it (the AXFR will end with the # identical SOA so it will ultimately be included): # if soa_rdata is None: soa_rdata = rdata # It is not the first record; process it: else: num_recs = num_recs + 1 # Construct a "canonical" text-oriented representation # of the resource record that we'll hash (similar to # the BIND presentation format, except no origin, use # of FQDN's, and using single spaces to delimit fields). 
# We'll use this format (vs. others like wire format) # because it is comprehensible and easy to synthesize, # even in other environments: # rec = '%s %d %s %s %s' % ( name, ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(rdtype), rdata.to_text(relativize=False)) # Hash the resource record representation: hasher = master_hasher.copy() hasher.update(rec) hash = hasher.digest() # ADD the hash into the accumulator. We do this for # speed, vs. something like hashing the join()'d sorted # list of record hashes (which would require caching # and sorting every record hash value, increasing memory # consumption and processing time). Although the chosen # technique may not produce the "most strongly-hashed" # hash values, it should be sufficient for our purposes # (especially used with those hash algorithms that # produce larger values): # new_accum = cStringIO.StringIO() carry = 0 for i in xrange(bin_size - 1, -1, -1): byte_sum = ord(hash[i]) + ord(accum[i]) + carry byte = byte_sum % 256 carry = byte_sum // 256 new_accum.write(chr(byte)) accum = new_accum.getvalue()[::-1] # Dump the record/hash if specified: if self._dump and self._verbose: print '# %s <== "%s"' % (dns.rdata._hexify( hash, chunksize=hex_size), rec) elif self._dump: print '#', rec elif self._verbose: print '#', dns.rdata._hexify(hash, chunksize=hex_size) # Return the SOA rdata, and accumulated hash value as a hex string: return (num_recs, soa_rdata, dns.rdata._hexify(accum, chunksize=hex_size))
def threadCheck(self):
    # Worker-thread body: reads candidate subdomains from the shared
    # wordlist and resolves each one, recording any hit.
    while (True):
        subdomain = None
        try:
            # Try read the next line from dictionary
            subdomain = self.dictionary['file-handler'].readline().strip(
            ).lower()
            # Line count
            self.dictionary['current-line'] += 1
        except Exception as e:
            # Best-effort: a read failure is treated like end-of-file below.
            pass
        if (not subdomain):
            # No more lines
            break
        # The full hostname
        hostname = subdomain.strip(
        ) + '.' + self.dictionary['hostname-base']
        retries = 0
        # Retries
        while (True):
            # Info progress
            self.context.out(
                message=(self.context.strings['methods']['dictionary']
                         ['progress-clear'] +
                         '\n'.join(self.context.strings['methods']
                                   ['dictionary']['progress'])),
                parseDict={
                    'hostname':
                    hostname,
                    'current-line':
                    "{:,}".format(self.dictionary['current-line']),
                    'total-lines':
                    "{:,}".format(self.dictionary['n-subdomains-in-file']),
                    'percent-lines':
                    "{:,.2f}".format(
                        ((self.dictionary['current-line'] * 100) /
                         self.dictionary['n-subdomains-in-file'])) + '%',
                    'total-threads':
                    self.dictionary['max-threads'],
                    'total-retries':
                    self.dictionary['retries']
                },
                end='')
            # Check if have a ip address
            nsAnswer = None
            try:
                resolv = dns.resolver.Resolver()
                if (self.dictionary['nameservers']):
                    # Custom nameservers
                    resolv.nameservers = self.dictionary['nameservers']
                nsAnswer = resolv.query(
                    # Full hostname to find
                    hostname,
                    # Record type
                    'A',
                    # TCP for better results
                    tcp=True)
                if (nsAnswer):
                    # For each response data of record
                    for rdata in nsAnswer:
                        # Get the ip address from current response record
                        ip = rdata.to_text().strip('"')
                        # The record have a valid ip address?
                        # ip = str(socket.gethostbyname(hostname)) Fail ns server
                        if (ip):
                            # Add full hostname
                            self.context.addHostName(
                                hostname=hostname,
                                messageFormat=(
                                    # Clear space of the buffer progress
                                    self.context.strings['methods']
                                    ['dictionary']['progress-clear'] +
                                    # Show the subdomain found
                                    self.context.strings['methods']
                                    ['dictionary']['item-found'] +
                                    # Make space for the buffer progress
                                    self.context.strings['methods']
                                    ['dictionary']['progress-pre']))
                            # Break for
                            break
                # Break while
                break
            except dns.resolver.NXDOMAIN:
                # No such domain
                break
            except dns.resolver.Timeout:
                # Retry
                # Update retries count for this hostname
                retries += 1
                # Update global retries counts
                self.dictionary['retries'] += 1
                # Limit of retries
                if (retries > self.dictionary['max-retries']):
                    break
            except dns.exception.DNSException:
                # Unknown exception
                break
            except Exception as e:
                # Unknown exception
                break
def add (args):
    # Add resource records to a configured NDNS zone.  Records come either
    # from the `args.rr` text (zone-file syntax) or from stdin.
    # NOTE: Python 2 code (print statement); relies on the module-level
    # `parser`, `ndns`, `ndn`, `dns` and `sys` names.
    _ndns = ndns.ndns_session (args.data_dir)
    try:
        # Interpret the zone both as an NDN name and as its DNS form.
        zone_ndn = ndn.Name (args.zone)
        zone_dns = ndns.dnsify (args.zone)
    except NameError as e:
        sys.stderr.write ("ERROR: %s\n\n" % e)
        parser.print_help ()
        exit (1)
    # The zone must already exist in the NDNS database.
    zone = _ndns.query (ndns.Zone).filter (ndns.Zone.has_name (zone_ndn)).first ()
    if not zone:
        sys.stderr.write ("ERROR: zone [%s] is not configured\n" % zone_ndn)
        exit (1)
    origin = dns.name.from_text (zone_dns)
    # Parse the records from the command line or, failing that, stdin.
    # check_origin=False because partial record sets are expected.
    if args.rr:
        zonefile = dns.zone.from_text (args.rr, origin = origin, check_origin = False)
    else:
        zonefile = dns.zone.from_file (sys.stdin, origin = origin, check_origin = False)
    # Records with no explicit TTL (parsed as 0) inherit the SOA's TTL.
    default_rtt = zone.soa[0].rrs[0].ttl
    for (name, ttl, rdata) in zonefile.iterate_rdatas ():
        if ttl == 0:
            ttl = default_rtt
        if not args.quiet:
            print "Create record: '%s %s %d %s'" % (name, dns.rdatatype.to_text (rdata.rdtype), ttl, rdata.to_text ())
        # Store the record and (re)sign its NDN data packet.
        rrset = ndns.add_rr (_ndns, zone, origin, name, ttl, rdata)
        rrset.refresh_ndndata (_ndns, zone.default_key)
    # Persist unless the caller explicitly disabled commit.
    if getattr (args, 'commit', True):
        _ndns.commit ()