def _walk_zone(self): dname = self.start covering_nsec = None while not self._finished(dname): query_dn, recv_nsec = self._retrieve_nsec(dname, covering_nsec) if len(recv_nsec) == 0: raise NSECWalkError, ( 'no NSEC RR received\n', "Maybe the zone doesn't support DNSSEC or uses NSEC3 RRs") covering_nsec = self._find_covering_rr(recv_nsec, query_dn) if covering_nsec is None: raise NSECWalkError, ( "no covering NSEC RR received for domain name ", str(dname)) log.debug2('covering NSEC RR found: ', str(covering_nsec)) self._write_record(covering_nsec) if (covering_nsec.owner > covering_nsec.next_owner and covering_nsec.next_owner != self.zone): raise NSECWalkError, ('NSEC owner > next_owner, ', 'but next_owner != zone') self.nsec_chain.append(covering_nsec) log.info('discovered owner: ', str(covering_nsec.owner), "\t", ' '.join(covering_nsec.types)) log.update() dname = covering_nsec.next_owner self._write_number_of_records(len(self.nsec_chain)) return self.nsec_chain
def add_dep(self, mode, dep):
    """Record in the Deps table that this file depends on *dep* (mode 'm'/'c')."""
    src = File(name=dep)
    debug2('add-dep: %r < %s %r\n' % (self.name, mode, src.name))
    # a file must never depend on itself
    assert self.id != src.id
    _write("insert or replace into Deps "
           " (target, mode, source, delete_me) values (?,?,?,?)",
           [self.id, mode, src.id, False])
def _walk_zone(self): dname = self.start covering_nsec = None while not self._finished(dname): query_dn, recv_nsec = self._retrieve_nsec(dname, covering_nsec) if len(recv_nsec) == 0: raise NSECWalkError, ('no NSEC RR received\n', "Maybe the zone doesn't support DNSSEC or uses NSEC3 RRs") covering_nsec = self._find_covering_rr(recv_nsec, query_dn) if covering_nsec is None: raise NSECWalkError, ("no covering NSEC RR received for domain name ", str(dname)) log.debug2('covering NSEC RR found: ', str(covering_nsec)) self._write_record(covering_nsec) if (covering_nsec.owner > covering_nsec.next_owner and covering_nsec.next_owner != self.zone): raise NSECWalkError, ('NSEC owner > next_owner, ', 'but next_owner != zone') self.nsec_chain.append(covering_nsec) log.info('discovered owner: ', str(covering_nsec.owner), "\t", ' '.join(covering_nsec.types)) log.update() dname = covering_nsec.next_owner self._write_number_of_records(len(self.nsec_chain)) return self.nsec_chain
def __init__(self, zone, queryprovider, hash_queues, prehash_pool,
             nsec3_records, ignore_overlapping=False, label_counter=None,
             output_file=None, stats=None, predictor=None, aggressive=0):
    """Set up an NSEC3 zone walker.

    zone/queryprovider/output_file/stats are passed to the base walker.
    hash_queues feed the prehash worker pool; nsec3_records seed the
    chain; predictor, if given, is a (process, pipe) pair.
    label_counter optionally resumes hashing at a given counter value.
    """
    super(NSEC3Walker, self).__init__(zone, queryprovider, output_file,
                                      stats)
    self.stats['tested_hashes'] = 0
    self._prediction_current = None
    if predictor is not None:
        self._predictor_proc, self._predictor_pipe = predictor
    else:
        self._predictor_proc = None
    self._write_chain(nsec3_records)
    self.nsec3_chain = NSEC3Chain(ignore_overlapping=ignore_overlapping)
    self._update_predictor_state()
    for rr in nsec3_records:
        self.nsec3_chain.insert(rr)
    self._update_predictor_state()
    self._prehash_processes = prehash_pool
    if label_counter is not None:
        log.debug2("setting initial label counter to 0x{0:x}".format(
            label_counter))
        self._label_counter_init = label_counter
    else:
        # plain int instead of the Py2-only 0L literal; Python 2 ints
        # auto-promote to long on overflow, so behaviour is unchanged
        self._label_counter_init = 0
    self._label_counter_state = 0
    self._hash_queues = itertools.cycle(hash_queues)
    self._reset_prehashing()
    self._aggressive = aggressive
def _check_salt(self, nsec3): if self.salt is None: self.salt = nsec3.salt log.debug2("salt = 0x", util.str_to_hex(self.salt)) elif self.salt != nsec3.salt: raise ZoneChangedError, "NSEC3 salt changed" else: nsec3.salt = self.salt
def update_stamp(self, must_exist=False):
    """Re-read this file's stamp; mark it changed when it differs.

    With must_exist=True, a missing file raises instead.
    """
    newstamp = self.read_stamp()
    if must_exist and newstamp == STAMP_MISSING:
        raise Exception("%r does not exist" % self.name)
    if newstamp == self.stamp:
        return
    debug2("STAMP: %s: %r -> %r\n" % (self.name, self.stamp, newstamp))
    self.stamp = newstamp
    self.set_changed()
def _find_do_file(f):
    """Search the candidate .do files for f; register a dep per candidate.

    Returns (dofile, basename, ext) of the first existing candidate,
    or (None, None, None) when no rule exists.
    """
    for dofile, basename, ext in _possible_do_files(f.name):
        debug2('%s: %s ?\n' % (f.name, dofile))
        if not os.path.exists(dofile):
            # missing candidate: record a 'c' dep so its creation retriggers us
            f.add_dep('c', dofile)
            continue
        f.add_dep('m', dofile)
        return dofile, basename, ext
    return None, None, None
def prepare(self):
    """Prepare to build self.target: check override/static status, locate
    the .do file, and open the temporary output file and log.

    Returns 0 to skip the build, 1 on error (no rule), or None when the
    build should proceed.  Assumes the target's build lock is held.
    """
    assert self.target.dolock().owned == state.LOCK_EX
    self.target.build_starting()
    self.before_t = _try_stat(self.target.name)
    newstamp = self.target.read_stamp()
    if newstamp.is_override_or_missing(self.target):
        if newstamp.is_missing():
            # was marked generated, but is now deleted
            debug3('oldstamp=%r newstamp=%r\n',
                   self.target.stamp, newstamp)
            self.target.forget()
            self.target.refresh()
        elif vars.OVERWRITE:
            warn('%s: you modified it; overwrite\n',
                 self.target.printable_name())
        else:
            warn('%s: you modified it; skipping\n',
                 self.target.printable_name())
            return 0
    if self.target.exists_not_dir() and not self.target.is_generated:
        # an existing source file that was not generated by us.
        # This step is mentioned by djb in his notes.
        # For example, a rule called default.c.do could be used to try
        # to produce hello.c, but we don't want that to happen if
        # hello.c was created in advance by the end user.
        if vars.OVERWRITE:
            warn('%s: exists and not marked as generated; overwrite.\n',
                 self.target.printable_name())
        else:
            warn('%s: exists and not marked as generated; not redoing.\n',
                 self.target.printable_name())
            debug2('-- static (%r)\n', self.target.name)
            return 0
    (self.dodir, self.dofile, self.dobasedir, self.dobasename,
     self.doext) = _find_do_file(self.target)
    if not self.dofile:
        if newstamp.is_missing():
            err('no rule to make %r\n', self.target.name)
            return 1
        else:
            self.target.forget()
            debug2('-- forget (%r)\n', self.target.name)
            return 0  # no longer a generated target, but exists, so ok
    self.outdir = self._mkoutdir()
    # name connected to stdout
    self.tmpname_sout = self.target.tmpfilename('out.tmp')
    # name provided as $3
    self.tmpname_arg3 = os.path.join(self.outdir, self.target.basename())
    unlink(self.tmpname_sout)
    unlink(self.tmpname_arg3)
    # name for the log file
    self.log_fd = logger.open_log(self.target, truncate=True)
    # NOTE(review): 0666 is a Py2-only octal literal (0o666 elsewhere);
    # kept byte-identical here
    self.tmp_sout_fd = os.open(self.tmpname_sout,
                               os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
    close_on_exec(self.tmp_sout_fd, True)
    self.tmp_sout_f = os.fdopen(self.tmp_sout_fd, 'w+')
    return None
def _find_do_file(f):
    """Locate the .do file for f, recording one dep per candidate tried.

    Returns the 5-tuple (dodir, dofile, basedir, basename, ext) of the
    first candidate whose file exists, or five Nones otherwise.
    """
    for dodir, dofile, basedir, basename, ext in _possible_do_files(f.name):
        dopath = os.path.join(dodir, dofile)
        debug2('%s: %s:%s ?\n' % (f.name, dodir, dofile))
        if not os.path.exists(dopath):
            # absent candidate: 'c' dep means "rebuild if this appears"
            f.add_dep('c', dopath)
            continue
        f.add_dep('m', dopath)
        return dodir, dofile, basedir, basename, ext
    return None, None, None, None, None
def _find_covering_rr(self, recv_rr, query_dn): covering_nsec = None for nsec in recv_rr: log.debug2('received NSEC RR: ' + str(nsec)) if not nsec.part_of_zone(self.zone): raise NSECWalkError, "received invalid NSEC RR, not part of zone" if nsec.covers(query_dn) or nsec.next_owner == self.zone: covering_nsec = nsec break return covering_nsec
def add_timeouterror(self, max_retries):
    """Account a query timeout against this server.

    With max_retries == -1 retries are unlimited and only a debug
    message is emitted; otherwise the retry counter is bumped and
    TimeOutError is raised once the budget is exhausted.
    """
    if max_retries != -1:
        self.retries += 1
        retries_left = max_retries - self.retries
        log.warn("timeout reached when waiting for response from ",
                 str(self), ", ", str(retries_left), " retries left")
        if retries_left <= 0:
            # Py3-compatible raise (was Py2-only "raise E, value")
            raise TimeOutError('no response from server: ' + str(self))
    else:
        log.debug2("timeout reached when waiting for response from ",
                   str(self))
def _find_do_file(f):
    """Probe _possible_do_files_ext() candidates; the first existing one wins.

    Records an 'm' dep on the chosen .do file and a 'c' dep on every
    missing candidate tried before it.  Returns five Nones if no rule
    exists.
    """
    for candidate in _possible_do_files_ext(f.name):
        dodir, dofile, basedir, basename, ext = candidate
        dopath = os.path.join(dodir, dofile)
        debug2('%s: %s:%s ?\n' % (f.name, dodir, dofile))
        if os.path.exists(dopath):
            f.add_dep('m', dopath)
            return candidate
        f.add_dep('c', dopath)
    return None, None, None, None, None
def _insert_records(self, recv_rr): # TODO: check if records cover query name for rr in recv_rr: log.debug2('received NSEC3 RR: ', str(rr)) if not rr.part_of_zone(self.zone): raise NSEC3WalkError, 'NSEC3 RR not part of zone' was_new = self.nsec3_chain.insert(rr) if was_new: log.debug1("discovered: ", str(rr.owner), " ", ' '.join(rr.types)) self._write_record(rr) self._update_predictor_state()
def main_redo_delegate(redo_flavour, targets):
    """Entry point for redo-delegate: build *targets* on behalf of $REDO_TARGET."""
    import builder, state, vars
    from log import debug2
    if vars.TARGET:
        f = state.File(name=vars.TARGET)
        debug2('TARGET: %r %r %r\n', vars.STARTDIR, vars.PWD, vars.TARGET)
    else:
        f = None
        # fix: '%S' is not a valid printf-style conversion and would raise
        # ValueError when the message is formatted; use '%s'
        debug2('%s: no target - not delegating.\n', redo_flavour)
    targets = state.fix_chdir(targets)
    return builder.main(targets, delegate=f)
def main_redo_ifchange(redo_flavour, targets):
    """Entry point for redo-ifchange: rebuild only out-of-date targets."""
    import ifchange, state, vars, builder
    from log import debug2
    f = None
    if vars.TARGET:
        f = state.File(name=vars.TARGET)
        debug2('TARGET: %r %r %r\n', vars.STARTDIR, vars.PWD, vars.TARGET)
    else:
        debug2('%s: no target - not adding depends.\n', redo_flavour)
    return builder.main(state.fix_chdir(targets), ifchange.should_build,
                        f, re_do=False)
def _find_do_file(f):
    """Find the .do file for f, adding a dep for every candidate probed.

    Returns (dodir, dofile, basedir, basename, ext) for the first
    existing candidate, or five Nones when no rule applies.
    """
    for dodir, dofile, basedir, basename, ext in \
            _possible_do_files_in_do_dir(f.name):
        if dodir and not os.path.isdir(dodir):
            # normpath() only as a last resort: with symlinks it can change
            # meaning (x/y/../z might not be x/z), but when part of the path
            # doesn't exist yet it still lets us find the .do file that will
            # create that sub-path.
            dodir = os.path.normpath(dodir)
        dopath = os.path.join(dodir, dofile)
        debug2('%s: %s:%s ?\n', f.name, dodir, dofile)
        dof = state.File(dopath)
        found = os.path.exists(dopath)
        f.add_dep(dof)
        if found:
            return dodir, dofile, basedir, basename, ext
    return None, None, None, None, None
def _insert_records(self, recv_rr): # TODO: check if records cover query name for rr in recv_rr: log.debug2('received NSEC3 RR: ', str(rr)) if not rr.part_of_zone(self.zone): raise NSEC3WalkError, 'NSEC3 RR not part of zone' # check if the record is minimally-covering # ref 'NSEC3 White Lies': # https://tools.ietf.org/html/rfc7129#appendix-B if rr.distance_covered() == 2: raise NSEC3WalkError, ('Received minimally-covering NSEC3 record\n', 'This zone likely uses "NSEC3 White Lies" to prevent zone enumeration\n', '(See https://tools.ietf.org/html/rfc7129#appendix-B)' ) was_new = self.nsec3_chain.insert(rr) if was_new: log.debug1("discovered: ", str(rr.owner), " ", ' '.join(rr.types)) self._write_record(rr) self._update_predictor_state()
def set_failed(self):
    """Mark this target as having failed during the current run."""
    debug2('FAILED: %r\n' % self.name)
    self.update_stamp()
    # a failed build still counts as a generated target
    self.is_generated = True
    self.failed_runid = vars.RUNID
def set_changed(self):
    """Mark this target as freshly built/changed in the current run."""
    debug2('BUILT: %r (%r)\n' % (self.name, self.stamp))
    # a successful build clears failure and override state
    self.is_override = False
    self.failed_runid = None
    self.changed_runid = vars.RUNID
def isdirty(f, depth, expect_stamp):
    """Decide whether target *f* needs rebuilding.

    depth is an indent string used only for log output; expect_stamp is
    the stamp the parent recorded for f when it last built.

    Returns CLEAN, DIRTY, or a list of File objects: a list means "maybe
    dirty -- rebuild these (checksummed) targets first, then re-check".
    """
    assert(isinstance(expect_stamp, state.Stamp))
    debug('%s?%s\n', depth, f.name)
    debug3('%sexpect: %r\n', depth, expect_stamp)
    debug3('%sold: %r\n', depth, f.stamp)
    # a static source file that exists and was never expected to be built
    if not f.is_generated and expect_stamp.is_none() and f.exists():
        debug('%s-- CLEAN (static)\n', depth)
        return CLEAN
    if f.exitcode:
        debug('%s-- DIRTY (failed last time)\n', depth)
        return DIRTY
    if not expect_stamp.is_missing() and f.stamp.is_missing() and not f.stamp.runid():
        debug('%s-- DIRTY (never built)\n', depth)
        return DIRTY
    if f.stamp.is_old():
        debug('%s-- DIRTY (from old redo)\n', depth)
        return DIRTY
    if not f.stamp or f.stamp.is_none():
        debug('%s-- DIRTY (no stamp)\n', depth)
        return DIRTY
    newstamp = f.read_stamp()
    debug3('%snew: %r\n', depth, newstamp)
    if newstamp.is_override_or_missing(f) and not newstamp.is_missing():
        # user modified the file by hand; OVERWRITE decides whether we
        # clobber it (DIRTY) or respect the edit (CLEAN)
        if vars.OVERWRITE:
            debug('%s-- DIRTY (override)\n', depth)
            return DIRTY
        else:
            debug('%s-- CLEAN (override)\n', depth)
            return CLEAN
    if newstamp.is_stamp_dirty(f):
        if newstamp.is_missing():
            debug('%s-- DIRTY (missing)\n', depth)
        else:
            debug('%s-- DIRTY (mtime)\n', depth)
        # a checksummed target may still turn out unchanged after a rebuild,
        # so report "maybe" ([f]) instead of a definite DIRTY
        return [f] if f.stamp.is_csum() else DIRTY
    must_build = []
    for stamp2, f2 in f.deps:
        dirty = CLEAN
        if f2 == state.ALWAYS:
            if f.stamp_mtime >= vars.RUNID:
                # has already been checked during this session
                debug('%s-- CLEAN (always, checked)\n', depth)
            else:
                debug('%s-- DIRTY (always)\n', depth)
                dirty = DIRTY
        else:
            f2 = state.File(f2, f.dir)
            sub = isdirty(f2, depth = depth + ' ', expect_stamp = stamp2)
            if sub:
                debug('%s-- DIRTY (sub)\n', depth)
                dirty = sub
        if not f.stamp.is_csum():
            # f is a "normal" target: dirty f2 means f is instantly dirty
            if dirty:
                # if dirty==DIRTY, this means f is definitely dirty.
                # if dirty==[...], it's a list of the uncertain children.
                return dirty
        else:
            # f is "checksummable": dirty f2 means f needs to redo,
            # but f might turn out to be clean after that (ie. our parent
            # might not be dirty).
            if dirty == DIRTY:
                # f2 is definitely dirty, so f definitely needs to
                # redo.  However, after that, f might turn out to be
                # unchanged.
                return [f]
            elif isinstance(dirty, list):
                # our child f2 might be dirty, but it's not sure yet.  It's
                # given us a list of targets we have to redo in order to
                # be sure.
                must_build += dirty
    if must_build:
        # f is *maybe* dirty because at least one of its children is maybe
        # dirty.  must_build has accumulated a list of "topmost" uncertain
        # objects in the tree.  If we build all those, we can then
        # redo-ifchange f and it won't have any uncertainty next time.
        return must_build
    if expect_stamp.is_dirty(f):
        # This must be after we checked the children.  Before, we didn't
        # know if the current target was dirty or not
        debug('%s-- DIRTY (parent)\n', depth)
        return DIRTY
    # if we get here, it's because the target is clean
    debug2('%s-- CLEAN (dropped off)\n', depth)
    return CLEAN
def _sendquery(self, query):
    """Register *query* as active, enqueue it for sending, return its id."""
    self.stats['queries'] += 1
    log.debug2('query: ', query.query_dn, '; ns = ', query.ns,
               '; rrtype = ', query.rrtype)
    self._active_queries[query.id] = query
    self._query_queue.put(query)
    return query.id
def update_stamp(self):
    """Refresh the cached stamp; flag the file as changed if it moved."""
    newstamp = self.read_stamp()
    if newstamp == self.stamp:
        return
    debug2("STAMP: %s: %r -> %r\n" % (self.name, self.stamp, newstamp))
    self.stamp = newstamp
    self.set_changed()
# Checksum stdin and record it as the target's csum, marking the target
# changed or merely checked depending on whether the sum moved.
sh = hashlib.sha1()
while True:
    chunk = os.read(0, 4096)
    sh.update(chunk)
    if not chunk:
        break
csum = sh.hexdigest()
if not vars.TARGET:
    sys.exit(0)
me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
f = state.File(name=me)
changed = (csum != f.csum)
debug2('%s: old = %s\n' % (f.name, f.csum))
debug2('%s: sum = %s (%s)\n'
       % (f.name, csum, changed and 'changed' or 'unchanged'))
f.is_generated = True
f.is_override = False
f.failed_runid = None
if changed:
    # update_stamp might not do this if the mtime is identical
    f.set_changed()
    f.csum = csum
else:
    # unchanged
    f.set_checked()
f.save()
state.commit()
def _start_do(self):
    """Kick off building target self.t: handle override/static shortcuts,
    locate the .do file, prepare temp output and argv, then hand the job
    to jwack.  Assumes self.lock is held.
    """
    assert (self.lock.owned)
    t = self.t
    sf = self.sf
    newstamp = sf.read_stamp()
    # user changed the file since we last built it: respect the edit
    if (sf.is_generated and
            newstamp != state.STAMP_MISSING and
            (sf.stamp != newstamp or sf.is_override)):
        state.warn_override(_nice(t))
        if not sf.is_override:
            warn('%s - old: %r\n' % (_nice(t), sf.stamp))
            warn('%s - new: %r\n' % (_nice(t), newstamp))
            sf.set_override()
        sf.set_checked()
        sf.save()
        return self._after2(0)
    if (os.path.exists(t) and not os.path.isdir(t + '/.') and
            not sf.is_generated):
        # an existing source file that was not generated by us.
        # This step is mentioned by djb in his notes.
        # For example, a rule called default.c.do could be used to try
        # to produce hello.c, but we don't want that to happen if
        # hello.c was created by the end user.
        debug2("-- static (%r)\n" % t)
        sf.set_static()
        sf.save()
        return self._after2(0)
    sf.zap_deps1()
    (dodir, dofile, basedir, basename, ext) = paths.find_do_file(sf)
    if not dofile:
        if os.path.exists(t):
            sf.set_static()
            sf.save()
            return self._after2(0)
        else:
            err('no rule to make %r\n' % t)
            return self._after2(1)
    unlink(self.tmpname1)
    unlink(self.tmpname2)
    # NOTE(review): 0666 is a Py2-only octal literal; kept byte-identical
    ffd = os.open(self.tmpname1, os.O_CREAT | os.O_RDWR | os.O_EXCL, 0666)
    close_on_exec(ffd, True)
    self.f = os.fdopen(ffd, 'w+')
    # this will run in the dofile's directory, so use only basenames here
    arg1 = basename + ext  # target name (including extension)
    arg2 = basename  # target name (without extension)
    argv = [
        'sh', '-e',
        dofile,
        arg1,
        arg2,
        # temp output file name
        state.relpath(os.path.abspath(self.tmpname2), dodir),
    ]
    if vars.VERBOSE:
        argv[1] += 'v'
    if vars.XTRACE:
        argv[1] += 'x'
    if vars.VERBOSE or vars.XTRACE:
        log_('\n')
    # honour a #!/interpreter line in the .do file
    firstline = open(os.path.join(dodir, dofile)).readline().strip()
    if firstline.startswith('#!/'):
        argv[0:2] = firstline[2:].split(' ')
    log('%s\n' % _nice(t))
    self.dodir = dodir
    self.basename = basename
    self.ext = ext
    self.argv = argv
    sf.is_generated = True
    sf.save()
    # the .do file itself is a static dependency
    dof = state.File(name=os.path.join(dodir, dofile))
    dof.set_static()
    dof.save()
    state.commit()
    jwack.start_job(t, self._do_subproc, self._after)
def zap_deps1(self):
    """Flag every recorded dep of this target for deletion (phase 1)."""
    debug2('zap-deps1: %r\n' % self.name)
    _write('update Deps set delete_me=? where target=?',
           [True, self.id])
def _sendquery(self, query_dn, ns, rrtype):
    """Count and issue one DNS query; return the query's result object."""
    self.stats['queries'] += 1
    log.debug2('query: ', query_dn, '; ns = ', ns, '; rrtype = ', rrtype)
    return query.query(query_dn, ns, rrtype, self.timeout)
def should_build(t):
    """Dirtiness callback for builder.main: return DIRTY/CLEAN/must-build
    list for target *t*; abort the whole run if t failed previously.
    """
    f = state.File(name=t)
    if f.is_failed():
        raise builder.ImmediateReturn(32)
    dirty = deps.isdirty(f, depth = '', max_changed = vars.RUNID)
    # a "maybe dirty on itself only" result counts as plain DIRTY here
    return dirty==[f] and deps.DIRTY or dirty


# redo-ifchange main body: record deps on the calling target (if any),
# then build whatever is out of date.
rv = 202
try:
    if vars.TARGET and not vars.UNLOCKED:
        me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
        f = state.File(name=me)
        debug2('TARGET: %r %r %r\n' % (vars.STARTDIR, vars.PWD, vars.TARGET))
    else:
        f = me = None
        debug2('redo-ifchange: not adding depends.\n')
    try:
        targets = sys.argv[1:]
        if f:
            # the calling target depends on each requested target
            for t in targets:
                f.add_dep('m', t)
            f.save()
        rv = builder.main(targets, should_build)
    finally:
        # always hand job-server tokens back, even on error
        jwack.force_return_tokens()
except KeyboardInterrupt:
    sys.exit(200)
state.commit()
def _check_iterations(self, nsec3): if self.iterations is None: self.iterations = nsec3.iterations log.debug2("number of iterations = ", self.iterations) elif self.iterations != nsec3.iterations: raise ZoneChangedError, "NSEC3 number of iterations changed"
def zap_deps2(self):
    """Drop every dep row that zap_deps1() flagged (phase 2)."""
    debug2('zap-deps2: %r\n' % self.name)
    _write('delete from Deps where target=? and delete_me=1',
           [self.id])
def _start_do(self):
    """Start building target self.t: short-circuit overridden/static files,
    find the .do rule, set up temp output and argv, then queue the job
    with jwack.  Assumes self.lock is held.
    """
    assert(self.lock.owned)
    t = self.t
    sf = self.sf
    newstamp = sf.read_stamp()
    # user changed the file since our last build: keep their version
    if (sf.is_generated and not sf.failed_runid and
            newstamp != state.STAMP_MISSING and
            (sf.stamp != newstamp or sf.is_override)):
        state.warn_override(_nice(t))
        sf.set_override()
        sf.set_checked()
        sf.save()
        return self._after2(0)
    if (os.path.exists(t) and not os.path.exists(t + '/.') and
            not sf.is_generated):
        # an existing source file that was not generated by us.
        # This step is mentioned by djb in his notes.
        # For example, a rule called default.c.do could be used to try
        # to produce hello.c, but we don't want that to happen if
        # hello.c was created by the end user.
        # FIXME: always refuse to redo any file that was modified outside
        # of redo?  That would make it easy for someone to override a
        # file temporarily, and could be undone by deleting the file.
        debug2("-- static (%r)\n" % t)
        sf.set_static()
        sf.save()
        return self._after2(0)
    sf.zap_deps1()
    (dodir, dofile, basedir, basename, ext) = _find_do_file(sf)
    if not dofile:
        if os.path.exists(t):
            sf.set_static()
            sf.save()
            return self._after2(0)
        else:
            err('no rule to make %r\n' % t)
            return self._after2(1)
    unlink(self.tmpname1)
    unlink(self.tmpname2)
    # NOTE(review): 0666 is a Py2-only octal literal; kept byte-identical
    ffd = os.open(self.tmpname1, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
    close_on_exec(ffd, True)
    self.f = os.fdopen(ffd, 'w+')
    # this will run in the dofile's directory, so use only basenames here
    argv = ['sh', '-e',
            dofile,
            basename,  # target name (no extension)
            ext,  # extension (if any), including leading dot
            os.path.join(basedir, os.path.basename(self.tmpname2))  # temp output file name
            ]
    if vars.VERBOSE:
        argv[1] += 'v'
    if vars.XTRACE:
        argv[1] += 'x'
    if vars.VERBOSE or vars.XTRACE:
        log_('\n')
    # honour a #!/interpreter line in the .do file
    firstline = open(os.path.join(dodir, dofile)).readline().strip()
    if firstline.startswith('#!/'):
        argv[0:2] = firstline[2:].split(' ')
    log('%s\n' % _nice(t))
    self.dodir = dodir
    self.basename = basename
    self.ext = ext
    self.argv = argv
    sf.is_generated = True
    sf.save()
    # the .do file itself becomes a static dependency
    dof = state.File(name=os.path.join(dodir, dofile))
    dof.set_static()
    dof.save()
    state.commit()
    jwack.start_job(t, self._do_subproc, self._after)
else:
    # NOTE(review): the matching 'if' of this 'else' lies outside this
    # chunk -- presumably the branch taken when redo-stamp gets no
    # argument, so the checksum is computed from stdin; confirm against
    # the full file.
    sh = hashlib.sha1()
    while 1:
        b = os.read(0, 4096)
        sh.update(b)
        if not b:
            break
    csum = sh.hexdigest()
    if not vars.TARGET:
        sys.exit(0)
    me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
    f = state.File(name=me)
    changed = (csum != f.csum)
    debug2('%s: old = %s\n' % (f.name, f.csum))
    debug2('%s: sum = %s (%s)\n' % (f.name, csum, changed and 'changed' or 'unchanged'))
    f.is_generated = True
    f.is_override = False
    f.failed_runid = None
    if changed:
        # update_stamp might not do this if the mtime is identical
        f.set_changed()
        f.csum = csum
    else:
        # unchanged
        f.set_checked()
    f.save()
    state.commit()