def lookup_multi(self, questions, qtype='A', timeout=10):
    """Look up multiple DNS records of the same qtype in parallel.

    :param questions: list of question strings to resolve
    :param qtype: record type to query for every question (default 'A')
    :param timeout: seconds to wait for the whole task group to finish
    :return: dict mapping question -> list of DNSLookupResult; questions
             whose lookup had not completed by the timeout are omitted
             (and logged as hanging lookups)
    """
    tg = TaskGroup()
    for question in questions:
        tg.add_task(self.lookup, args=(question, qtype))
    threadpool = get_default_threadpool()
    threadpool.add_task(tg)
    try:
        tg.join(timeout)
    except TimeOut:
        # partial results are still collected below
        self.logger.warning('timeout in lookup_multi')
    result = {}
    for task in tg.tasks:
        if task.done:
            # task.args[0] is the original question string
            result[task.args[0]] = task.result
        else:
            self.logger.warning('hanging lookup: %s', task)
    # release the worker pool so its thread(s) can wind down
    threadpool.stayalive = False
    return result
def listings(self, domain, timeout=10, parallel=False, abort_on_hit=False):
    """Query all configured providers and return listings of a domain.

    :param domain: the domain / IP to check against the providers
    :param timeout: overall time budget in seconds (<= 0 disables the
                    budget check in sequential mode)
    :param parallel: query all providers concurrently via the default
                     threadpool. warning: very experimental and has
                     bugs - do not use atm
    :param abort_on_hit: in sequential mode, return as soon as the
                         first listing is found
    :return: dict identifier:humanreadable for each listing found
    """
    listed = {}
    if parallel:
        tg = TaskGroup()
        for provider in self.providers:
            tg.add_task(provider.listed, (domain, ), )
        threadpool = get_default_threadpool()
        threadpool.add_task(tg)
        try:
            tg.join(timeout)
        except TimeOut:
            # keep whatever results completed before the deadline
            self.logger.warning('timeout in parallel listings lookup')
        finally:
            # always release the pool, even if join() raises, so the
            # worker thread does not linger (matches lookup_multi)
            threadpool.stayalive = False
        for task in tg.tasks:
            if task.done:
                for identifier, humanreadable in task.result:
                    listed[identifier] = humanreadable
    else:
        starttime = time.time()
        for provider in self.providers:
            loopstarttime = time.time()
            runtime = loopstarttime - starttime
            if timeout > 0 and runtime > timeout:
                self.logger.info('lookups aborted after %.2fs due to timeout', runtime)
                break
            for identifier, humanreadable in provider.listed(domain):
                listed[identifier] = humanreadable
                if abort_on_hit:
                    return listed.copy()
            self.logger.debug('%s completed in %.2fs', provider.rbldomain, time.time() - loopstarttime)
    return listed.copy()
def listings(self, domain, timeout=10, parallel=False, abort_on_hit=False):
    """return a dict identifier:humanreadable for each listing
    warning: parallel is very experimental and has bugs - do not use atm
    """
    found = {}

    if parallel:
        # fan the provider queries out to the default threadpool
        group = TaskGroup()
        for prov in self.providers:
            group.add_task(prov.listed, (domain, ), )
        pool = get_default_threadpool()
        pool.add_task(group)
        group.join(timeout)
        # harvest whatever finished within the deadline
        for finished in group.tasks:
            if not finished.done:
                continue
            for ident, readable in finished.result:
                found[ident] = readable
        pool.stayalive = False
        return found.copy()

    # sequential mode: ask one provider after the other, respecting
    # the overall time budget between providers
    begin = time.time()
    for prov in self.providers:
        iterstart = time.time()
        elapsed = iterstart - begin
        if timeout > 0 and elapsed > timeout:
            self.logger.info('lookups aborted after %.2fs due to timeout' % elapsed)
            break
        for ident, readable in prov.listed(domain):
            found[ident] = readable
            if abort_on_hit:
                return found.copy()
        self.logger.debug('%s completed in %.2fs' % (prov.rbldomain, time.time() - iterstart))
    return found.copy()