def extractor(self, logger_type, target):
    """Extract validated targets from ``self.data`` into the target queue.

    Iterates over the regex match objects in ``self.data`` and, depending
    on ``self.type``, queues either raw proxy entries (``"proxy"``) or
    normalized URLs (``"url"``) that pass the duplicate filter.

    Args:
        logger_type: Logger backend name (unused in this code path).
        target: Scan target identifier (unused in this code path).

    Returns:
        ``self.contain_target`` — the queue of collected items — on
        success, or the caught ``Exception`` object on failure (legacy
        error contract: callers must type-check the return value).
    """
    try:
        if isinstance(self.data, Iterable):
            for match in self.data:
                item = match.group()
                if self.type == "proxy":
                    # Queue the proxy entry only if it was not seen before.
                    if self.filter(item, self.container):
                        self.contain_target.put(item)
                elif self.type == "url":
                    # Collapse query values so URLs differing only in
                    # parameter values dedupe to the same key,
                    # e.g. www.example.com/a/?b=
                    filtered_url = URL_PATH.sub("=", item)
                    if self.filter(filtered_url, self.container):
                        # Normalize the URL format before queuing it.
                        url = convert_target(item)
                        self.contain_target.put(url)
        return self.contain_target
    except Exception as e:
        # NOTE(review): returning the exception instead of raising keeps
        # the original contract, but conflates errors with results —
        # consider raising once all callers are audited.
        return e
def extractor(self, logger_type, target):
    """Collect filtered targets from the matched data into the queue.

    Depending on ``self.type``, pushes raw proxy strings or normalized
    URLs into ``self.contain_target``, announcing each collected URL on
    stdout.

    Returns:
        ``self.contain_target`` on success, or the caught ``Exception``
        object when anything goes wrong.
    """
    try:
        if isinstance(self.data, Iterable):
            for matched in self.data:
                extracted = matched.group()
                if self.type == "proxy":
                    # Only previously-unseen proxies make it in.
                    if self.filter(extracted, self.container):
                        self.contain_target.put(extracted)
                    continue
                if self.type != "url":
                    continue
                # Strip query values before the duplicate check so URLs
                # that differ only in parameter values count as one.
                dedup_key = URL_PATH.sub("=", extracted)
                if not self.filter(dedup_key, self.container):
                    continue
                url = convert_target(extracted)
                print(
                    f"{purple}[~][{time}] Collecting a target for testing : {url}{end}"
                )
                self.contain_target.put(url)
        return self.contain_target
    except Exception as err:
        return err
# ---- CLI option normalization (runs before the scan starts) ----
# Working copies of the parsed options; the queues are populated later.
file_ = None
subdomain_queue = None
cookies = None
proxy_queue = None

if file:
    # Keep the raw path as a plain string for downstream open() calls.
    file_ = str(file)

if cookie:
    # BUG FIX: the original read `cookies = cookies`, which copied the
    # initial None instead of the user-supplied cookie value.
    cookies = cookie

if target:
    # Normalize the first positional target into canonical URL form.
    target = convert_target(target[0])

# Pick the logger backend: stream by default, file when an output file
# is requested, mail when mail delivery is requested (last match wins).
logger_type = "StreamLogger"
if outfile:
    logger_type = "FileLogger"
if mail:
    # NOTE(review): "STMPLogger" looks like a typo for "SMTPLogger", but
    # this string must match the factory's registered name — confirm
    # against factory_logger before renaming.
    logger_type = "STMPLogger"

# The original `if account and password: account = account;
# password = password` self-assignments were no-ops and were removed;
# the values are consumed as-is downstream.