class ProxyPool():
    """Thread-safe pool of proxies that refills itself when it runs low.

    Relies on a module-level get_proxies_retry() helper to fetch a fresh
    batch of proxies.
    """

    def __init__(self, minimal=10) -> None:
        self.proxies = []
        # Lock guards refills when the pool is shared by crawler threads.
        self._lock = Lock()
        self.minimal = minimal

    def pick_proxy(self):
        """Return a random proxy, topping the pool up first if needed."""
        self._fill_pool()
        return random.choice(self.proxies)

    def remove_proxy(self, proxy):
        """Drop a dead proxy; tolerate it having been removed already."""
        try:
            self.proxies.remove(proxy)
            print('Remove proxy:', proxy)
        except ValueError:
            # list.remove raises ValueError when the item is absent
            # (another thread may have removed it first); the old bare
            # `except:` also swallowed KeyboardInterrupt etc.
            print('Proxy has been removed!')

    def _fill_pool(self):
        # Refresh the pool when fewer than `minimal` proxies remain.
        if len(self.proxies) < self.minimal:
            # Double-checked locking so concurrent callers refill only once.
            # `with` guarantees the lock is released even if the fetch
            # raises (the old acquire()/release() pair leaked it on error).
            with self._lock:
                if len(self.proxies) < self.minimal:
                    self.proxies = get_proxies_retry()
class DATA_AND(object):
    # Appears to be a boolean-based injection worker: sends each payload and,
    # when the success marker is absent from the response, records value 0 on
    # the shared payload dict (under a lock).
    # NOTE(review): Python 2 code; relies on module-level conf, send_data,
    # find_success, success_flag, ThreadPool and sys.

    def __init__(self, payloads):
        # Thread count comes from the module-level `conf` dict.
        self.threads = conf['thread_num']
        self.payloads = payloads
        # Serialises writes to the shared payload dicts across worker threads.
        self.mutex = Lock()

    def run(self, payload):
        # Send one payload; mark its bit value as 0 when the success flag is
        # not found in the response text.
        try:
            data = payload['payload']
            text = send_data(data)
            if not find_success(success_flag, text):
                value = 0
                self.mutex.acquire()
                payload['value'] = value
                self.mutex.release()
        except Exception as e:
            print e.message

    def _start(self):
        # Fan payloads out over a thread pool; .get(0xffff) turns the blocking
        # wait into a timed wait so Ctrl+C reaches the main thread.
        try:
            pool = ThreadPool(processes=self.threads)
            pool.map_async(self.run, self.payloads).get(0xffff)
            pool.close()
            pool.join()
        except Exception as e:
            print e
        except KeyboardInterrupt:
            # Reachable: KeyboardInterrupt is not a subclass of Exception.
            print '[!] user quit!'
            sys.exit(1)
class PixivAlbum(PixivItem):
    """Multi-image Pixiv work (manga/album): scrapes every image URL of the
    work and downloads them concurrently into a per-album directory.

    Inherits session handling, getSoup/getTitleAndArtist/downloadImageTo
    from PixivItem (defined elsewhere).
    """

    def __init__(self, illust_id, headers):
        PixivItem.__init__(self, illust_id, headers)
        # Manga-mode listing page that enumerates all images of the work.
        self.albumURL = f'https://www.pixiv.net/member_illust.php?mode=manga&illust_id={illust_id}'
        # Image requests need a matching Referer header to be accepted.
        self.session.headers['Referer'] = self.albumURL

    def getAlbum(self, path):
        """Download the whole album into `<path>/<title> - <artist>/`."""
        logging.info(f'getting album {self.illust_id}...')
        # get title
        soup = self.getSoup(self.pageURL)
        logging.debug(f'page for {self.illust_id} (main page) get.')
        self.getTitleAndArtist(soup)
        # get pic urls
        picURLs = set()
        soup = self.getSoup(self.albumURL)
        imageContainers = soup.find_all('div', {'class': 'item-container'})
        for item in imageContainers:
            picURLs.add(item.img['data-src'])
        # mkdir
        thisPath = path + os.sep + self.title + ' - ' + self.artist
        if not os.path.exists(thisPath):
            os.mkdir(thisPath)
        self.count = 1
        # Guards the shared progress counter incremented by the workers.
        self.lock = ThreadLock()

        def func(url):
            # Worker: download one image, then bump the counter under the lock.
            self.downloadImage(url, path)
            self.lock.acquire()
            logging.debug(
                f'({self.title} - {self.artist}) pic {self.count} done.')
            self.count += 1
            self.lock.release()

        # Five concurrent downloads per album.
        pool = ThreadPool(5)
        pool.map(func, picURLs)
        pool.close()
        pool.join()
        logging.info(
            f'album {self.title} - {self.artist} ({self.illust_id}) done.')

    def downloadImage(self, url, path):
        """Save one image as `<page-number>.<ext>` inside the album folder."""
        thisPath = path + os.sep + self.title + ' - ' + self.artist
        # Pixiv image URLs embed the page index as '...pN...'.
        num = re.findall(r'p(\d+)', url)[0]
        imageName = thisPath + os.sep + num + '.' + url.split('.')[-1]
        # Replace characters invalid in Windows filenames with lookalikes.
        imageName = imageName.replace('*', '※').replace('?', '?')
        self.downloadImageTo(url, imageName)
class assassin:
    # Scans IP ranges for Redis instances that answer an unauthenticated INFO
    # command on the given port. Python 2 code (print statements, unicode()).

    def __init__(self, ipfile, store_file, thread_num, port):
        # NOTE(review): handle is never explicitly closed; it is consumed
        # once by _ips() and lives for the object's lifetime.
        self.ipfile = open(ipfile, "r")
        self.thread_num = thread_num
        self.port = port
        self.vuls = set()            # hosts that answered like Redis
        self.store_file = store_file
        self.lock = Lock()           # guards print + vuls updates across threads

    def _ips(self):
        # Yield every address from the file; each non-empty line may be a
        # single IP or a CIDR network (strict=False tolerates set host bits).
        for line in self.ipfile:
            line = line.strip()
            if( len(line) == 0):
                continue
            for ip in ipaddress.ip_network(unicode(line), strict=False):
                yield '%s' % ip

    def run(self, host):
        # Probe one host: send a raw RESP INFO command and look for
        # 'redis_version' in the reply.
        vul = False
        try:
            sock = socket.socket()
            # NOTE(review): sets the process-wide default timeout on every
            # call instead of sock.settimeout(3) on this socket only.
            socket.setdefaulttimeout(3)
            sock.connect((host, self.port))
            # RESP encoding of "INFO": *1\r\n$4\r\ninfo\r\n
            payload = '\x2a\x31\x0d\x0a\x24\x34\x0d\x0a\x69\x6e\x66\x6f\x0d\x0a'
            sock.send(payload)
            recvdata = sock.recv(1024)
            if recvdata and 'redis_version' in recvdata:
                self.lock.acquire()
                print '[+] maybe vul: %s' % host
                self.vuls.add(host)
                self.lock.release()
                vul = True
                return vul
            else:
                return vul
        except:
            # Any connection/timeout error just means "not vulnerable" here.
            return vul
        finally:
            # NOTE(review): if socket.socket() itself raised, `sock` would be
            # unbound here (NameError) — confirm acceptable.
            sock.close()

    def multi_assasin(self):
        # Scan all addresses with a thread pool, then persist the hits.
        start = time.time()
        ip_set = self._ips()
        pool = ThreadPool(self.thread_num)
        results = pool.map(self.run, ip_set)
        pool.close()
        pool.join()
        with open(self.store_file, "w") as f:
            for _ in self.vuls:
                f.write(_ + "\n")
        print "%s\ttotal vuln sites:%s \n used %s minutes." % (time.ctime(),\
            len(self.vuls), (time.time()-start)/ 60.0)
class JobPool:
    """Multiprocessing job pool which captures and re-raises exceptions in jobs.

    Args:
        num_processes (int): Max number of processes to be used by job pool.
            Use 'None' for os.cpu_count().

    Note:
        Exceptions will be re-raised in the process which wait_for_done is called.
    """

    def __init__(self, num_processes):
        self._pool = Pool(num_processes)
        self._pending = 0        # jobs submitted but not yet completed
        self._exc = None         # first exception captured from a failed job
        self._mutex = Lock()     # guards _pending and _exc

    def _job_callback(self, _):
        # Runs in the pool's result-handler thread when a job succeeds.
        with self._mutex:
            self._pending -= 1

    def _job_err_callback(self, exc):
        # Runs in the pool's result-handler thread when a job raises.
        with self._mutex:
            self._exc = exc

    def submit_job(self, func, args):
        """Submit a job to the pool."""
        # BUGFIX: the increment must hold the mutex — the counter is also
        # mutated from the callback thread, so an unlocked read-modify-write
        # here could lose updates and make wait_for_done hang or return early.
        with self._mutex:
            self._pending += 1
        self._pool.apply_async(
            func,
            args,
            callback=self._job_callback,
            error_callback=self._job_err_callback)

    def wait_for_done(self):
        """Wait for all jobs to complete.

        Raises:
            The first exception captured from a failed job.
        """
        while True:
            # Snapshot the shared state under the lock ('with' also releases
            # it on any exception, unlike the old acquire()/release() pairs).
            with self._mutex:
                pending = self._pending
                exc = self._exc
            # Check for exception: stop outstanding work and re-raise here.
            if exc:
                self._pool.terminate()
                self._pool.join()
                raise exc
            # Check for done.
            if pending == 0:
                return
class Scanner(object):
    """Port scanner: resolves the target, checks for CDN, then banner-grabs
    every open port in [startPort, endPort] using a thread pool.

    Relies on module-level DNSServer, ThreadPool, UserAgent, BeautifulSoup,
    requests, dns.resolver, socket, re and sys.
    """

    def __init__(self, target, startPort, endPort):
        self.target = target
        self.startPort = startPort
        self.endPort = endPort
        self.dnsRecords = []     # IPs resolved through the public DNS servers
        self.mutex = Lock()      # serialises printing from worker threads
        self.ports = []
        self.getPorts()
        self.time = time()       # scan start timestamp

    def getPorts(self):
        # Build the inclusive port range to scan.
        for i in range(int(self.startPort), int(self.endPort) + 1):
            self.ports.append(i)

    def checkCdn(self):
        # Resolve the target through each resolver in the module-level
        # DNSServer list; more than one distinct answer suggests a CDN.
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        try:
            for i in DNSServer:
                myResolver.nameservers = i
                record = myResolver.query(self.target)
                self.dnsRecords.append(record[0].address)
            self.dnsRecords = list(set(self.dnsRecords))
        except Exception as e:
            pass
        finally:
            # Returning from finally deliberately swallows any pending error.
            return True if len(self.dnsRecords) > 1 else False

    def scanPort(self, port):
        # TCP connect scan; True when the port accepts a connection.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            return True if s.connect_ex((self.target, port)) == 0 else False
        except Exception as e:
            pass
        finally:
            s.close()

    def getHttpBanner(self, url):
        # Fetch the page <title> as an HTTP banner; None on any failure.
        try:
            r = requests.get(url,
                             headers={'UserAgent': UserAgent().random},
                             timeout=2,
                             verify=False,
                             allow_redirects=True)
            soup = BeautifulSoup(r.content, 'lxml')
            return soup.title.text.strip('\n').strip()
        except Exception as e:
            pass

    def getSocketInfo(self, port):
        # Raw-socket banner grab for non-HTTP services.
        # NOTE(review): on Python 3, send() requires bytes and recv() returns
        # bytes, so this always raises and falls through to None — confirm.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            s.connect((self.target, port))
            s.send('HELLO\r\n')
            return s.recv(1024).split('\r\n')[0].strip('\r\n')
        except Exception as e:
            pass
        finally:
            s.close()

    def run(self, port):
        # Worker: try HTTP, then HTTPS, then a raw banner; print one line.
        try:
            if self.scanPort(port):
                banner = self.getHttpBanner(f'http://{self.target}:{port}')
                # NOTE(review): if anything below raises, the mutex is never
                # released (no try/finally around the critical section).
                self.mutex.acquire()
                if banner:
                    print(f'{str(port).rjust(6)} ---- open {banner[:18]}')
                else:
                    banner = self.getHttpBanner(
                        f'https://{self.target}:{port}')
                    if banner:
                        print(
                            f'{str(port).rjust(6)} ---- open {banner[:18]}')
                    else:
                        banner = self.getSocketInfo(port)
                        if banner:
                            print(
                                f'{str(port).rjust(6)} ---- open {banner[:18]}'
                            )
                        else:
                            print(f'{str(port).rjust(6)} ---- open ')
                self.mutex.release()
        except Exception as e:
            pass

    def _start(self):
        # Scan every port with 100 threads; .get(0xffff) makes the wait a
        # timed one so Ctrl+C can reach the main thread.
        try:
            print(f'正在扫描地址: {socket.gethostbyname(self.target)}\n')
            pool = ThreadPool(processes=100)
            pool.map_async(self.run, self.ports).get(0xffff)
            pool.close()
            pool.join()
            print(f'扫描完成耗时: {time() - self.time} 秒.\n')
        except Exception as e:
            print(e)
        except KeyboardInterrupt:
            print('用户终止扫描...')
            sys.exit(1)

    def scanRecords(self):
        # Interactive chooser: pick which resolved IPs to scan.
        inputNums = input(
            '\n请输入需要扫描的序号\n输入 0 代表全部扫描 , 不输入代表取消扫描 \n扫描特定端口时 , 请输入端口前面的序号 , 各个序号用空格分隔\n请输入:'
        ).strip()
        if inputNums == '':
            print('用户终止扫描...')
            sys.exit(1)
        if inputNums == '0':
            # Scan every resolved record, reusing the CLI port range.
            for (i, ip) in enumerate(self.dnsRecords):
                print(f'\n第{i+1}个 IP 开始扫描...')
                Scanner(ip, sys.argv[2], sys.argv[3]).checkTarget()
        else:
            recordsLen = len(self.dnsRecords)
            nums = inputNums.split(sep=' ')
            if len(nums) < 1:
                print('用户终止扫描...')
                sys.exit(1)
            for (i, num) in enumerate(nums):
                num = int(num)
                print(f'\n第{i+1}个 IP 开始扫描...')
                if num > recordsLen:
                    print('序号输入违法...')
                    continue
                Scanner(self.dnsRecords[num - 1], sys.argv[2],
                        sys.argv[3]).checkTarget()

    def checkTarget(self):
        # Dispatch: bare IPv4 → scan directly; otherwise resolve the domain
        # and, when several IPs come back, let the user pick which to scan.
        ipRegex = re.compile(
            '^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$'
        )
        if ipRegex.match(self.target):
            self._start()
        elif not self.checkCdn():
            print('该域名无 DNS 解析纪录...')
            sys.exit(1)
        else:
            print('域名解析的 IP 如下:')
            for (i, ip) in enumerate(self.dnsRecords):
                print(str(i + 1) + ' : ' + ip)
            self.scanRecords()
class ProgressBar:
    """Terminal progress bar drawn with Unicode block characters.

    Optionally shows a percentage, a current/max count and elapsed time.
    update() serialises rendering with a lock so it can be driven from
    several threads.
    """

    # `global` makes charset a module-level name so _print's bare `charset`
    # reference resolves; kept as-is for backward compatibility.
    global charset
    charset = "▏▎▍▌▋▊▉█"

    def __init__(self, maxCount, maxLength=50, printCount=True,
                 printPercentage=True, printTime=False):
        self.lock = ThreadLock()       # serialises concurrent update() calls
        self.maxCount = maxCount       # total number of steps
        self.currentCount = 0
        self.maxLength = maxLength     # bar width in characters
        self.printCount = printCount
        self.printPercentage = printPercentage
        self.printTime = printTime
        self.startTime = time.time()   # reference point for elapsed time

    def begin(self):
        """Render the empty bar."""
        self.update(0)

    def update(self, currentCount, maxCount=None):
        """Set the current progress (optionally a new maximum) and redraw."""
        self.currentCount = currentCount
        if maxCount:
            self.maxCount = maxCount
        # `with` guarantees the lock is released even if rendering raises
        # (the old acquire()/release() pair leaked it on error).
        with self.lock:
            self._print()
        return

    def grow(self, growCount=1):
        """Advance the bar by growCount steps."""
        return self.update(self.currentCount + growCount)

    def _print(self):
        # Clamp progress into [0, 1] so out-of-range counts can't break
        # the drawing math.
        percentage = float(self.currentCount) / float(self.maxCount)
        percentage = max(0., percentage)
        percentage = min(1., percentage)
        length = float(self.maxLength) * percentage
        intPart = math.floor(length)
        restPart = length - intPart
        string = charset[-1] * intPart                   # full cells
        if restPart > 0.01:
            # Fractional cell: pick the matching partial-block glyph.
            string += charset[math.floor(restPart * len(charset))]
        string += ' ' * (self.maxLength - len(string))   # fill with blank
        string += charset[0]                             # right border
        if self.printPercentage:
            head = "%3.1f" % (percentage * 100.,)
            head = (5 - len(head)) * ' ' + head
            string = head + "% " + string
        if self.printCount:
            countStr = str(self.currentCount) + '/' + str(self.maxCount)
            string += countStr
        if self.printTime:
            secondsSpent = math.floor(time.time() - self.startTime)
            if secondsSpent < 3600:
                timeStr = time.strftime('%M:%S', time.gmtime(secondsSpent))
            elif secondsSpent < 3600 * 24:
                timeStr = time.strftime('%H:%M:%S',
                                        time.gmtime(secondsSpent))
            else:
                days = secondsSpent // (3600 * 24)
                seconds = secondsSpent % (3600 * 24)
                # BUGFIX: was `int(days) + time.strftime(...)` — int + str
                # raises TypeError once the bar runs for more than a day.
                timeStr = str(int(days)) + time.strftime(
                    'days %H:%M:%S', time.gmtime(seconds))
            string += ' (' + timeStr + ')'
        # \r prefixes/suffixes keep the bar redrawing on a single line.
        string = '\r ' + string + '\r'
        print(string, end='')

    def __del__(self):
        # Leave the cursor on a fresh line when the bar is dropped.
        print()
class mmonly:
    # Image-site crawler (www.mmonly.cc): collects gallery sub-page URLs into
    # SQLite, then downloads every image through thread pools.
    # NOTE(review): Python 2 code (`str.decode(...).encode(type)` console
    # re-encoding); `type` is presumably a module-level encoding name that
    # shadows the builtin — confirm against the rest of the file.
    # NOTE(review): the original was whitespace-mangled; the nesting below is
    # a best-effort reconstruction and should be verified against history.

    def __init__(self):
        self.ua = UserAgent()
        self.headers = {}
        self.q1 = Queue(300)     # sub-page URLs pending persistence
        self.q2 = Queue(1000)    # image URLs pending download
        self.lock = Lock()       # serialises console printing across threads
        # self.path = 'D:/IMG/'
        self.main_page_urls = []
        self.subpageurls = []
        # Create the two tables on first run; ignore "already exists".
        conn = sqlite3.connect('mmonly.db')
        conn.isolation_level = None
        try:
            conn.execute(
                '''create table subpageurl(url text primary key not null)''')
            conn.execute(
                '''create table imgurl(url text primary key not null)''')
        except (Exception) as e:
            print('创建表:{}'.format(e).decode('utf-8').encode(type))
        finally:
            conn.close()
        self.rootpath = os.getcwd().replace('\\', '/')
        self.path = os.path.join(self.rootpath, 'imges/')
        if not os.path.exists(self.path):
            os.mkdir(self.path)

    def get_mainpage_urls(self, inurl):
        # Collect every paginated listing URL by reading the "last page"
        # link's page number.
        self.headers['User-Agent'] = self.ua.random
        try:
            req = requests.get(inurl, headers=self.headers, timeout=10)
            req.encoding = 'gbk'
            cont = req.text
            content = pq(cont)
            elem = list(content('div #pageNum').children('a').items())
            for ele in elem:
                if ele.text() == '末页':
                    pgnum = int(ele.attr('href').split('_')[-1].split('.')[0])
                    spurl = inurl.split('_')
                    for i in range(1, pgnum + 1):
                        self.main_page_urls.append('{}_{}_{}.html'.format(
                            spurl[0], spurl[1], str(i)))
            print('主页计算完毕!!'.decode('utf-8').encode(type))
        except (Exception) as e:
            self.lock.acquire()
            print('主页读取错误:{}'.format(e).decode('utf-8').encode(type))
            self.lock.release()
        return

    def get_subpage_urls(self, inurl):
        # Extract each gallery link on one listing page and queue it on q1.
        self.headers['User-Agent'] = self.ua.random
        try:
            req = requests.get(inurl, headers=self.headers, timeout=10)
            req.encoding = 'gbk'
            cont = req.text
            content = pq(cont)
            elems = list(content('div .ABox').children('a').items())
            for ele in elems:
                url = ele.attr('href')
                self.q1.put(url)
                print('取得子页面地址:{}'.format(url).decode('utf-8').encode(type))
        except (Exception) as e:
            self.lock.acquire()
            print('遍历主页面读取错误:{}'.format(e).decode('utf-8').encode(type))
            self.lock.release()
        return

    def savesuburl(self):
        # Drain q1 into memory, then persist all sub-page URLs into the
        # subpageurl table.
        while 1:
            try:
                suburl = self.q1.get(timeout=20)
                self.subpageurls.append(suburl)
                print('列表存入子页面:{}'.format(suburl).decode('utf-8').encode(type))
            except (Exception) as e:
                print('读取子页面url:{}'.format(e).decode('utf-8').encode(type))
                time.sleep(2)
                # Double-check after a pause before deciding the queue is done.
                if self.q1.empty():
                    time.sleep(2)
                    if self.q1.empty():
                        break
        conn = sqlite3.connect('mmonly.db')
        cur = conn.cursor()
        time.sleep(4)
        print('开始将子页面url写入数据库'.decode('utf-8').encode(type))
        for date in self.subpageurls:
            try:
                cur.execute('insert into subpageurl values(?)', (date, ))
                print('写入:{}'.format(date).decode('utf-8').encode(type))
            except (Exception) as er:
                # Primary-key conflicts (duplicates) land here.
                print('写入数据库错误:{}'.format(er).decode('utf-8').encode(type))
        conn.commit()
        conn.close()
        print('写入完毕!!'.decode('utf-8').encode(type))

    def get_img_url(self, inurl):
        # For one gallery: read its page count, then queue the download URL
        # of every image page on q2.
        self.headers['User-Agent'] = self.ua.random
        try:
            req = requests.get(inurl, headers=self.headers, timeout=10)
            time.sleep(0.2)
            req.encoding = 'gbk'
            cont = req.text
            content = pq(cont)
            imgnum = int(content('.totalpage').text())
            urlsp = '.'.join(inurl.split('.')[:-1])
            for n in range(1, imgnum + 1):
                imgpage = '{}_{}.html'.format(urlsp, n)
                self.headers['User-Agent'] = self.ua.random
                try:
                    req = requests.get(imgpage,
                                       headers=self.headers,
                                       timeout=10)
                    time.sleep(0.3)
                    req.encoding = 'gbk'
                    cont = req.text
                    content = pq(cont)
                    imgurl = content('.down-btn').attr('href')
                    self.q2.put(imgurl)
                except (Exception) as ee:
                    print('get图片url错误:{}'.format(ee).decode('utf-8').encode(
                        type))
                print(
                    'get图片url:{}'.format(imgurl).decode('utf-8').encode(type))
        except (Exception) as e:
            print('get图片页面地址错误:{}'.format(e).decode('utf-8').encode(type))
        return

    def download(self, inurl):
        # Download one image, skipping files that already exist on disk.
        # inurl = q.get(timeout=10)
        na = inurl.split('/')
        imgname = '{}{}'.format(na[-2], na[-1])
        imgpath = '{}{}'.format(self.path, imgname)
        statu = os.path.exists(imgpath)
        if not statu:
            self.headers['User-Agent'] = self.ua.random
            try:
                req = requests.get(inurl, headers=self.headers,
                                   timeout=8).content
                with open(imgpath, 'wb') as f:
                    f.write(req)
                self.lock.acquire()
                print('下载图片:{}'.format(imgname).decode('utf-8').encode(type))
                self.lock.release()
            except (Exception) as e:
                self.lock.acquire()
                print('下载错误:{}'.format(e).decode('utf-8').encode(type))
                self.lock.release()
        else:
            self.lock.acquire()
            print('重复图片:{}'.format(imgname).decode('utf-8').encode(type))
            self.lock.release()

    def run(self, inurl):
        # Interactive entry point: 1 = collect pages, 2 = download images.
        # NOTE(review): eval(input(...)) executes arbitrary user input.
        ch = eval(
            input('输入1表示采集页面\n输入2表示下载图片\n输入3退出程序\n输入:'.decode('utf-8').encode(
                type)))
        if ch == 1:
            self.get_mainpage_urls(inurl)
            time.sleep(4)
            pool1 = Pool(20)
            for mainurl in self.main_page_urls:
                pool1.apply_async(self.get_subpage_urls, (mainurl, ))
            time.sleep(1)
            self.savesuburl()
            pool1.close()
            pool1.join()
            print('子页面采集完毕!!!'.decode('utf-8').encode(type))
            self.run('http://www.mmonly.cc/mmtp/list_9_2.html')
        elif ch == 2:
            conn = sqlite3.connect('mmonly.db')
            cur = conn.cursor()
            pool2 = Pool(10)    # gallery-page scrapers
            pool3 = Pool(30)    # image downloaders
            cur.execute('select * from subpageurl')
            suburls = cur.fetchall()
            while 1:
                for nn in range(200):
                    try:
                        for i in suburls:
                            pool2.apply_async(self.get_img_url, i)
                            cur.execute('delete from subpageurl where url=?',
                                        i)
                        while 1:
                            img = self.q2.get(timeout=20)
                            pool3.apply_async(self.download, (img, ))
                    except (Exception) as e:
                        print('数据库读取子页面url:{}'.format(e).decode(
                            'utf-8').encode(type))
                        time.sleep(2)
                        if self.q2.empty():
                            time.sleep(2)
                            if self.q2.empty():
                                break
                    # Re-read the remaining sub-page URLs for the next pass.
                    conn.commit()
                    conn.close()
                    conn = sqlite3.connect('mmonly.db')
                    cur = conn.cursor()
                    cur.execute('select * from subpageurl')
                    suburls = cur.fetchall()
                time.sleep(2)
                if self.q2.empty():
                    time.sleep(2)
                    if self.q2.empty():
                        break
            pool3.close()
            pool2.close()
            pool3.join()
            pool2.join()
        else:
            print('结束程序!'.decode('utf-8').encode(type))
class Scanner(object):
    """Port scanner (Portuguese-language build of the same tool): resolves
    the target, checks for CDN, then banner-grabs every open port in
    [startPort, endPort] using a thread pool.

    Relies on module-level DNSServer, ThreadPool, UserAgent, BeautifulSoup,
    requests, dns.resolver, socket, re and sys.
    """

    def __init__(self, target, startPort, endPort):
        self.target = target
        self.startPort = startPort
        self.endPort = endPort
        self.dnsRecords = []     # IPs resolved through the public DNS servers
        self.mutex = Lock()      # serialises printing from worker threads
        self.ports = []
        self.getPorts()
        self.time = time()       # scan start timestamp

    def getPorts(self):
        # Build the inclusive port range to scan.
        for i in range(int(self.startPort), int(self.endPort) + 1):
            self.ports.append(i)

    def checkCdn(self):
        # Resolve through each resolver in the module-level DNSServer list;
        # more than one distinct answer suggests a CDN.
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        try:
            for i in DNSServer:
                myResolver.nameservers = i
                record = myResolver.query(self.target)
                self.dnsRecords.append(record[0].address)
            self.dnsRecords = list(set(self.dnsRecords))
        except Exception as e:
            pass
        finally:
            # Returning from finally deliberately swallows any pending error.
            return True if len(self.dnsRecords) > 1 else False

    def scanPort(self, port):
        # TCP connect scan; True when the port accepts a connection.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            return True if s.connect_ex((self.target, port)) == 0 else False
        except Exception as e:
            pass
        finally:
            s.close()

    def getHttpBanner(self, url):
        # Fetch the page <title> as an HTTP banner; None on any failure.
        try:
            r = requests.get(url,
                             headers={'UserAgent': UserAgent().random},
                             timeout=2,
                             verify=False,
                             allow_redirects=True)
            soup = BeautifulSoup(r.content, 'lxml')
            return soup.title.text.strip('\n').strip()
        except Exception as e:
            pass

    def getSocketInfo(self, port):
        # Raw-socket banner grab for non-HTTP services.
        # NOTE(review): on Python 3, send() requires bytes and recv() returns
        # bytes, so this always raises and falls through to None — confirm.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            s.connect((self.target, port))
            s.send('HELLO\r\n')
            return s.recv(1024).split('\r\n')[0].strip('\r\n')
        except Exception as e:
            pass
        finally:
            s.close()

    def run(self, port):
        # Worker: try HTTP, then HTTPS, then a raw banner; print one line.
        try:
            if self.scanPort(port):
                banner = self.getHttpBanner(f'http://{self.target}:{port}')
                # NOTE(review): if anything below raises, the mutex is never
                # released (no try/finally around the critical section).
                self.mutex.acquire()
                if banner:
                    print(f'{str(port).rjust(6)} ---- open {banner[:18]}')
                else:
                    banner = self.getHttpBanner(
                        f'https://{self.target}:{port}')
                    if banner:
                        print(
                            f'{str(port).rjust(6)} ---- open {banner[:18]}')
                    else:
                        banner = self.getSocketInfo(port)
                        if banner:
                            print(
                                f'{str(port).rjust(6)} ---- open {banner[:18]}'
                            )
                        else:
                            print(f'{str(port).rjust(6)} ---- open ')
                self.mutex.release()
        except Exception as e:
            pass

    def _start(self):
        # Scan every port with 100 threads; .get(0xffff) makes the wait a
        # timed one so Ctrl+C can reach the main thread.
        try:
            print(
                f'Endereço de digitalização: {socket.gethostbyname(self.target)}\n'
            )
            pool = ThreadPool(processes=100)
            pool.map_async(self.run, self.ports).get(0xffff)
            pool.close()
            pool.join()
            print(
                f'Demorado para completar a verificação: {time() - self.time} 秒.\n'
            )
        except Exception as e:
            print(e)
        except KeyboardInterrupt:
            print('O usuário encerrou a varredura ...')
            sys.exit(1)

    def scanRecords(self):
        # Interactive chooser: pick which resolved IPs to scan.
        inputNums = input(
            '\nPor favor, digite o número de série a ser verificado \nInsira 0 para todas as varreduras, se você não inserir, cancela a varredura \nAo escanear uma porta específica, digite o número de série na frente da porta, e cada número de série é separados por um espaço \nPor favor, digite:'
        ).strip()
        if inputNums == '':
            print('O usuário encerrou a varredura ...')
            sys.exit(1)
        if inputNums == '0':
            # Scan every resolved record, reusing the CLI port range.
            for (i, ip) in enumerate(self.dnsRecords):
                print(f'\nPrimeiro{i+1}个 IP Comece a escanear...')
                Scanner(ip, sys.argv[2], sys.argv[3]).checkTarget()
        else:
            recordsLen = len(self.dnsRecords)
            nums = inputNums.split(sep=' ')
            if len(nums) < 1:
                print('O usuário encerra a varredura...')
                sys.exit(1)
            for (i, num) in enumerate(nums):
                num = int(num)
                print(f'\nPrimeiro{i+1}个 IP Comece a escanear...')
                if num > recordsLen:
                    print('Número de série inserido ilegal...')
                    continue
                Scanner(self.dnsRecords[num - 1], sys.argv[2],
                        sys.argv[3]).checkTarget()

    def checkTarget(self):
        # Dispatch: bare IPv4 → scan directly; otherwise resolve the domain
        # and, when several IPs come back, let the user pick which to scan.
        ipRegex = re.compile(
            '^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$'
        )
        if ipRegex.match(self.target):
            self._start()
        elif not self.checkCdn():
            print('O nome de domínio não tem registro de resolução DNS ...')
            sys.exit(1)
        else:
            print('O IP para resolução de nome de domínio é o seguinte:')
            for (i, ip) in enumerate(self.dnsRecords):
                print(str(i + 1) + ' : ' + ip)
            self.scanRecords()
class Scanner(object):
    # Python 2 port scanner with ANSI-coloured output: connect-scans the
    # [start, end] port range and banner-grabs each open port.

    def __init__(self, target, start, end):
        self.target = target
        self.start = start
        self.end = end
        # ANSI colour codes: reset, green, orange, red.
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.time = time()       # scan start timestamp
        self.ports = []
        self.result = []         # DNS answers collected by check_cdn
        self.mutex = Lock()      # serialises printing from worker threads
        self.get_ports()

    def get_ports(self):
        # Build the inclusive port range to scan.
        for i in xrange(int(self.start), int(self.end) + 1):
            self.ports.append(i)

    def check_cdn(self):
        # CDN detection for the target domain: resolve through three public
        # DNS servers; three distinct answers are treated as CDN.
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        dnsserver = [['114.114.114.114'], ['8.8.8.8'], ['223.6.6.6']]
        try:
            for i in dnsserver:
                myResolver.nameservers = i
                record = myResolver.query(self.target)
                self.result.append(record[0].address)
        except:
            pass
        finally:
            # Returning from finally deliberately swallows any pending error.
            if len(set(list(self.result))) == 3:
                return True
            else:
                return False

    def scan_port(self, port):
        # TCP connect scan; True when the port accepts a connection,
        # implicitly None otherwise.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(1.0)
            r = s.connect_ex((self.target, port))
            if r == 0:
                return True
        except Exception as e:
            print e
        finally:
            s.close()

    def get_http_banner(self, url):
        # Fetch the page <title> over HTTP/HTTPS as a banner; None on failure.
        try:
            r = requests.get(url,
                             headers={'UserAgent': UserAgent().random},
                             timeout=2,
                             verify=False,
                             allow_redirects=True)
            soup = BeautifulSoup(r.content, 'lxml')
            return soup.title.text.strip('\n').strip()
        except Exception as e:
            pass

    def get_socket_info(self, port):
        # Raw-socket banner grab for non-HTTP services.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.5)
            s.connect((self.target, port))
            s.send('HELLO\r\n')
            r = s.recv(1024)
            return r.split('\r\n')[0].strip('\r\n')
        except Exception as e:
            pass
        finally:
            s.close()

    def run(self, port):
        # Worker: try HTTP, then HTTPS, then a raw banner; print one line.
        try:
            if self.scan_port(port):
                banner = self.get_http_banner('http://{}:{}'.format(
                    self.target, port))
                # NOTE(review): if a print below raises, the mutex is never
                # released (no try/finally around the critical section).
                self.mutex.acquire()
                if banner:
                    print '{}[+] {} ---- open {}{}'.format(
                        self.G, str(port).rjust(6), banner, self.W)
                else:
                    banner = self.get_http_banner('https://{}:{}'.format(
                        self.target, port))
                    if banner:
                        print '{}[+] {} ---- open {}{}'.format(
                            self.G, str(port).rjust(6), banner, self.W)
                    else:
                        banner = self.get_socket_info(port)
                        if banner:
                            print '{}[+] {} ---- open {}{}'.format(
                                self.G, str(port).rjust(6), banner, self.W)
                        else:
                            print '{}[+] {} ---- open {}'.format(
                                self.G, str(port).rjust(6), self.W)
                self.mutex.release()
        except Exception as e:
            print e

    def _start(self):
        # Scan all ports with a 50-thread pool and report elapsed time.
        try:
            print '-' * 60
            print u'{}[-] 正在扫描地址: {}{} '.format(
                self.O, socket.gethostbyname(self.target), self.W)
            print '-' * 60
            # Thread count.
            pool = ThreadPool(processes=50)
            pool.map(self.run, self.ports)
            pool.close()
            pool.join()
            print '-' * 60
            print u'{}[-] 扫描完成耗时: {} 秒.{}'.format(self.O,
                                                  time() - self.time,
                                                  self.W)
        except Exception as e:
            print e

    def check_target(self):
        # Decide whether the target is an IP or a domain name: a numeric
        # last label means IP → scan directly; otherwise scan only when no
        # CDN is detected.
        flag = self.target.split('.')[-1]
        try:
            # IP address.
            if int(flag) >= 0:
                self._start()
        except:
            # Domain name.
            if not self.check_cdn():
                self._start()
            else:
                print '-' * 60
                print u'{}[-] 目标使用了CDN技术,停止扫描.{}'.format(self.R, self.W)
                print '-' * 60
                sys.exit(1)
class SessionManager:
    """
    SessionManager is in charge of creating and maintaining session pool.

    Python 2 code (``except Exception, e``). Depends on a module-level
    _FileMasterSession class, Pool, Lock, socket, logging, traceback and
    randint.
    """

    def __init__(self, transfermanager, session_count, files_dir='', coo=None):
        """
        Author: David
        Desc:
            maxs defines how much sessions run concurrently.
            curs defines how much sessions are running now.
            wq is for storing delayed jobs due to given maxs.
        """
        self._tm = transfermanager
        self._maxs = session_count
        self._curs = 0
        self._files_dir = files_dir
        self._wq = []        # delayed jobs: [callable, arg1, arg2, ...]
        self._pl = Pool()    # process pool that runs the sessions
        self._lock = Lock()  # guards the _curs counter
        self._coo = coo

    def __call__(self, *args):
        """
        Author: David
        Desc:
            When a SessionManager instance is called, it will maintain
            the current session count and fetch another job from wq if
            wq is not empty.
        """
        # Used as the pool's completion callback: one session finished.
        self._lock.acquire()
        self._curs -= 1
        self._lock.release()
        if self._wq:
            # Dequeue a delayed job; element 0 is the callable, the rest
            # are its positional arguments.
            args = self._wq.pop(0)
            args[0](*args[1:])

    def _get_session(self, local, remote_ip, msg_port, port, filename, force,
                     delayed):
        """
        Author: David
        Desc:
            Init a GET session on SERVER, it will try to bind to local ip
            and given port, it will try another port if given port has
            been used and force is not set to True, or it will return
            reject message when force is set to True.
        """
        fms = _FileMasterSession(remote_ip, filename, self._files_dir, False,
                                 self._coo)
        while True:
            try:
                sock = socket.socket()
                sock.bind((local, port))
                sock.listen(1)
            except Exception, e:
                logging.warning(traceback.format_exc())
                # NOTE(review): errno 48 is EADDRINUSE on BSD/macOS only;
                # on Linux it is 98 — confirm the deployment platform.
                if e.errno == 48 and force:
                    return (401, port)
                elif e.errno == 48 and not force:
                    # Port taken and not forced: retry on a random port.
                    port = randint(10000, 15000)
                else:
                    return (603, port)
            else:
                break
        try:
            self._lock.acquire()
            # `self` is the completion callback (__call__ above).
            self._pl.apply_async(fms, args=(sock,), callback=self)
            self._curs += 1
            self._lock.release()
        except Exception, e:
            # NOTE(review): if apply_async raises, the lock above is never
            # released.
            logging.warning(traceback.format_exc())
class Scanner(object):
    # Python 2 C-segment web scanner: expands the target to its /24 network
    # and probes HTTP on each host (port 80 or a custom port list), printing
    # status code, Server header and page title.
    # Depends on module-level ThreadPool, Lock, requests, BeautifulSoup,
    # dns.resolver, IPy, gevent, socket, sys and time.

    def __init__(self, target, threads, custom_ports):
        # ANSI colour codes: reset, green, orange, red.
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.custom_ports = custom_ports   # comma-separated ports, or falsy
        self.server = target               # raw target as given (IP or domain)
        self.result = []                   # DNS answers collected by check_cdn
        self.ips = []                      # expanded /24 host list
        self.time = time.time()            # scan start timestamp
        self.threads = threads
        self.lock = Lock()                 # serialises printing from threads
        self.target = self.handle_target() # normalised x.y.z.0/24 network
        self.get_ip_addr()

    def handle_target(self):
        # Normalise the given target to its /24 network string.
        try:
            # Numeric last label → already an IP.
            if int(self.server.split('.')[-1]) >= 0:
                return '.'.join(self.server.split('.')[:3])+'.0/24'
        except:
            # Domain name: resolve it unless it sits behind a CDN.
            if not self.check_cdn():
                return '.'.join(i for i in socket.gethostbyname(self.server).split('.')[:3])+'.0/24'
            else:
                print u'{}[-] 目标使用了CDN, 停止扫描...{}'.format(self.R, self.W)
                sys.exit(1)

    def check_cdn(self):
        # CDN detection: resolve through three public DNS servers; more than
        # one distinct answer is treated as CDN.
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        dnsserver = [['114.114.114.114'],['8.8.8.8'],['223.6.6.6']]
        try:
            for i in dnsserver:
                myResolver.nameservers = i
                record = myResolver.query(self.server)
                self.result.append(record[0].address)
        except:
            pass
        finally:
            # Returning from finally deliberately swallows any pending error.
            return True if len(set(list(self.result))) > 1 else False

    def get_ip_addr(self):
        # Expand the /24 network into individual host addresses.
        for ip in IPy.IP(self.target):
            self.ips.append(ip)

    def get_info(self, ip, port):
        # Probe one host:port over HTTP and print a one-line summary
        # (URL, status code, Server header, page title).
        try:
            url = 'http://{}:{}'.format(str(ip), str(port))
            header = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3)'}
            res = requests.get(url, timeout=1, headers=header, verify=False, allow_redirects=True)
            serv = res.headers['Server'][:17] if 'Server' in str(res.headers) else ''
            title = BeautifulSoup(res.content,'lxml').title.text.strip('\n').strip()[:16]
            result = '{}[+] {}{}{}{}{}'.format(self.G, url.ljust(28), str(res.status_code).ljust(6), serv.ljust(24), title, self.W)
            self.lock.acquire()
            print result
            self.lock.release()
        except Exception as e:
            # Unreachable hosts / non-HTTP services are silently skipped.
            pass

    def start(self, ip):
        # Custom port lists are probed concurrently with gevent greenlets;
        # otherwise only port 80 is checked.
        if self.custom_ports:
            gevents = []
            for port in self.custom_ports.split(','):
                gevents.append(gevent.spawn(self.get_info, ip, port))
            gevent.joinall(gevents)
        else:
            self.get_info(ip, 80)

    def run(self):
        # Fan the host list out over a thread pool; .get(0xffff) turns the
        # blocking wait into a timed wait so Ctrl+C reaches the main thread.
        try:
            pool = ThreadPool(processes=self.threads)
            pool.map_async(self.start, self.ips).get(0xffff)
            pool.close()
            pool.join()
            print '-'*90
            print u'{}[-] 扫描完成耗时: {} 秒.{}'.format(self.O, time.time()-self.time, self.W)
        except Exception as e:
            pass
        except KeyboardInterrupt:
            # Reachable: KeyboardInterrupt is not a subclass of Exception.
            print u'\n[-] 用户终止扫描...'
            sys.exit(1)
class Scanner(object):
    # Python 2 port scanner variant that also appends every open port to the
    # module-level `list_res` list and writes the results to
    # ./file/result/Port_scan.txt at the end of the scan.

    def __init__(self, target, start, end):
        self.target = target
        self.start = start
        self.end = end
        # ANSI colour codes: reset, green, orange, red.
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.time = time()       # scan start timestamp
        self.ports = []
        self.result = []         # DNS answers collected by check_cdn
        self.mutex = Lock()      # serialises printing from worker threads
        self.get_ports()

    def get_ports(self):
        # Build the inclusive port range to scan.
        for i in xrange(int(self.start), int(self.end) + 1):
            self.ports.append(i)

    def check_cdn(self):
        # CDN detection for the target domain. Also normalises the target by
        # stripping a trailing slash and any http(s):// scheme prefix.
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        dnsserver = [['114.114.114.114'], ['8.8.8.8'], ['223.6.6.6']]
        if self.target.endswith('/'):
            self.target = self.target[:-1]
        if self.target.startswith('http://'):
            self.target = self.target[7:]
        if self.target.startswith('https://'):
            self.target = self.target[8:]
        try:
            for i in dnsserver:
                myResolver.nameservers = i
                record = myResolver.query(self.target)
                self.result.append(record[0].address)
        except Exception as e:
            pass
        finally:
            # More than one distinct answer is treated as CDN; returning
            # from finally deliberately swallows any pending error.
            return True if len(set(list(self.result))) > 1 else False

    def scan_port(self, port):
        # TCP connect scan; True when the port accepts a connection.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            return True if s.connect_ex((self.target, port)) == 0 else False
        except Exception as e:
            pass
        finally:
            s.close()

    def get_http_banner(self, url):
        # Fetch the page <title> over HTTP/HTTPS as a banner; None on failure.
        try:
            r = requests.get(url,
                             headers={'UserAgent': UserAgent().random},
                             timeout=2,
                             verify=False,
                             allow_redirects=True)
            soup = BeautifulSoup(r.content, 'lxml')
            return soup.title.text.strip('\n').strip()
        except Exception as e:
            pass

    def get_socket_info(self, port):
        # Raw-socket banner grab for non-HTTP services.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(0.2)
            s.connect((self.target, port))
            s.send('HELLO\r\n')
            return s.recv(1024).split('\r\n')[0].strip('\r\n')
        except Exception as e:
            pass
        finally:
            s.close()

    def run(self, port):
        # Worker: try HTTP, then HTTPS, then a raw banner; print one line
        # and record the open port in the module-level list_res.
        try:
            if self.scan_port(port):
                banner = self.get_http_banner('http://{}:{}'.format(self.target, port))
                # NOTE(review): if a print below raises, the mutex is never
                # released; `out` is assigned but never used.
                self.mutex.acquire()
                if banner:
                    print '{}[+] {} ---- open {}{}'.format(self.G,str(port).rjust(6),
                                                           banner[:18], self.W)
                    out = '{}'.format(str(port).rjust(6))
                    list_res.append(str(port))
                else:
                    banner = self.get_http_banner('https://{}:{}'.format(
                        self.target, port))
                    if banner:
                        out = '{}'.format(str(port).rjust(6))
                        list_res.append(str(port))
                        print '{}[+] {} ---- open {}{}'.format(self.G,str(port).rjust(6), banner[:18], self.W)
                    else:
                        banner = self.get_socket_info(port)
                        if banner:
                            out = '{}'.format(str(port).rjust(6))
                            list_res.append(str(port))
                            print '{}[+] {} ---- open {}{}'.format(self.G,str(port).rjust(6), banner[:18], self.W)
                        else:
                            out = '{}'.format(str(port).rjust(6))
                            list_res.append(str(port))
                            print '{}[+] {} ---- open {}'.format(self.G,str(port).rjust(6), self.W)
                self.mutex.release()
        except Exception as e:
            pass

    def _start(self):
        # Scan all ports with 100 threads, then dump list_res to a file.
        try:
            print '-' * 60
            print u'{}[-] : {}{} '.format(self.O, socket.gethostbyname(self.target), self.W)
            print '-' * 60
            # Thread count.
            pool = ThreadPool(processes=100)
            # .get() takes a timeout so Ctrl+C can be caught.
            pool.map_async(self.run, self.ports).get(0xffff)
            pool.close()
            pool.join()
            fo = open("./file/result/Port_scan.txt","w")
            for i in list_res:
                fo.write(i)
                fo.write('\n')
            fo.close()
            print '-' * 60
            print u'{}[-] : {} .{}'.format(self.O, time() - self.time, self.W)
        except Exception as e:
            print e
        except KeyboardInterrupt:
            # Reachable: KeyboardInterrupt is not a subclass of Exception.
            print self.R + u'\n[-] ...'
            sys.exit(1)

    def check_target(self):
        # Decide whether the target is an IP or a domain name: a numeric
        # last label means IP → scan directly; otherwise scan only when no
        # CDN is detected.
        flag = self.target.split('.')[-1]
        try:
            # IP address.
            if int(flag) >= 0:
                self._start()
        except:
            # Domain name.
            if not self.check_cdn():
                self._start()
            else:
                print '-' * 60
                print u'{}[-] ,.{}'.format(self.R, self.W)
                print '-' * 60
class Linkie:
    """Broken-link checker.

    Walks a directory tree, extracts URLs from matching files, checks each
    URL concurrently with a thread pool, honours HTTP 429 back-off per
    domain, retries ConnectionErrors once, and prints a summary.
    Depends on module-level: THREADS, TIMEOUT, HEADERS, URL_REGEX,
    ThreadPool, Lock, logging, yaml, os, re, time, requests.
    """

    def __init__(self, config=None, config_file_path=None):
        # config: dict of settings; config_file_path: YAML file to load
        # settings from.  Either, both, or neither may be given; an explicit
        # `config` dict wins over the file path.
        self.file_count = 0
        self.status_counts = {}
        self.urls = dict(
        )  # Dictionary of URLs that have been checked, with their broken status and status code
        self.domains = dict(
        )  # Dictionary of URL domains and when they were last requested from (429 code)
        self.unchecked_urls = set()  # Initial set of urls to be checked
        self.delayed_urls = []  # List of urls to be checked later (429 code)
        self.directory = '.'
        self.pool = ThreadPool(THREADS)
        self.lock = Lock()  # guards domains/delayed_urls across pool workers
        if not config and config_file_path:
            logging.info(
                'Using Linkie configuration file {}'.format(config_file_path))
            config = self.read_config(config_file_path)
        elif config:
            logging.info('Using custom Linkie settings via Python constructor')
        elif not config and not config_file_path:
            logging.info('Using default Linkie configuation')
        config = self.check_config(config)
        self.config = self.process_config(config)

    def read_config(self, config_file_path):
        # Load settings from a YAML file.
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files; yaml.safe_load is the usual fix.
        config_file = open(config_file_path, 'r')
        config = yaml.load(config_file)
        config_file.close()
        return config

    def check_config(self, config):
        # Validate the user config (raising TypeError on bad value types)
        # and overlay it onto the built-in defaults.
        default_config = {
            'exclude-directories': [
                '.git/',
                'docs/build/',
            ],
            'file-types': [
                'html',
                'md',
                'rst',
                'txt',
            ],
            'skip-urls': [],
        }
        if config:
            if config.get('exclude-directories'):
                if type(config['exclude-directories']) != list:
                    raise TypeError(
                        'The exclude-directories value should be a list of directories.'
                    )
            if config.get('file-types'):
                if type(config['file-types']) != list:
                    raise TypeError(
                        'The file-types value should be a list of file extensions.'
                    )
            if config.get('skip-urls'):
                if type(config['skip-urls']) != list:
                    raise TypeError(
                        'The skip-urls value should be a list of URLs to skip.'
                    )
            # NOTE(review): `if key in config` is always true while iterating
            # config.keys(); the intent is just to copy user keys over defaults.
            for key in config.keys():
                if key in config:
                    default_config[key] = config[key]
        return default_config

    def process_config(self, config):
        # Normalise paths ('./dir', no trailing slash) and file extensions
        # ('.ext', as a tuple usable with str.endswith).
        exclude_directories = config['exclude-directories']
        for i in range(len(exclude_directories)):
            directory = exclude_directories[i]
            directory = os.path.join('./', directory)
            if directory.endswith('/'):
                directory = directory[:-1]
            exclude_directories[i] = directory
        config['exclude-directories'] = exclude_directories
        file_types = config['file-types']
        for i in range(len(file_types)):
            if not file_types[i].startswith('.'):
                file_types[i] = '.' + file_types[i]
        config['file-types'] = tuple(file_types)
        return config

    def get_domain(self, url):
        # Return everything before the third /
        # i.e https://example.com/subpage/?hello-there&general-kenobi
        # becomes https://example.com
        url_parts = url.split('/')
        return '/'.join(url_parts[:3])

    def count_broken_links(self):
        # Number of checked URLs recorded as broken.
        count = 0
        for url, url_data in self.urls.items():
            if url_data['broken']:
                count += 1
        return count

    def run(self):
        # Full pipeline; returns 1 (broken links found) or 0, suitable as an
        # exit code.
        self.traverse_directory()
        self.traverse_connection_errors()
        self.print_summary()
        if self.count_broken_links():
            return 1
        else:
            return 0

    def traverse_directory(self):
        # Collect URLs from every matching file, then check them all,
        # re-queueing 429-delayed URLs until none remain.
        for directory_root, directories, files in os.walk(self.directory):
            # Remove directories in exclude list
            processed_directories = []
            for directory in directories:
                directory_path = os.path.join(directory_root, directory)
                if directory_path not in self.config['exclude-directories']:
                    processed_directories.append(directory)
            # In-place assignment so os.walk skips the pruned directories.
            directories[:] = processed_directories
            for filename in files:
                if filename.endswith(self.config['file-types']):
                    self.search_file(os.path.join(directory_root, filename))
        self.pool.map(self.check_link, self.unchecked_urls)
        repeat_count = 1
        max_repeats = 1000000
        while len(self.delayed_urls) > 0 and repeat_count <= max_repeats:
            # Many iterations are expected because the timeout may still be going each time this repeats itself, so the pool map will end immediately
            # Only uncomment this line if debugging locally
            # print('Retrying delayed urls **MANY ITERATIONS ARE EXPECTED** #{}'.format(repeat_count), end='\r')
            repeat_urls = self.delayed_urls[:]
            self.delayed_urls = []
            self.pool.map(self.check_link, repeat_urls)
            repeat_count += 1
        if repeat_count > max_repeats:
            logging.critical(
                "Infinite loop in retrying delayed urls. The timeout period can't have ended!"
            )

    def traverse_connection_errors(self):
        # Give URLs that failed with ConnectionError one more pass: drop
        # them from the results and re-check with a fresh, smaller pool.
        connect_errors = []
        for url, url_data in self.urls.items():
            if str(url_data['status']) == 'ConnectionError':
                connect_errors.append(url)
        for url in connect_errors:
            self.urls.__delitem__(url)
        if len(connect_errors):
            logging.info(
                'Rechecking {} link{} that returned ConnectionError... '.
                format(len(connect_errors),
                       's' if len(connect_errors) != 1 else ''))
            self.pool = ThreadPool(min(THREADS, len(connect_errors)))
            self.pool.map(self.check_link, connect_errors)

    def search_file(self, file_path):
        # Extract candidate URLs from one file into self.unchecked_urls and
        # initialise their domains' back-off timestamps to "not delayed" (-1).
        self.file_count += 1
        file_message = 'Checking file {} for URLs... '.format(file_path)
        file_object = open(file_path, 'r')
        file_contents = file_object.read()
        file_object.close()
        urls = re.findall(URL_REGEX, file_contents)
        logging.info('{}{} URL{} found'.format(file_message, len(urls),
                                               's' if len(urls) != 1 else ''))
        for url in urls:
            # Remove trailing characters
            url = url.rstrip('> !"#$%&\'*+,-./@:;=^_`|~').lstrip(' <(=\"')
            # Remove extra trailing bracket if link containing brackets
            # within Markdown link syntax.
            # [Wikipedia link](http://foo.com/blah_blah_(wikipedia))
            if url.count('('):
                url += url.count('(') * ')'
            self.domains[self.get_domain(url)] = -1
            self.unchecked_urls.add(url)

    def check_link(self, url):
        # Check one URL (pool worker).  Respects per-domain 429 back-off:
        # if the domain is still in its TIMEOUT window, the URL is pushed
        # onto delayed_urls and retried by traverse_directory's loop.
        domain = self.get_domain(url)
        self.lock.acquire()
        time_at_429 = self.domains[domain]
        # Domain is ready if it never hit 429 (-1) or the timeout elapsed.
        is_ready = time_at_429 < 0 or time.perf_counter(
        ) - time_at_429 > TIMEOUT
        if is_ready:
            self.domains[domain] = -1
        self.lock.release()
        if not is_ready:
            # Put the url back to be checked later
            self.lock.acquire()
            self.delayed_urls.append(url)
            self.lock.release()
            return
        message = ' - '
        if url in self.config['skip-urls']:
            message += 'Skipping URL (as defined in config file)'
        elif url not in self.urls:
            try:
                status_code = requests.head(url, headers=HEADERS).status_code
                # If response doesn't allow HEAD request, try GET request
                if status_code >= 400:
                    status_code = requests.get(url,
                                               headers=HEADERS).status_code
            # If connection error
            except Exception as e:
                # Exception class name (e.g. 'ConnectionError') stands in
                # for a numeric status code.
                status_code = str(type(e).__name__)
            if type(status_code) == str:
                message += '{}'.format(status_code)
            else:
                message += 'Status {}'.format(status_code)
            if type(status_code) == str or status_code >= 400:
                if status_code == 429:  # Too many requests
                    message += " => Delaying requests to the domain {} for {} seconds".format(
                        domain, TIMEOUT)
                    self.lock.acquire()
                    # Save the time the request was made
                    self.domains[domain] = time.perf_counter()
                    # Put the url back to be checked again later
                    self.delayed_urls.append(url)
                    self.lock.release()
                else:
                    self.save_url(url, status_code, True)
            else:
                self.save_url(url, status_code, False)
        else:
            message += '{} (already checked)'.format(self.urls[url]['status'])
        message += ' = {}'.format(url)
        logging.info(message)

    def save_url(self, url, status_code, broken):
        # Record the final verdict for a URL.
        self.urls[url] = {
            'broken': broken,
            'status': status_code,
        }

    def collect_status_counts(self):
        # Aggregate per-status-code counts (keys are stringified statuses).
        for _, url_data in self.urls.items():
            status_code = str(url_data['status'])
            self.status_counts[status_code] = self.status_counts.get(
                status_code, 0) + 1

    def print_summary(self):
        # Log the final report: files checked, URLs found, broken links,
        # and a per-status breakdown.
        number_broken_links = self.count_broken_links()
        self.collect_status_counts()
        logging.info('=============================================')
        logging.info('SUMMARY')
        logging.info('=============================================')
        logging.info('{} file{} checked'.format(
            self.file_count, 's' if self.file_count != 1 else ''))
        logging.info('{} unique URL{} found'.format(
            len(self.urls), 's' if len(self.urls) != 1 else ''))
        logging.info('{} broken link{} found'.format(
            number_broken_links, 's' if number_broken_links != 1 else ''))
        logging.info('---------------------------------------------')
        logging.info('Status code counts')
        logging.info('---------------------------------------------')
        for status in sorted(self.status_counts.keys()):
            logging.info('{}: {}'.format(status, self.status_counts[status]))
        # NOTE(review): status_counts keys are strings, so the int 999 can
        # never be present — this branch is dead as written.
        if 999 in self.status_counts:
            logging.info('Status 999 refers to a connection error.')
        logging.info('---------------------------------------------')
        logging.info('Broken links')
        logging.info('---------------------------------------------')
        if number_broken_links:
            for url, url_data in self.urls.items():
                if url_data['broken']:
                    logging.info('{}: {}'.format(url_data['status'], url))
        else:
            logging.info('No broken links found!')
class PageShelveCacher:
    """Base class for objects caching fetched pages in a ``shelve`` db.

    Entries are stored under a key derived from URL + POST params (see
    ``get_request_db_key``); each value is a one-item dict mapping the
    compress type tag to the page payload.  Writes may be staged in an
    in-memory transaction (``start_transaction``) and applied atomically
    with ``commit``.
    """

    def __init__(self, db_file_path='http_lib_cache.db'):
        # Tag under which page payloads are stored inside each entry.
        self.compress_type = strRAW_PAGE
        # writeback=True keeps mutated entries in memory until sync().
        self.db = shelve.open(str(db_file_path), writeback=True)
        self.db_lock = Lock()  # serialises writers in save_db_page()
        self.rollback()
        self.page_root_node = self.db

    def __del__(self):
        # Fix: guard the close — __del__ can run during interpreter
        # teardown, or before self.db exists if __init__ failed part-way.
        try:
            self.db.close()
        except Exception:
            pass

    def reset_db(self):
        # Fix: close the backing shelve before dropping it, otherwise the
        # open file handle (and its writeback cache) leaks.  Repoint the
        # page root at the fresh in-memory dict so the object stays usable.
        try:
            self.db.close()
        except Exception:
            pass
        self.db = {}
        self.page_root_node = self.db

    def start_transaction(self):
        """Discard any staged data and begin a new in-memory transaction."""
        self.rollback()
        self.transact_data = {}

    def commit(self):
        """Apply staged entries to the page root and flush to disk.

        An empty (or absent) transaction is a no-op.
        """
        if not self.transact_data:
            return
        self.page_root_node.update(self.transact_data)
        self.rollback()
        self.sync()

    def rollback(self):
        """Drop any staged transaction data."""
        self.transact_data = None

    def sync(self):
        # Fix: after reset_db() the backing store is a plain dict with no
        # sync(); only flush when the store supports it.
        if hasattr(self.db, 'sync'):
            self.db.sync()

    def get_save_root_node(self):
        # Writes go to the staged transaction when one is active.
        # NOTE(review): a freshly started (still empty) transaction is
        # falsy, so the first save of a transaction bypasses staging —
        # preserved as-is since commit() treats empty txns the same way.
        if self.transact_data:
            return self.transact_data
        return self.page_root_node

    def save_db_page(self, url, post_params, page):
        """Store ``page`` for (url, post_params) under the compress tag."""
        key = get_request_db_key(url, post_params)
        # Fix: hold the lock with a context manager so it is released even
        # if the write raises (the acquire()/release() pair leaked the lock
        # on error).  Also dropped the unused post_params_to_str() result.
        with self.db_lock:
            page_root_node = self.get_save_root_node()
            page_root_node[key] = {self.compress_type: page}

    def load_db_page(self, url, post_params=None):
        """Return the cached page for (url, post_params), or None."""
        page = None
        key = get_request_db_key(url, post_params)
        try:
            page_node = self.page_root_node.get(key, None)
        except Exception:  # fix: narrowed from a bare except
            page_node = None
        if page_node:
            page = page_node[self.compress_type]
        return page
class EmailScanner(object):
    """Email credential brute-forcer (Python 2).

    For each username (literal or one-per-line file), tries every password
    from ./dict/pwd.txt against the host over POP3, SMTP or IMAP, with
    optional SSL, using a thread pool.  Hits are collected in self.result.
    Depends on module-level: time, os, sys, Lock, ThreadPool, poplib,
    smtplib, imaplib.
    """

    def __init__(self, host, user, protocol, threads, ssl):
        # host: mail server; user: username or path to a username file;
        # protocol: 'pop3' | 'smtp' | anything else -> imap;
        # threads: pool size; ssl: use the SSL variant of the protocol.
        # ANSI colour escape codes for console output.
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.time = time.time()  # start timestamp for the elapsed report
        self.host = host
        self.user = user
        self.proto = protocol
        self.uname = ''      # username currently being brute-forced
        self.pwd = []        # password candidates for the current user
        self.result = []     # '[+] user pwd' strings for successful logins
        self.ssl = ssl
        self.thread = threads
        self.lock = Lock()   # serialises progress printing across workers

    def get_user(self):
        # Yield usernames: one per line if self.user is a file path,
        # otherwise self.user itself.
        try:
            if os.path.isfile(self.user):
                with open(self.user) as f:
                    for i in f.readlines():
                        yield i.strip()
            else:
                yield self.user
        except Exception as e:
            print e

    def get_pwd(self, user):
        # Load ./dict/pwd.txt (relative to the script), substituting the
        # literal '{user}' placeholder with the current username.
        try:
            path = os.path.dirname(os.path.abspath('{}'.format(sys.argv[0])))
            with open(path + '/dict/pwd.txt') as f:
                for i in f.readlines():
                    self.pwd.append(i.strip().replace('{user}', user))
            return self.pwd
        except Exception as e:
            print e

    def pop(self, user, pwd):
        # Try one POP3 login; record the pair on a '+OK' PASS response.
        try:
            server = poplib.POP3_SSL(self.host) if self.ssl else poplib.POP3(
                self.host)
            server.user(user)
            if '+OK' in server.pass_(pwd):
                print '{}[+] 发现一个用户: {} {}{}'.format(self.G, user, pwd,
                                                     self.W)
                self.result.append('[+] {} {}'.format(user, pwd))
        except Exception as e:
            pass
        finally:
            # NOTE(review): if the POP3 constructor raised, `server` is
            # unbound here and this quit() raises NameError.
            server.quit()

    def smtp(self, user, pwd):
        # Try one SMTP login; record the pair on a ' successful' reply.
        try:
            server = smtplib.SMTP_SSL(self.host) if self.ssl else smtplib.SMTP(
                self.host)
            if ' successful' in server.login(user, pwd)[1]:
                print '{}[+] 发现一个用户: {} {}{}'.format(self.G, user, pwd,
                                                     self.W)
                self.result.append('[+] {} {}'.format(user, pwd))
        except Exception as e:
            pass
        finally:
            # NOTE(review): same unbound-`server` hazard as pop().
            server.quit()

    def imap(self, user, pwd):
        # Try one IMAP login; record the pair on an ' OK' response.
        try:
            server = imaplib.IMAP4_SSL(
                self.host) if self.ssl else imaplib.IMAP4(self.host)
            if ' OK' in server.login(user, pwd):
                print '{}[+] 发现一个用户: {} {}{}'.format(self.G, user, pwd,
                                                     self.W)
                self.result.append('[+] {} {}'.format(user, pwd))
        except Exception as e:
            pass
        finally:
            # NOTE(review): same unbound-`server` hazard as pop().
            server.logout()

    def handle(self, user, pwd):
        # Print progress (under the lock) and dispatch to the protocol.
        self.lock.acquire()
        print u'[-] 正在尝试: {} {}'.format(user.ljust(12), pwd)
        self.lock.release()
        if self.proto == 'pop3':
            self.pop(user, pwd)
        elif self.proto == 'smtp':
            self.smtp(user, pwd)
        else:
            self.imap(user, pwd)

    def start(self, pwd):
        # Pool worker: try one password for the current username.
        try:
            self.handle(self.uname, pwd)
        except Exception as e:
            pass

    def start_thread(self):
        # Brute-force all passwords for self.uname with a thread pool.
        try:
            pool = ThreadPool(processes=self.thread)
            # .get(timeout) instead of a plain map so Ctrl+C is catchable.
            pool.map_async(self.start, self.get_pwd(self.uname)).get(0xffff)
            pool.close()
            pool.join()
        except Exception as e:
            pass
        except KeyboardInterrupt:
            print u'\n[-] 用户终止扫描...'
            sys.exit(1)

    def run(self):
        # Iterate usernames (the StopIteration from user.next() ends the
        # loop via the broad except) and print the final report.
        user = self.get_user()
        try:
            while True:
                self.uname = user.next()
                self.start_thread()
                self.pwd = []  # reset candidates for the next username
        except Exception as e:
            pass
        finally:
            print '-' * 67
            print u'{}[-] 扫描完成耗时: {} 秒.{}'.format(self.O,
                                                  time.time() - self.time,
                                                  self.W)
            print '-' * 67
            if self.result:
                for i in self.result:
                    print self.G + i + self.W
class Scanner(object):
    """Scan the /24 derived from ``target`` for open ports and HTTP titles.

    For each port (distributed over a thread pool), every host .1-.255 in
    the target's /24 is probed concurrently with gevent greenlets; open
    ports get an HTTP GET and the page title/server are printed.
    Depends on module-level: time, Lock, ThreadPool, socket, requests,
    gevent, sys, TIMEOUT, charset_p, pattern.
    """

    def __init__(self, target, threads):
        # target: any IPv4 address inside the /24 to scan; threads: pool size.
        self.target = target
        self.ips = []
        self.time = time.time()  # start timestamp
        self.threads = threads
        self.lock = Lock()       # serialises console output
        self.get_ip_addr()

    def get_ip_addr(self):
        """Fill self.ips with hosts .1-.255 of the target's /24."""
        prefix = '.'.join(self.target.split('.')[:-1])
        self.ips = [prefix + '.' + str(x) for x in range(1, 256)]

    def check_port(self, ip, port):
        """TCP-connect one host:port; on success hand off to get_title."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(TIMEOUT)
        try:
            s.connect((ip, port))
        except Exception:
            return False
        else:
            self.get_title(ip, port)
        finally:
            s.close()  # fix: the socket was never closed (fd leak)

    def get_title(self, ip, port):
        """Fetch http://ip:port and print '<server> <status> <title>'."""
        url = 'http://{}:{}'.format(ip, port)
        headers = {
            'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3)'
        }
        try:
            response = requests.get(url,
                                    verify=False,
                                    headers=headers,
                                    timeout=TIMEOUT,
                                    allow_redirects=True)
        except Exception:
            result = 'Error to read title'
        else:
            # Fix: .get avoids a KeyError when Content-Type is missing.
            content_type = response.headers.get('Content-Type', '')
            charset = charset_p.findall(content_type)
            if charset:
                # Fix: findall returns a list; the first match is the
                # charset name (the list itself was being assigned before).
                response.encoding = charset[0]
            server = response.headers.get('Server', '')
            # Fix: run the title regex once instead of twice.
            titles = pattern.findall(response.text)
            title = titles[0] if titles else ''
            result = '{} {} {}'.format(server, response.status_code, title)
        # Fix: context manager releases the lock even if print raises.
        with self.lock:
            print('{}:{}'.format(ip, port).ljust(24), end=' Open ')
            print(result)

    def start(self, port):
        """Probe one port across all 255 hosts concurrently (greenlets)."""
        jobs = [gevent.spawn(self.check_port, ip, port) for ip in self.ips]
        gevent.joinall(jobs)

    def run(self):
        """Drive the scan over every TCP port with a thread pool."""
        try:
            pool = ThreadPool(processes=self.threads)
            print('程序开始')
            # Fix: range(1, 65535) skipped port 65535.
            # .get(timeout) instead of a plain map so Ctrl+C is catchable.
            pool.map_async(self.start, list(range(1, 65536))).get(0xffff)
            pool.close()
            pool.join()
        except KeyboardInterrupt:
            print('用户停止')
            sys.exit(1)
        except Exception as e:
            print(e)
class Deploy(object):
    """Manage data and actions relevant to deploying code.

    Pushes a release candidate's code + virtualenv tarballs to the chef
    nodes matching the requested roles/environment, runs the optional
    pre-activate hook on a migration node, then cuts symlinks over and
    bounces uwsgi/supervisor tasks on every node.
    Depends on module-level: release_candidate, project, chef_role, Lock,
    Pool, MAX_NUM_WORKERS, log, get_toplevel_dir_from_tarball,
    run_as_subprocess, _push_and_untar.
    """

    def __init__(self, release_candidate_id, chef_api=None, deploy_user='******'):
        self.chef_api = chef_api
        self.chef_app_prefs = None
        self.deploy_user = deploy_user
        # TODO: get this from the project model
        # turn app:app_rev into tarball url
        self.rc = release_candidate.get_by_id(release_candidate_id)
        self.project = project.get_by_id(self.rc['project_id'])
        self.lock = Lock()        # serialises logging from pool workers
        self.nodes = []           # chef nodes targeted by the deploy
        self.code_dir = None      # top-level dir inside the code tarball
        self.venv_dir = None      # top-level dir inside the venv tarball

    @property
    def available_roles(self):
        """Get the list of available roles for this application.

        Roles are optional and the lack of them should return an empty
        list in which case every node can be deployed to.  Throws if the
        app config hasn't been loaded from chef.
        """
        return chef_role.mget_by_project(self.project['project_id'])

    def deploy(self, env=None, roles=None, run_pre_activate_scripts=False):
        """Perform a deploy: validate roles, transfer tarballs, run the
        pre-activate hook, then activate on all nodes.
        """
        # does this app_rev exist for this app?
        # TODO: should we raise here?
        self.code_dir = get_toplevel_dir_from_tarball(
            self.rc['code_tarball_location'])
        self.venv_dir = get_toplevel_dir_from_tarball(
            self.rc['venv_tarball_location'])
        if not self.code_dir or not self.venv_dir:
            return None

        # Fix: the default roles=None used to crash the comprehensions below.
        roles = roles or []

        # Are the roles valid?
        target_role_names = set([r['chef_role_name'] for r in roles])
        available_role_names = set(
            [r['chef_role_name'] for r in self.available_roles])
        if (target_role_names | available_role_names) != available_role_names:
            raise RuntimeError("Attempting to deploy to invalid role: %s" % (
                target_role_names - available_role_names))

        self.load_node_list(roles, env)
        # can we deploy app to these nodes?
        if not self.nodes:
            return None

        # TODO: Upload static assets
        #if 'www' in roles:
        #self.upload_static_assets_to_cdn()

        # transfer tarballs to servers
        self._transfer()

        # Run pre-activate hook script on role migration
        # TODO: Make sure the script exists
        # TODO(review): run_pre_activate_scripts is accepted but never
        # consulted — confirm whether this hook should be gated on it.
        migration_tag = 'gus_migration'
        chef_query = "chef_environment:%s AND tags:%s" % (
            env['environment_name'], migration_tag)
        migration_node = self.chef_api.Search('node', chef_query)
        # NOTE(review): Search presumably returns a result set; indexing it
        # directly with ['automatic'] looks suspect — verify against the
        # chef API in use.
        box = "%s@%s" % (self.deploy_user,
                         migration_node['automatic']['ipaddress_eth1'])
        # Fix: os.path.join takes path components as separate arguments,
        # not a single list.
        script = os.path.join(self.code_dir, self.project['pre_activate_hook'])
        run_as_subprocess('/usr/bin/ssh', box, '"%s"' % script, self.venv_dir)

        # activate on all machines
        self._activate()

        # TODO: Notify Hipchat
        # TODO: Ping statsd, if applicable
        # TODO: kick off smoke tests, if applicable

    def _transfer(self):
        '''Use a pool of workers to push the tarball out to the specified
        nodes
        '''
        # Fix: min(len(self.nodes, MAX_NUM_WORKERS)) called len() with two
        # arguments (TypeError); the cap belongs to min().
        pool = Pool(min(len(self.nodes), MAX_NUM_WORKERS))
        res = pool.map(self._transfer_to_node, self.nodes)
        return res

    def _transfer_to_node(self, node):
        """Push and unpack the code + venv tarballs onto one node, then
        pip-install the app into its virtualenv."""
        # Logging doesn't play nicely with multi-processing, so lock before
        # logging.
        self.lock.acquire()
        log.info("Tranferring tarballs to node: %s", node)
        self.lock.release()
        box = "%s@%s" % (self.deploy_user, node['automatic']['ipaddress_eth1'])
        # Deploy and untar code
        _push_and_untar(box, self.rc['code_tarball_location'],
                        self.project['code_target_dir'])
        # Deploy and untar venv
        _push_and_untar(box, self.rc['venv_tarball_location'],
                        self.project['venv_target_dir'])
        # Load the application into the virtual env
        venv_activate = "source %s/bin/activate" % self.project['venv_target_dir']
        pip_install = "pip install -e %s" % self.project['code_target_dir']
        run_as_subprocess('/usr/bin/ssh', box,
                          '"%s && %s"' % (venv_activate, pip_install))

    def _activate(self):
        """Cut every node over to the freshly transferred release."""
        # Fix: same min/len argument swap as in _transfer().
        pool = Pool(min(len(self.nodes), MAX_NUM_WORKERS))
        res = pool.map(self._activate_node, self.nodes)
        return res

    def _activate_node(self, node):
        """Repoint the app/venv symlinks on one node and bounce services."""
        box = "%s@%s" % (self.deploy_user, node['automatic']['ipaddress_eth1'])
        # Cut the code & venv symlinks over
        run_as_subprocess('/usr/bin/ssh', box,
                          "'unlink /opt/%(pn)s/app && ln -s %(dir)s /opt/%(pn)s/app'"
                          % {'pn': self.project['project_name'],
                             'dir': self.code_dir})
        run_as_subprocess('/usr/bin/ssh', box,
                          "'unlink /opt/%(pn)s/app_virtual_env && " \
                          "ln -s %(dir)s /opt/%(pn)s/app_virtual_env'"
                          % {'pn': self.project['project_name'],
                             'dir': self.venv_dir})
        # Restart uwsgi
        # Fix: check_output takes the command as a single argument list;
        # the old 3-positional-arg call passed `box` as Popen's bufsize.
        raw_status = subprocess.check_output(
            ['/usr/bin/ssh', box, 'supervisorctl status'])
        process_list = raw_status.decode('utf8').split("\n")
        for row in process_list:
            if '%s:uwsgi' % self.project['project_name'] in row:
                match = re.search(r'pid (\d+)', row)
                if match:
                    pid = match.group(1)
                    # HUP gracefully reloads the uwsgi master.
                    run_as_subprocess('/usr/bin/ssh', box,
                                      "sudo kill -HUP %s" % pid)
        # Start all supervisor tasks <project>:task:*
        run_as_subprocess('/usr/bin/ssh', box,
                          '"supervisorctl restart %s:task:*"'
                          % self.project['project_name'])
        # TODO: run status again to check for errors
        return

    def load_node_list(self, roles, env):
        """Resolve the role list into self.nodes via a chef search."""
        # Turn role list into node list
        role_clause = ' OR '.join(
            ['role:%s' % r['chef_role_name'] for r in roles])
        chef_query = "chef_environment:%s AND (%s)" % (env['environment_name'],
                                                       role_clause)
        self.nodes = self.chef_api.Search('node', chef_query)

    def _upload_static_assets_to_cdn(self):
        # Fix: was defined without `self`, so any instance call raised
        # TypeError before reaching the NotImplementedError.
        raise NotImplementedError
class DownLoader(object):
    '''Segmented downloader (Python 2).

    Splits the remote resource into fixed-size blocks; each pool task
    downloads one block with an HTTP Range request and writes it into the
    output file at the right offset, updating a progress bar.
    Depends on module-level: BLOCK_SIZE, DEFAULT_HEADERS, Lock, Pool,
    deepcopy, logger, progressbar widgets.  The file handle ``self.f`` and
    ``self.finished()`` are defined outside this view — presumably by a
    method elsewhere in the class; verify before reuse.
    '''

    def __init__(self, req, url, dirname, filename, block_size=BLOCK_SIZE):
        '''
        req       requests session used for all HTTP calls
        url       download request url
        dirname   download directory name
        filename  download file name
        '''
        self.req = req
        self.url = url
        self.dir_name = dirname
        self.file_name = filename
        self.block_size = block_size
        self.lock = Lock()          # serialises file writes + progress updates
        self.block_done = 0         # number of blocks written so far
        self.length = self.get_length()
        self.block_num = self.get_blocknum()
        self.pool = self.get_pool()
        self.progress_show()
        self.status = 'LOAD'        # 'LOAD' = downloading, 'PAUSE' = suspended
        self.ranges_undo = []       # byte ranges still to download
        self.ranges_done = []       # byte ranges already written

    def get_pool(self, process=None):
        # Worker pool; size defaults to Pool()'s own default when no
        # explicit integer is given.
        if process and isinstance(process, int):
            return Pool(process)
        else:
            return Pool()

    def get_length(self):
        '''Determine the total download length via a HEAD request.

        Sets self.length to Content-Length (0 when missing or on failure)
        and returns it.
        '''
        res = self.req.head(self.url, headers=DEFAULT_HEADERS, verify=False)
        if res.ok:
            self.length = int(res.headers.get('content-length', 0))
            logger.debug(res.headers)
            logger.info(self.length)
            return self.length
        else:
            self.length = 0
            return self.length

    def get_blocknum(self):
        '''Compute the number of blocks (ceiling of length / block_size).'''
        # Python 2 int division; the int() wrapper is redundant but harmless.
        self.block_num = int(self.length + self.block_size -
                             1) / self.block_size  #向上取整 (round up)
        return self.block_num

    def get_ranges(self):
        '''Compute each block's (start, end) byte range for Range requests.

        The last block is clamped to the total length.
        '''
        ranges = []
        for i in range(self.block_num):
            if i == self.block_num - 1:
                ranges.append((i * self.block_size, self.length))
            else:
                ranges.append((i * self.block_size, (i + 1) * self.block_size))
        return ranges

    def progress_show(self):
        '''Build and start the console progress bar.'''
        widgets = ['Progress: ', Percentage(), ' ', Bar(marker=RotatingMarker('>-=')), \
                   ' ', ETA(), ' ', FileTransferSpeed()]
        self.pbar = ProgressBar(widgets=widgets, maxval=self.length).start()

    def download_block(self, range_):
        '''Download one block (pool task) and write it at its file offset.

        Retries the HTTP GET up to 3 times; on success, writes under the
        lock and advances the progress bar.  Does nothing while paused.
        '''
        if self.status == 'LOAD':
            headers = deepcopy(DEFAULT_HEADERS)
            # NOTE(review): the Range header's end byte is inclusive per
            # RFC 7233, while range_ ends are exclusive — this re-fetches
            # one overlapping byte per block; harmless since each block is
            # seek()-written at its own offset, but worth confirming.
            headers['Range'] = 'bytes=%d-%d' % range_
            for i in range(3):
                try:
                    res = self.req.get(self.url, headers=headers, verify=False)
                except Exception, e:
                    logger.error(e.message)
                    logger.error('bytes=%d-%d download error' % range_)
                # NOTE(review): if the very first get() raises, `res` is
                # unbound here and this check raises NameError.
                if res.ok:
                    self.lock.acquire()
                    self.f.seek(range_[0])
                    self.f.write(res.content)
                    self.f.flush()
                    self.block_done = self.block_done + 1
                    self.ranges_done.append(range_)
                    self.ranges_undo.remove(range_)
                    if self.finished():
                        self.pbar.update(self.length - 1)
                    else:
                        self.pbar.update(self.block_size * self.block_done)
                    self.lock.release()
                    break
                else:
                    logger.warn(res.status_code)
                    logger.warn(res.request.headers)
                    logger.warn(res.headers)
        elif self.status == 'PAUSE':
            pass
class Scanner(object):
    """Port scanner (Python 3) for a single host, an 'a.b.c.x-y' range or a
    CIDR block.

    Probes ports [start, end] with a thread pool, grabbing a banner over
    HTTP, then HTTPS, then a raw socket.  Depends on module-level: time,
    Lock, ThreadPool, partial, socket, sys, ipaddress, dns, requests,
    BeautifulSoup, UserAgent.
    """

    def __init__(self, target, start, end):
        # target: IP / domain / CIDR / range string; start/end: port range.
        self.target = target
        self.start = start
        self.end = end
        # ANSI colour escape codes for console output.
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.time = time()    # scan start timestamp
        self.ports = []       # ports to probe, filled by get_ports()
        self.result = []      # A records collected by check_cdn()
        self.mutex = Lock()   # serialises console output across workers
        self.get_ports()

    def get_ports(self):
        """Expand the inclusive [start, end] range into self.ports."""
        for i in range(int(self.start), int(self.end) + 1):
            self.ports.append(i)

    def check_cdn(self, ip):
        """CDN detection: resolve against several public DNS servers; more
        than one distinct A record is treated as evidence of a CDN."""
        myResolver = dns.resolver.Resolver()
        myResolver.lifetime = myResolver.timeout = 2.0
        dnsserver = [['114.114.114.114'], ['8.8.8.8'], ['223.6.6.6']]
        try:
            for i in dnsserver:
                myResolver.nameservers = i
                record = myResolver.query(ip)
                self.result.append(record[0].address)
        except Exception:
            pass
        # Fix: returning from `finally` swallowed in-flight exceptions; a
        # plain return after the handler behaves identically on success.
        return len(set(self.result)) > 1

    def scan_port(self, ip, port):
        """TCP connect scan; True iff ip:port accepts a connection."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.2)
        try:
            return s.connect_ex((ip, port)) == 0
        except Exception:
            return False
        finally:
            # Fix: `s` was created inside the try, so a constructor failure
            # made this close raise NameError.
            s.close()

    def get_http_banner(self, url):
        """Return the page <title> as the banner, or None on any failure."""
        try:
            # Fix: the header key was 'UserAgent'; the standard request
            # header is 'User-Agent', so the random UA was being ignored.
            r = requests.get(url,
                             headers={'User-Agent': UserAgent().random},
                             timeout=2,
                             verify=False,
                             allow_redirects=True)
            soup = BeautifulSoup(r.content, 'lxml')
            return soup.title.text.strip('\n').strip()
        except Exception:
            return None

    def get_socket_info(self, ip, port):
        """Raw-socket banner grab; first response line, or None."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.2)
        try:
            s.connect((ip, port))
            # Fix: Python 3 sockets require bytes; sending str raised
            # TypeError, so this banner path could never succeed.
            s.send(b'HELLO\r\n')
            banner = s.recv(1024).decode('utf-8', errors='replace')
            return banner.split('\r\n')[0].strip('\r\n')
        except Exception:
            return None
        finally:
            s.close()

    def run(self, port, ip):
        """Worker body: probe one port; if open, print a banner obtained
        via HTTP -> HTTPS -> raw-socket fallback."""
        try:
            if not self.scan_port(ip, port):
                return
            # Fix: the fallbacks used to run while holding the mutex,
            # serialising all network I/O; probe first, then print.
            banner = (self.get_http_banner('http://{}:{}'.format(ip, port))
                      or self.get_http_banner('https://{}:{}'.format(ip, port))
                      or self.get_socket_info(ip, port))
            # Fix: context manager guarantees release (the old
            # acquire()/release() pair leaked the lock on any exception).
            with self.mutex:
                if banner:
                    print('{}[+] {} ---- open {}{}'.format(
                        self.G, str(port).rjust(6), banner[:18], self.W))
                else:
                    print('{}[+] {} ---- open {}'.format(
                        self.G, str(port).rjust(6), self.W))
        except Exception:
            pass

    def _start(self, ip):
        """Scan all ports of one address with a 100-thread pool."""
        try:
            print('-' * 60)
            print('{}[-] 正在扫描地址: {}{} '.format(self.O,
                                                socket.gethostbyname(ip),
                                                self.W))
            print('-' * 60)
            # thread count
            pool = ThreadPool(processes=100)
            # .get(timeout) instead of a plain map so Ctrl+C is catchable
            partial_work = partial(self.run, ip=ip)
            pool.map_async(partial_work, self.ports).get(0xffff)
            pool.close()
            pool.join()
            print('-' * 60)
            print('{}[-] 扫描完成耗时: {} 秒.{}'.format(self.O,
                                                 time() - self.time, self.W))
        except Exception as e:
            print(e)
        except KeyboardInterrupt:
            print(self.R + u'\n[-] 用户终止扫描...')
            sys.exit(1)

    def cidr2ip(self, cidr):
        """Expand a CIDR block ('1.2.3.0/24') into a list of addresses."""
        try:
            ips = ipaddress.ip_network(cidr)
            return list(ips)
        except Exception:
            # parse error
            print('usage: python {} 1.2.3.0/24 21 8080'.format(sys.argv[0]))
            sys.exit(0)

    def parse_ips(self, ips):
        """Expand an 'a.b.c.x-y' range into the addresses a.b.c.x .. a.b.c.y."""
        try:
            ip_range = ips.split('.')[-1]
            lo, hi = ip_range.split('-')
            # Fix: the old rstrip(char-set) prefix strip only worked by
            # accident; slicing at the last dot is exact.
            prefix = ips[:ips.rfind('.') + 1]
            return [prefix + str(i) for i in range(int(lo), int(hi) + 1)]
        except Exception:
            # parse error
            print('usage: python {} 1.2.3.0-255 21 8080'.format(sys.argv[0]))
            sys.exit(0)

    def check_target(self):
        """Dispatch on target type: CIDR, range, IP, or (non-CDN) domain."""
        if '/' in self.target:
            for ip in self.cidr2ip(self.target):
                self._start(str(ip))
            # Fix: without this return the code fell through and scanned
            # the raw target string a second time.
            return
        if '-' in self.target:
            for ip in self.parse_ips(self.target):
                self._start(str(ip))
            return  # fix: same fall-through as the CIDR branch
        # Decide whether the target is an IP address or a domain name.
        flag = self.target.split('.')[-1]
        try:
            # Last dotted component parses as an int -> treat as IP, scan.
            if int(flag) >= 0:
                self._start(self.target)
        except ValueError:
            # Domain: scan only when no CDN is detected.
            if not self.check_cdn(self.target):
                self._start(self.target)
            else:
                print('-' * 60)
                print('{}[-] 目标使用了CDN技术,停止扫描.{}'.format(self.R, self.W))
                print('-' * 60)
class TEST():
    """Load-test driver: generates NCLIENTS fake clients, posts their data
    to HOST from a thread pool, and prints per-release/per-flavour stats.

    Relies on module-level config/counters (NCLIENTS, LOOPS, DATE_ROUNDS,
    RELEASES, HOST, HEADERS, TIMEOUT, SLEEP_* and the shared counters
    NPET/SUCCESS/FAILED/REDO/UPDATED, ...) and on the CLIENT and DB
    classes defined elsewhere in this file."""

    def __init__(self):
        self.client_list = deque()   # all simulated clients
        self.db = DB()
        self.stats = {}              # per-release stats, filled by do_stats()
        self.pool = None             # ThreadPool, created in prepare_threads()
        self.lock = Lock()           # guards the shared global counters
        # Install the same handler for TERM and INT so Ctrl+C / kill both
        # flip the shutdown flags.
        signal.signal(signal.SIGTERM, self.term_test)
        signal.signal(signal.SIGINT, self.term_test)

    def term_test(self, *args, **kwargs):
        """Signal handler: flag the worker threads and stats thread to
        stop, then exit."""
        global STATS_ON
        global exit_threads
        exit_threads = 1
        STATS_ON = 0
        print("Exitting..")
        sys.exit(1)

    def prepare(self):
        """Instantiate NCLIENTS CLIENT objects with a progress counter on
        stderr, then compute and print the initial stats."""
        print('Preparing clients... ')
        pad = len(str(NCLIENTS))
        clean = '\r\r\r\r\r\r\r\r\r\r'
        for i in range(NCLIENTS):
            sys.stderr.write(clean + str(i + 1).zfill(pad))
            cli = CLIENT()
            self.client_list.append(cli)
        sys.stderr.write(clean + str(NCLIENTS) + ' generated!\n')
        self.do_stats()
        self.print_stats()

    def prepare_threads(self):
        """Create the thread pool and a shared requests.Session with a
        connection-pooling adapter sized to the thread count."""
        global MAX_THREADS
        # Never run more threads than there are clients.
        if (NCLIENTS < MAX_THREADS):
            MAX_THREADS = NCLIENTS
            print("NEW NTHREADS {}".format(MAX_THREADS))
        self.pool = ThreadPool(processes=MAX_THREADS)
        sys.stderr.write(
            'Created pool for {} threads, {} clients per thread\n'.format(
                MAX_THREADS, int(NCLIENTS / MAX_THREADS)))
        self.sess = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=CACHE_CONNECTIONS,
            pool_maxsize=MAX_THREADS + 1,
            max_retries=3,
            pool_block=False)
        self.sess.mount('http://', adapter)

    def init_test(self):
        """Entry point: prepare clients and threads, optionally spawn the
        live-stats thread, then run the test."""
        global STATS_ON
        self.prepare()
        #self.db.init_db()
        #self.db.close_db()
        self.prepare_threads()
        if STATS_ON and STATS_ON != 0:
            STATS_ON = 1
            try:
                threading.Thread(target=self.time_stats).start()
            except:
                pass
        self.start()
        STATS_ON = 0

    def do_stats(self):
        """Aggregate client counts, update counts and flavour frequencies
        per release into self.stats."""
        for rel in RELEASES:
            self.stats[rel] = {}
            self.stats[rel]['nclients'] = 0
            self.stats[rel]['nclients_updated'] = 0
            self.stats[rel]['flavours'] = {}
        for cli in self.client_list:
            self.stats[cli.release]['nclients'] = self.stats[
                cli.release]['nclients'] + 1
            self.stats[cli.release]['nclients_updated'] = self.stats[
                cli.release]['nclients_updated'] + cli.updates
            if cli.real_flavour not in self.stats[cli.release]['flavours']:
                self.stats[cli.release]['flavours'][cli.real_flavour] = 1
            else:
                self.stats[cli.release]['flavours'][
                    cli.real_flavour] = self.stats[cli.release]['flavours'][
                        cli.real_flavour] + 1

    def print_stats(self):
        """Pretty-print the stats gathered by do_stats()."""
        print('')
        for rel in RELEASES:
            print('RELEASE {}: {} clients ({} updated)'.format(
                rel, self.stats[rel]['nclients'],
                self.stats[rel]['nclients_updated']))
            keys = self.stats[rel]['flavours'].keys()
            keys = sorted(keys)
            for key in keys:
                print('\t{} : {}'.format(key,
                                         self.stats[rel]['flavours'][key]))
        print('')

    def time_stats(self):
        """Background thread: once per second print a smoothed
        requests-per-second rate and an ETA, then reset the NPET counter
        under the lock. Runs until STATS_ON is cleared."""
        global STATS_ON, NPET, PAUSE_SHOW_STATS, SUCCESS, DATE_ROUNDS
        global NCLIENTS, LOOPS
        print("STATS_ON")
        total = NCLIENTS * LOOPS
        # NOTE(review): [] * 9 is just [] — the window grows via the
        # slice below; kept as-is to preserve behavior.
        old_npet = [] * 9
        f = lambda x, y: x + y
        while STATS_ON and STATS_ON != 0:
            if not PAUSE_SHOW_STATS:
                old_npet.append(NPET)
                med_npet = reduce(f, old_npet) / len(old_npet)
                old_npet = old_npet[1 - len(old_npet):]
                sys.stderr.write(
                    ' ' * 20 + '\r' * 20 + '{} r/s ETA:{} secs'.format(
                        int(med_npet),
                        int((total - SUCCESS) / (med_npet + 1))))
                self.lock.acquire()
                NPET = 0
                self.lock.release()
            time.sleep(1)

    def start(self):
        """Main loop: for each date round, run LOOPS rounds of posting all
        clients through the pool, accumulating APPS_SENT into per-round
        (TOTAL_SENT) and per-date-round (ALL_SENT) tallies, then print a
        summary. Optionally pauses between date rounds so the server date
        can be changed manually."""
        global APPS_SENT, ALL_BY_APP, PAUSE_SHOW_STATS, PAUSE_WITH_DATE_ROUNDS, CURRENT_DATE_ROUND, CURRENT_LOOP_ROUND
        global STATS_ON, SHOW_BY_APP, NPET, SUCCESS, FAILED, REDO, UPDATED, UPDATED_TOTAL
        ALL_BY_APP = {}
        for k in range(DATE_ROUNDS):
            CURRENT_DATE_ROUND = k
            # Reset the shared counters for this date round.
            NPET = 0
            SUCCESS = 0
            FAILED = 0
            REDO = 0
            UPDATED = 0
            start_time = time.time()
            if (k > 0):
                # Subsequent date rounds: advance every client's date offset.
                for cli in self.client_list:
                    cli.inc_date += 1
            TOTAL_SENT = {}
            ALL_SENT = {}
            for rel in RELEASES:
                TOTAL_SENT[rel] = {}
                ALL_SENT[rel] = {}
                for fla in ['desktop', 'server', 'client', 'other']:
                    TOTAL_SENT[rel][fla] = 0
                    ALL_SENT[rel][fla] = 0
                    ALL_SENT[rel][fla + 'apps'] = {}
            for i in range(LOOPS):
                CURRENT_LOOP_ROUND = i
                UPDATED_TOTAL = UPDATED
                sys.stderr.write(
                    '\nRound {}/{} Date round {}/{} ...\n'.format(
                        i + 1, LOOPS, k + 1, DATE_ROUNDS))
                # Per-loop counters, filled by thread_code().
                APPS_SENT = {}
                for rel in RELEASES:
                    APPS_SENT[rel] = {}
                    for fla in ['desktop', 'server', 'client', 'other']:
                        APPS_SENT[rel][fla] = 0
                        APPS_SENT[rel][fla + 'apps'] = {}
                try:
                    # .get() with a timeout so Ctrl+C reaches the main thread.
                    self.pool.map_async(
                        self.thread_code,
                        self.client_list).get(timeout=1000000)
                except:
                    sys.stderr.write('Fatal error when threading\n')
                # Fold this loop's APPS_SENT into the running tallies.
                for rel in RELEASES:
                    for fla in ['desktop', 'server', 'client', 'other']:
                        if not STATS_ON and DEBUG and DEBUG > 1:
                            sys.stderr.write(
                                '\t{}:{} ({} apps sent ((tmp_packages count)insert or update))\n'
                                .format(rel, fla, APPS_SENT[rel][fla]))
                        TOTAL_SENT[rel][
                            fla] = TOTAL_SENT[rel][fla] + APPS_SENT[rel][fla]
                        ALL_SENT[rel][
                            fla] = ALL_SENT[rel][fla] + APPS_SENT[rel][fla]
                        for app in APPS_SENT[rel][fla + 'apps']:
                            if app not in ALL_SENT[rel][fla + 'apps']:
                                ALL_SENT[rel][
                                    fla + 'apps'][app] = APPS_SENT[rel][
                                        fla + 'apps'][app]
                            else:
                                ALL_SENT[rel][
                                    fla + 'apps'][app] += APPS_SENT[rel][
                                        fla + 'apps'][app]
                if not STATS_ON:
                    sys.stderr.write('\n')
            # ---- date-round summary ----
            total = 0
            print("\n\nTOTAL APPS SENT AFTER DATE_ROUND {}/{}\n".format(
                k + 1, DATE_ROUNDS))
            other_all_sent = {}
            for rel in RELEASES:
                total_rel = 0
                for fla in ['desktop', 'server', 'client', 'other']:
                    print(
                        "\tBY {} {} : {} sent (insert or update)(tmp_packages count)"
                        .format(rel, fla, ALL_SENT[rel][fla]))
                    total_rel = total_rel + ALL_SENT[rel][fla]
                    if fla == 'other':
                        # 'other' is combined across releases and printed
                        # once below.
                        for x in ALL_SENT[rel][fla + 'apps']:
                            if x in other_all_sent:
                                other_all_sent[x] += ALL_SENT[rel][
                                    fla + 'apps'][x]
                            else:
                                other_all_sent[x] = ALL_SENT[rel][
                                    fla + 'apps'][x]
                    else:
                        # Top-10 apps for this release/flavour.
                        sorted_x = sorted(
                            ALL_SENT[rel][fla + 'apps'].items(),
                            key=operator.itemgetter(1),
                            reverse=True)
                        for x in sorted_x[0:10]:
                            print("\t\t{}:{}".format(x[0], x[1]))
                print(
                    "\tTOTAL RELEASE {} : {} sent (insert or update)(tmp_packages count)"
                    .format(rel, total_rel))
                total = total + total_rel
            print("\tOTHER (combined 15,16):")
            sorted_x = sorted(other_all_sent.items(),
                              key=operator.itemgetter(1),
                              reverse=True)
            for x in sorted_x[0:10]:
                print("\t\t{}:{}".format(x[0], x[1]))
            print(
                "TOTAL ALL RELEASES : {} sent (insert or updated)(tmp_packages count)"
                .format(total))
            if SHOW_BY_APP:
                sorted_apps = sorted(ALL_BY_APP.items(),
                                     key=operator.itemgetter(1),
                                     reverse=True)
                print(
                    "\n~~~~~~~~~~~~~~~~~~~\n~~RESUME BY APPS:~~\n~~~~~~~~~~~~~~~~~~~\n"
                )
                for x in sorted_apps[0:20]:
                    print("\t\t{}:{}".format(x[0], x[1]))
            elapsed_time = time.time() - start_time
            print(
                "\nEND DATE ROUND {}/{}\nCLIENTS UPDATED:{}\nTOTAL_CLIENTS:{}\nSUCCESS:{} req\nFAILED:{} req\nREDO:{} req\nELAPSED:{} secs\n{} req/s\n"
                .format(k + 1, DATE_ROUNDS, UPDATED_TOTAL,
                        NCLIENTS + UPDATED_TOTAL, SUCCESS, FAILED, REDO,
                        int(elapsed_time), int(SUCCESS / elapsed_time)))
            if PAUSE_WITH_DATE_ROUNDS and DATE_ROUNDS > 1 and k != DATE_ROUNDS - 1:
                # Pause the stats thread and wait for the operator to
                # change the server date before the next round.
                PAUSE_SHOW_STATS = True
                readed = None
                while readed != '\n':
                    print("Now change date into server and press enter")
                    readed = sys.stdin.readline()
                PAUSE_SHOW_STATS = False
        # NOTE(review): a large commented-out "resume by date round"
        # debug block (a dead triple-quoted string) was removed here.

    def thread_code(self, client):
        """Pool worker: post one client's payload to HOST (with up to 5
        retries), then fold the per-app counts into the shared APPS_SENT /
        ALL_BY_APP tallies under the lock. May also randomly trigger a
        client update between loops."""
        global exit_threads
        # BUGFIX: the original tested the misspelled name `exit_treads`,
        # raising NameError exactly when shutdown was requested.
        if exit_threads and exit_threads == 1:
            return
        global APPS_SENT, TMP, ALL_BY_APP
        global NPET, SUCCESS, FAILED, REDO, UPDATED, CURRENT_LOOP_ROUND, LOOPS
        try:
            data, napps, lapps = client.get_data()
        except:
            # NOTE(review): if get_data() fails, `data` stays unbound and
            # the post below raises NameError, which the outer handler
            # logs — preserved as-is.
            pass
        try:
            do = False
            i = 5
            error_replay = False
            while i > 0:
                try:
                    r = self.sess.post(HOST,
                                       data=data,
                                       headers=HEADERS,
                                       timeout=TIMEOUT)
                    i = 0
                except Exception as e:
                    # BUGFIX: 'R' + e raised TypeError (str + Exception),
                    # aborting the retry loop; convert explicitly.
                    print('R' + str(e) + "\n")
                    do = True
                    time.sleep(SLEEP_THREAD_TIME)
                if do:
                    self.lock.acquire()
                    REDO += 1
                    time.sleep(SLEEP_FAIL_SENDING)
                    self.lock.release()
                    i -= 1
                    if i == 0:
                        # All retries exhausted.
                        error_replay = True
            if SLEEP_THREAD_TIME and SLEEP_THREAD_TIME > 0:
                time.sleep(SLEEP_THREAD_TIME)
            if error_replay:
                self.lock.acquire()
                FAILED += 1
                self.lock.release()
                raise Exception('Fail Sending')
            if r.text != 'OK':
                self.lock.acquire()
                FAILED += 1
                self.lock.release()
                print('{}'.format(data))
                raise Exception('Reply NOK')
            # Success: update counters and per-app tallies atomically.
            self.lock.acquire()
            NPET += 1
            SUCCESS += 1
            APPS_SENT[str(
                client.release)][client.real_flavour] = APPS_SENT[str(
                    client.release)][client.real_flavour] + napps
            for app in lapps:
                if app not in ALL_BY_APP:
                    ALL_BY_APP[app] = int(lapps[app])
                else:
                    ALL_BY_APP[app] += int(lapps[app])
                if app not in APPS_SENT[str(
                        client.release)][client.real_flavour + 'apps']:
                    APPS_SENT[str(client.release)][client.real_flavour +
                                                   'apps'][app] = int(
                                                       lapps[app])
                else:
                    APPS_SENT[str(client.release)][client.real_flavour +
                                                   'apps'][app] += int(
                                                       lapps[app])
            self.lock.release()
        except Exception as e:
            sys.stderr.write('E' + str(e))
        # Randomly simulate a client OS update between loops.
        if LOOPS > 1 and CLIENT_UPDATE_FREQ > random.randint(0, 100):
            if CURRENT_LOOP_ROUND < LOOPS - 1:
                self.lock.acquire()
                UPDATED += 1
                self.lock.release()
                client.update()