def Work():
    """Consume system-vul scan targets from redis (SYSVUL_KEY) and dispatch
    each to every loaded SystemVul plugin; after the queue stays empty for
    5 minutes, wait for the plugins to finish and exit the process."""
    redis = RedisWork()
    sysvulplugin_manager = SystemVulPlugin()
    sysvulplugin_manager.loadPlugins()
    sysvulplugins = sysvulplugin_manager.getPlugins()
    taskid = 0  # last task id seen; passed to wait_for_complete at shutdown
    while True:
        sysvul_json = redis.getvulInfo(SYSVUL_KEY)
        if sysvul_json is not None:
            try:
                # WARNING: eval() on queue payloads executes arbitrary code if
                # the redis queue is writable by untrusted parties -- prefer
                # json.loads if the producer emits JSON.
                sysvulinfo = eval(sysvul_json)
                taskid = sysvulinfo['taskid']
                for sysvulplugin in sysvulplugins:
                    sysvulplugin.execute_run(sysvulinfo['ip'], sysvulinfo['port'], taskid)
            except Exception:
                # FIX: was a bare `except: continue` that silently dropped bad
                # entries (and would even swallow KeyboardInterrupt) -- log it.
                logger.exception('[SystemVul] failed to process queue entry')
                continue
        else:
            # Queue empty: grace period, then let every plugin drain and stop.
            time.sleep(300)
            for sysvulplugin in sysvulplugins:
                sysvulplugin.wait_for_complete(taskid)
            break
    logger.info('[SystemVul exit] systemvul scan over, exit!')  # scan finished
    sys.exit()
def Work():
    # Consume web-path scan targets from redis (WEBSCAN_KEY) and feed each to
    # every loaded WebPath plugin; exits after the queue stays empty 5 minutes.
    redis = RedisWork()
    webplugin_manager = WebPathPlugin()
    webplugin_manager.loadPlugins()
    webplugins = webplugin_manager.getPlugins()
    taskid = 0  # last task id seen; handed to wait_for_complete at shutdown
    while True:
        web_json = redis.getvulInfo(WEBSCAN_KEY)
        if web_json is not None:
            try:
                # NOTE(review): eval on queue data executes arbitrary code if
                # redis is writable by others -- confirm the queue is trusted.
                webinfo = eval(web_json)
                print webinfo
                taskid = webinfo["taskid"]
                for webplugin in webplugins:
                    webplugin.execute_run(
                        ip=webinfo["ip"],
                        port=webinfo["port"],
                        bdomain=webinfo["type"],
                        taskid=taskid
                    )
            except Exception, e:
                print str(e)
                continue
        else:
            logger.info("[webscan empty]webpath redis empty!")
            time.sleep(300)  # grace period before shutting down
            for webplugin in webplugins:
                webplugin.wait_for_complete(0, taskid)
            break
        time.sleep(0.2)  # throttle the polling loop
def download_page(request_url):
    """Fetch request_url over HTTP GET and return the body text.

    Returns None (after logging an error) for any non-200 status.
    """
    logger.info('request_url = ' + request_url)
    response = requests.get(request_url)
    if response.status_code == 200:
        return response.text
    logger.error('response.status_code=%s, download %s failed' % (str(response.status_code), request_url))
    return None
def download(trade_date):
    """Download the SHFE (Shanghai Futures Exchange) daily top-rankings data
    file for ``trade_date`` (a 'YYYYMMDD' string) into the local origin path.

    Returns True on success or when the file already exists locally,
    False when the HTTP request fails. Exits early for dates before the
    dataset's first publication (2002-01-07).
    """
    if trade_date < '20020107':
        # SHFE publishes this dataset only from 2002-01-07 onwards.
        logger.error('上海期货龙虎榜数据下载日期有误请检查!')
        exit_now()
    request_url = 'http://www.shfe.com.cn/data/dailydata/kx/pm%s.dat' % trade_date
    # FIX: the Referer query string was garbled -- "¶mdate" is the HTML entity
    # &para; mangling of "&paramdate"; restore the literal ampersand form.
    __headers['Referer'] = 'http://www.shfe.com.cn/statements/dataview.html?paramid=pm&paramdate=%s' % trade_date
    logger.info('trade_date=%s, request_url=%s' % (trade_date, request_url))
    # Skip the download when the file is already on disk.
    future_holding_downloaded_origin_file_path = \
        get_future_holding_downloaded_origin_file_path(trade_date, FuturesExchange.SHFE)
    if os.path.exists(future_holding_downloaded_origin_file_path):
        logger.info('%s %s上海期货龙虎榜数据已下载:' % (future_holding_downloaded_origin_file_path, trade_date))
        return True
    res = requests.get(request_url, headers=__headers)
    if res.status_code != 200:
        logger.error('response.status_code=%s, download %s failed' % (str(res.status_code), request_url))
        return False
    with open(future_holding_downloaded_origin_file_path, 'w', encoding='utf-8') as f:
        f.write(res.text)
    return True
def __init__(self, fileName, encoding="utf8"):
    """Open (or create) the backing text file and load its space-separated
    items into ``self.itemList``.

    :param fileName: file name created inside the Db directory (Db.__dirName)
    :param encoding: text encoding used for all reads/writes
    """
    logger.info("初始化数据库类,文件名:{},编码:{}".format(fileName, encoding))
    if not os.path.exists(Db.__dirName):
        logger.info("{} 不存在,新建".format(Db.__dirName))
        os.mkdir(Db.__dirName)
    self.fileName = Db.__dirName + "/" + fileName
    self.encoding = encoding
    if not os.path.exists(self.fileName):
        logger.info("文件不存在,新建")
        # FIX: touch the file inside a with-block so the handle is always
        # closed, even if opening partially fails.
        with open(self.fileName, "w", encoding=self.encoding):
            pass
        self.itemList = []
    else:
        logger.info("文件已经存在")
        # FIX: read the whole file at once inside a with-block; open() already
        # buffers internally, so the original manual 10000-char chunk loop was
        # unnecessary, and the handle is now closed on every path.
        with open(self.fileName, "r", encoding=self.encoding) as f:
            result = f.read()
        result = result.strip()
        # items are separated by single spaces; empty file -> empty list
        self.itemList = result.split(" ") if result != "" else []
    logger.info("数据库初始化完成")
def wrapper():
    """Run wrapped_function, converting any exception into an HTTP 500 code.

    Returns None when wrapped_function succeeds.
    """
    try:
        wrapped_function()
    except Exception:  # noqa
        # FIX: narrowed from a bare `except:` (which also caught SystemExit /
        # KeyboardInterrupt) and log the full traceback, not just a message.
        logger.exception("error in function")
        return status.HTTP_500_INTERNAL_SERVER_ERROR
def preform_requests(debug: bool = False) -> None:
    # Send a "status report" request for every user scheduled for the current
    # HH:MM run, then log/email a summary and (unless debug) push APNS results.
    users_data = Handlers.elastic_handler.get_all_today_data(_type="status")
    all_cookies = Handlers.cookies_handler.get_all_today_data(_type="cookie")
    # Users whose settings schedule them for this exact hour:minute.
    current_run_users = Handlers.settings_handler.get_all_today_data(_type="settings", hour=dt.datetime.now().strftime("%H:%M"))
    current_run_user_ids = [user_id["_source"]["VENDOR_UUID"] for user_id in current_run_users]
    # Restrict today's status data to the users scheduled for this run.
    current_run_users_data = [user_data for user_data in users_data if user_data["_source"]["VENDOR_UUID"] in current_run_user_ids]
    successful_users, all_users = set(), set()
    for user_data in current_run_users_data:
        data_source = user_data["_source"]
        all_users.add(data_source['VENDOR_UUID'])
        # NOTE(review): assert is stripped under `python -O`; consider raising
        # an explicit error if these keys are genuinely required.
        assert "MAIN_CODE" in data_source and "SECONDARY_CODE" in data_source and "VENDOR_UUID" in data_source
        response, cookie, headers = PreformRequest.preform_user_request(data_source, all_cookies)
        if str(response.status_code) == "200":
            logger.info(
                f"successful report for:\n\tUUID: {data_source['VENDOR_UUID']}\n\tusing app cookie: {cookie}\n\tusing headers: {pformat(headers)}\ngot response: {response.status_code if not response.text else ': ' + str(response.text)}")
            successful_users.add(data_source['VENDOR_UUID'])
        else:
            logger.warning(
                f"FAILURE IN REPORT FOR:\n\tUUID: {data_source['VENDOR_UUID']}\n\tusing app cookie: {cookie}\n\tusing headers: {pformat(headers)}\ngot response: {response.status_code if not response.text else ': ' + str(response.text)}")
    if all_users:  # only if there were users to preform requests for within this run
        PreformRequest.log_and_email(all_users, successful_users)
        if not debug:
            # Notify every device: success=True for users whose report went
            # through, success=False for the rest.
            PreformRequest.send_APNS(
                [{"VENDOR_UUID": successful_user, "success": True} for successful_user in successful_users] +
                [{"VENDOR_UUID": unsuccessful_user, "success": False} for unsuccessful_user in all_users - successful_users]
            )
def get_log(update, context):
    """Admin-only /get_log command: send the bot's log file as a document.

    Silently ignores the command for non-admin users.
    """
    if int(update.message.from_user.id) in admin_ids:
        logger.info("[Command /get_log from admin %s]", update.message.from_user.id)
        # FIX: open the log inside a with-block so the file handle is closed
        # after the upload (the original leaked it).
        with open(os.path.relpath('bot.log'), 'rb') as log_file:
            context.bot.send_document(chat_id=update.message.from_user.id,
                                      document=log_file,
                                      filename="catalogobot_log_{}.txt"
                                      .format(datetime.now().strftime("%d%b%Y-%H%M%S")))
def subscriptions(update, context):
    # Handle /suscripciones: report whether notifications are enabled for this
    # chat and list its department and course subscriptions (HTML formatted).
    logger.info("[Command /suscripciones]")
    subscribed_deptos = context.chat_data.get("subscribed_deptos", [])
    subscribed_cursos = context.chat_data.get("subscribed_cursos", [])
    # One "- (code) name" bullet per subscribed department.
    sub_deptos_list = ["- <b>({})</b> <i>{} {}</i>".format(x, DEPTS[x][0], DEPTS[x][1])
                       for x in subscribed_deptos]
    # x is a (depto_code, curso_code) pair.
    sub_cursos_list = ["- <b>({}-{})</b> <i>{} en {} {}</i>"
                       .format(x[0], x[1], x[1], DEPTS[x[0]][0], DEPTS[x[0]][1])
                       for x in subscribed_cursos]
    result = "<b>Avisos activados:</b> <i>{}</i>\n\n" \
        .format("Sí \U00002714 (Detener: /stop)" if context.chat_data.get("enable", False)
                else "No \U0000274C (Activar: /start)")
    if sub_deptos_list or sub_cursos_list:
        result += "Actualmente doy los siguientes avisos para este chat:\n\n"
    else:
        result += "Actualmente no tienes suscripciones a ningún departamento o curso.\n" \
                  "Suscribe avisos con /suscribir_depto o /suscribir_curso."
    if sub_deptos_list:
        result += "<b>Avisos por departamento:</b>\n"
        result += "\n".join(sub_deptos_list)
        result += "\n\n"
    if sub_cursos_list:
        result += "<b>Avisos por curso:</b>\n"
        result += "\n".join(sub_cursos_list)
        result += "\n\n"
    if sub_deptos_list or sub_cursos_list:
        result += "<i>Puedes desuscribirte con /desuscribir_depto y /desuscribir_curso.</i>"
    try_msg(context.bot,
            chat_id=update.message.chat_id,
            parse_mode="HTML",
            text=result)
def crack(self, *args, **kwargs):
    # Brute-force credentials on one service with medusa; poll the child
    # process and kill it if it exceeds self.timeout seconds.
    # args[1] is expected to be a dict with 'ip', 'port', 'service' keys.
    ip = args[1]['ip']
    port = args[1]['port']
    service = args[1]['service']
    if not ip or not port or not service:
        return None
    userpath = '%s/%s' % (self.__get_crack_dic_path(), self.user_dict)
    passpath = '%s/%s' % (self.__get_crack_dic_path(), self.pass_dict)
    # medusa flags: -e ns try empty/same-as-user passwords, -f stop on first
    # hit, -t 16 parallel logins, -R 0 no retries -- per medusa's manual.
    self.command = "%s -h %s -n %s -U %s -P %s -e ns -M %s -f -v 4 -t 16 -R 0" % (self.medusa_script, ip, str(port), userpath, passpath, service)
    print self.command
    start = datetime.datetime.now()
    process = subprocess.Popen(self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Poll until the child exits on its own or the timeout trips.
    while process.poll() is None:
        time.sleep(0.5)
        now = datetime.datetime.now()
        if (now - start).seconds > self.timeout:
            try:
                print process.pid
                self.normal_exit = False
                #process.terminate()
                # (original note: must NOT return here -- fall through so the
                # child process is actually killed below)
                logger.info("medusa will be stopped because of crack [%s:%s] time out." % (ip, str(port)))
            except Exception,e:
                logger.error('Exception:%s' % str(e))
            process.terminate()
            # (original note: must not return here either)
            process.kill()
            time.sleep(2)
async def produce(queue, client, user):
    # Register telegram handlers and pump incoming chat messages into `queue`
    # until the secret stop word (typed by the owner) disconnects the client.
    logger.info('Starting producer...')

    @client.on(NewMessage(from_users=CHAT_NAME, incoming=True))
    async def incoming_message_handler(event):
        message_obj = event.message
        message_text = message_obj.message
        # Drop photos and anything matching a SKIP_MESSAGES pattern.
        if any(re_search(pattern, message_text) for pattern in SKIP_MESSAGES) or \
                message_obj.photo is not None:
            return
        else:
            await queue.put(message_obj)
            logger.info(f'Incoming message. {message_text[:150]}...')

    @client.on(NewMessage(chats=CHAT_NAME, outgoing=True))
    async def outgoing_message_handler(event):
        message_obj = event.message
        # Typing SECRET_STOP_WORD in the chat shuts the whole pipeline down.
        # `checker` is bound below before the client starts receiving, so it is
        # defined by the time this handler can fire.
        if SECRET_STOP_WORD in message_obj.message.lower():
            await client.disconnect()
            checker.cancel()

    checker = ensure_future(check_last_message(user))
    await client.run_until_disconnected()
def new_user_query(user_id, message):
    """Append ``message`` to the user's query history in the db_query
    collection, creating the document if the user has none yet.

    Raises Exception with a stage-specific status dict on any failure.
    """
    try:
        entry = db_query.find_one({'user_id': user_id})
    except Exception as e:
        status = {"error in querying database": str(e)}
        raise Exception(status)
    try:
        # Build the serializable query object for this user.
        uquery = UserQuery(user_id)
        # Extend the existing history when the user already has a document.
        uquery.message = (entry['message'] + [message]) if entry else [message]
        uqueryjson = jsonpickle.encode(uquery)
    except Exception as e:
        status = {"error in creating json for database": str(e)}
        raise Exception(status)
    try:
        # Upsert so first-time users get a fresh document.
        db_query.replace_one({'user_id': user_id}, json.loads(uqueryjson), upsert=True)
        logger.info("New query added to the userQuery collection")
    except Exception as e:
        status = {"error while inserting/updating database": str(e)}
        raise Exception(status)
def Work():
    # Consume web-path scan targets from redis (WEBSCAN_KEY) and feed each to
    # every loaded WebPath plugin; exits after the queue stays empty 5 minutes.
    redis = RedisWork()
    webplugin_manager = WebPathPlugin()
    webplugin_manager.loadPlugins()
    webplugins = webplugin_manager.getPlugins()
    taskid = 0  # last task id seen; handed to wait_for_complete at shutdown
    while True:
        web_json = redis.getvulInfo(WEBSCAN_KEY)
        if web_json is not None:
            try:
                # NOTE(review): eval on queue data executes arbitrary code if
                # redis is writable by others -- confirm the queue is trusted.
                webinfo = eval(web_json)
                print webinfo
                taskid = webinfo['taskid']
                for webplugin in webplugins:
                    webplugin.execute_run(ip=webinfo['ip'], port=webinfo['port'], bdomain=webinfo['type'], taskid=taskid)
            except Exception, e:
                print str(e)
                continue
        else:
            logger.info('[webscan empty]webpath redis empty!')
            time.sleep(300)  # grace period before shutting down
            for webplugin in webplugins:
                webplugin.wait_for_complete(0, taskid)
            break
        time.sleep(0.2)  # throttle the polling loop
def removeItem(self, item):
    """Delete ``item`` from the in-memory item list.

    Returns True when the item existed and was removed, False otherwise.
    """
    if item in self.itemList:
        self.itemList.remove(item)
        logger.info("[成功] 删除 {} 中的 {}".format(self.fileName, item))
        return True
    logger.info("[失败] 删除 {} 中的 {}".format(self.fileName, item))
    return False
def stop(update, context):
    """Handle /stop: disable catalog-change notifications for this chat."""
    logger.info("[Command /stop]")
    context.chat_data["enable"] = False
    reply = ("Ok, dejaré de avisar cambios en el catálogo por este chat. "
             "Puedes volver a activar los avisos enviándome /start nuevamente.")
    try_msg(context.bot, chat_id=update.message.chat_id, text=reply)
def login(driver):
    """Loop until a manual JD login is confirmed by check_login(driver).

    Opens the login page, waits for the operator to sign in and press Enter,
    then verifies the session; retries on failure. Returns True once logged in.
    """
    while True:
        driver.get('https://passport.jd.com/new/login.aspx')
        input('请手动登录,然后在这里输入回车')
        time.sleep(2)  # give the session a moment to settle
        if check_login(driver):
            logger.info('登录成功,将开始检测')
            return True
        print('未检测到登录状态,请重试')
def set_manifest(filename: str):
    """Record a member_eligibility load in the manifest table.

    The load date is parsed from the second dot-separated component of
    ``filename``, expected in YYYYMMDD form.
    """
    with db_session_handler() as db_session:
        file_date = datetime.strptime(filename.split('.')[1], '%Y%m%d')
        logger.info(f'appending to manifest')
        db_session.add(Manifest(**{
            'TABLE': 'member_eligibility',
            'LOAD_DATE': file_date,
        }))
def unsubscribe_curso(update, context):
    # Handle /desuscribir_curso: remove each "depto-curso" pair given in the
    # command arguments from this chat's course subscriptions and report the
    # outcome per category.
    logger.info("[Command /desuscribir_curso]")
    if context.args:
        deleted = []       # pairs actually removed
        notsub = []        # pairs the chat was not subscribed to
        failed = []        # args not shaped like "depto-curso"
        failed_depto = []  # pairs whose depto code is unknown
        for arg in context.args:
            try:
                (d_arg, c_arg) = arg.split("-")
            except ValueError:
                failed.append(arg)
                continue
            if d_arg in DEPTS:
                if "subscribed_cursos" not in context.chat_data:
                    context.chat_data["subscribed_cursos"] = []
                if (d_arg, c_arg) in context.chat_data["subscribed_cursos"]:
                    context.chat_data["subscribed_cursos"].remove((d_arg, c_arg))
                    data.persistence.flush()  # persist the change immediately
                    deleted.append((d_arg, c_arg))
                else:
                    notsub.append((d_arg, c_arg))
            else:
                failed_depto.append((d_arg, c_arg))
        response = ""
        if deleted:
            response += "\U0001F6D1 Dejaré de avisarte sobre cambios en:\n<i>{}</i>\n\n" \
                .format("\n".join([("- " + x[1] + " de " + DEPTS[x[0]][1] + " ({})".format(x[0])) for x in deleted]))
        if notsub:
            response += "\U0001F44D No estás suscrito a\n<i>{}</i>\n\n" \
                .format("\n".join([("- " + x[1] + " de " + DEPTS[x[0]][1] + " ({})".format(x[0])) for x in notsub]))
        if failed_depto:
            response += "\U0001F914 No pude identificar ningún departamento asociado a:\n<i>{}</i>\n\n" \
                .format("\n".join(["- " + x[0] for x in failed_depto]))
            response += "Puedo recordarte la lista de /deptos que reconozco.\n"
        if failed:
            response += "\U0001F914 No pude identificar el par <i>'depto-curso'</i> en:\n<i>{}</i>\n\n"\
                .format("\n".join(["- " + str(x) for x in failed]))
            response += "Guíate por el formato del ejemplo:\n" \
                        "<i>Ej. /desuscribir_curso 5-CC3001 21-MA1002</i>\n"
        response += "\nRecuerda que puedes apagar temporalmente todos los avisos usando /stop, " \
                    "sin perder tus suscripciones"
        try_msg(context.bot,
                chat_id=update.message.chat_id,
                parse_mode="HTML",
                text=response)
    else:
        # No arguments: explain the expected format and related commands.
        try_msg(context.bot,
                chat_id=update.message.chat_id,
                parse_mode="HTML",
                text="Indícame qué cursos quieres dejar de monitorear.\n"
                     "<i>Ej. /desuscribir_curso 5-CC3001 21-MA1002</i>\n\n"
                     "Para ver las suscripciones de este chat envía /suscripciones\n"
                     "Para ver la lista de códigos de deptos que reconozco envía /deptos\n")
def log_and_email(all_users: Set[str], successful_users: Set[str]) -> None:
    """Summarise a reporting run: log it (banner-starred) and email it.

    Chooses the info/success variant when every user succeeded, otherwise the
    warning/failure variant with the success percentage.
    """
    if all_users == successful_users:
        mail_text = f"REPORT STATUS WAS SENT FOR {len(successful_users)}, ALL OF WHICH WERE SUCCESSFUL!"
        log, subject = logger.info, "preform_user_requests success!"
    else:
        mail_text = f"REPORT STATUS WAS ATTEMPTED TO BE SENT FOR {len(all_users)} USERS\nBUT ONLY {len(successful_users)} WERE SUCCESSFUL ({round(100*len(successful_users)/len(all_users), 2)}%)"
        log, subject = logger.warning, "preform_user_requests failure!"
    banner = '*' * 20
    log(f"{banner}{mail_text}{banner}")
    send_status_smtp.send(subject=subject, mail_text=mail_text)
def get_record_trading_future_contract_name_file_path(future_exchange_name):
    """Return the path of the per-exchange contract-name record file,
    creating the containing directory when needed.

    Exits the process when ``future_exchange_name`` is not a known exchange.
    """
    if future_exchange_name not in FuturesExchange.ALL:
        logger.info('get_record_trading_future_contract_name_file_path future_exchange value error!')
        exit_now(ignore_debug=True)
    future_data_path = os.path.join(tmp_work_path, 'future')
    if not os.path.exists(future_data_path):
        os.makedirs(future_data_path)
    file_name = 'record_trading_future_contract_name_%s.txt' % future_exchange_name
    return os.path.join(future_data_path, file_name)
def deptos(update, context):
    """Handle /deptos: list every known department code with its names."""
    logger.info("[Command /deptos]")
    lines = []
    for code in DEPTS:
        lines.append("<b>{}</b> - <i>{} {}</i>".format(code, DEPTS[code][0], DEPTS[code][1]))
    try_msg(context.bot,
            chat_id=update.message.chat_id,
            parse_mode="HTML",
            text="Estos son los códigos que representan a cada departamento o área. "
                 "Utilizaré los mismos códigos que usa U-Campus para facilitar la consistencia\n"
                 "\n{}".format("\n".join(lines)))
def __init__(self, phone):
    # Build a Downloader bound to one phone number: per-phone finished/error
    # databases plus a requests session that is signed in before returning.
    logger.info("初始化Downloader对象,phone = {}".format(phone))
    self.finishedDb = Db(phone + "_finished")  # records completed items
    self.errorDb = Db(phone + "_error")        # records failed items
    self.s = requests.session()
    self.phone = phone
    self.s.headers.update(self.__headers)  # class-level default headers
    self.isLogin = False
    self.login()  # runs the sign-in flow; presumably sets isLogin -- verify
    logger.info("初始化Downloader对象结束")
def main():
    """Wire up the recurring jobs and run the scheduler loop forever."""
    # Run the main job twice an hour; purge notifications nightly at 02:00.
    for minute_mark in (":00", ":30"):
        schedule.every().hour.at(minute_mark).do(job)
    schedule.every().day.at("02:00").do(notifications_db.delete_all)
    logger.info("Service started")
    while True:
        schedule.run_pending()
        time.sleep(1)
def addItem(self, item):
    """Append ``item`` to the in-memory item list unless already present.

    Returns True when the item was added, False for a duplicate.
    """
    # IDIOM: direct membership test instead of the original
    # list.index()+ValueError dance -- same behavior, clearer intent.
    if item in self.itemList:
        logger.info("[失败] 添加 {} 中的 {}".format(self.fileName, item))
        return False
    self.itemList.append(item)  # preserve insertion order (append at the end)
    logger.info("[成功] 添加 {} 中的 {}".format(self.fileName, item))
    return True
async def incoming_message_handler(event):
    """Queue incoming text messages, skipping photos and messages that match
    any SKIP_MESSAGES pattern."""
    message_obj = event.message
    message_text = message_obj.message
    skippable = message_obj.photo is not None or any(
        re_search(pattern, message_text) for pattern in SKIP_MESSAGES)
    if skippable:
        return
    await queue.put(message_obj)
    logger.info(f'Incoming message. {message_text[:150]}...')
async def update(user, message):
    """Route a game chat message to the matching user-state updater, then log
    the user's current state."""
    # Ordered (marker, coroutine) pairs mirroring the original if/elif chain.
    dispatch = (
        ('👥', user.update_info),
        ('🍗', user.update_stats),
        ('ПРИПАСЫ В РЮКЗАКЕ', user.update_food),
        ('Уровень голода:', user.update_hungry_level),
    )
    for marker, handler in dispatch:
        if marker in message:
            await handler(message)
            break  # first match only, exactly like if/elif
    logger.info(repr(user))
def gen_new_cookie(self, reason):
    '''Starts proxy to get new cookie from a user.

    :param reason: human-readable explanation of why the cookie is invalid
    '''
    # FIX: logger.warn() is a deprecated alias -- use logger.warning().
    logger.warning(
        "Cookie invalid - reason: {} - loading proxy to regenerate".format(
            reason))
    logger.info(
        "In order to get a new token, we need to intercept it from the real NSO app. Please make sure you have a smartphone or Android emulator to continue."
    )
    logger.info(
        "If your smartphone runs Android 7.0 or higher, you will need to use an Android emulator or an iOS device to continue."
    )
    # Blocks while the proxy captures fresh credentials from the device.
    start_credential_proxy()
def check_vid():
    """Log each video id from the id file that has no downloaded file yet,
    then log how many distinct ids have been downloaded."""
    with open(andy_lee_all_video_ids_file_path, 'r', encoding='utf-8') as f:
        video_ids = f.read().strip().split('\n')
    # PERF: a set gives O(1) membership and de-duplicates automatically; the
    # original used a list with linear `in` scans per file and per id.
    downloaded_video_ids = set()
    for file_name in os.listdir(andy_lee_video_path):
        # chars 9..19 of the file name hold the video id -- TODO confirm layout
        downloaded_video_ids.add(file_name[9:20])
    for v in video_ids:
        if v not in downloaded_video_ids:
            logger.info(v)
    logger.info(len(downloaded_video_ids))
def set_member_eligibility(filename: str):
    """Bulk-load a pipe-separated eligibility file into MemberEligibility.

    ELIGIBILITY_START/END are parsed as %Y-%m-%d; an empty ELIGIBILITY_END
    becomes None (open-ended eligibility).
    """
    # FIX: the path and log message ignored the `filename` argument (the
    # literals contained the placeholder residue "(unknown)") -- use it.
    with open(f'data/{filename}') as file:
        psv_reader = DictReader(file, delimiter='|')
        new_records = []
        logger.info(f'loading file: {filename}')
        with db_session_handler() as db_session:
            for row in psv_reader:
                row['ELIGIBILITY_START'] = datetime.strptime(
                    row['ELIGIBILITY_START'], '%Y-%m-%d')
                row['ELIGIBILITY_END'] = datetime.strptime(
                    row['ELIGIBILITY_END'], '%Y-%m-%d') if row['ELIGIBILITY_END'] else None
                new_records.append(MemberEligibility(**row))
            # single bulk insert instead of per-row adds
            db_session.bulk_save_objects(new_records)
def enable_check_changes(update, context):
    """Admin-only command: toggle the change-checking job, persist the flag,
    and notify the primary admin of the new state."""
    if int(update.message.from_user.id) not in admin_ids:
        return  # silently ignore non-admins
    logger.info("[Command /enable_check_changes from admin %s]", update.message.from_user.id)
    toggled = not data.job_check_changes.enabled
    data.job_check_changes.enabled = toggled
    data.config["is_checking_changes"] = toggled
    save_config()
    notif = "Check changes: {}".format(str(data.config["is_checking_changes"]))
    try_msg(context.bot,
            chat_id=admin_ids[0],
            text=notif
            )
    logger.info(notif)
def run(cls, X, y):
    # Train AlaResNet with plain per-sample SGD (batch_size=1) for 100 epochs,
    # logging the cross-entropy loss per sample and the accuracy per epoch.
    # Assumes y is one-hot encoded with shape (n_samples, n_classes) and X has
    # shape (n_samples, n_features) -- TODO confirm against callers.
    n_epochs = 100
    learning_rate = 0.01
    batch_size = 1
    n_classes = y.shape[1]
    n_input = X.shape[1]
    n_hidden_1 = 10
    n_hidden_2 = 20
    n_output = n_classes
    size_X = X.shape[0]
    model = AlaResNet(n_input, n_hidden_1, n_hidden_2, n_output, learning_rate)
    for epoch in range(n_epochs):
        predicted = 0  # count of correctly classified samples this epoch
        for index in range(0, size_X, batch_size):
            X_batch = X[index:min(index + batch_size, size_X), :]
            y_batch = y[index:min(index + batch_size, size_X), :]
            out1, out2, y_pred = model.forward(X_batch)
            loss = cross_entropy(y_batch, y_pred)
            logger.info('Loss on iter {}: {}'.format(index, loss))
            # update weights using vanilla gradient descent:
            # output-layer delta, then back-propagate through the hidden layers
            # using the ReLU (layer 2) and tanh (layer 1) derivatives.
            d_out = (y_batch.flatten() - y_pred).reshape(n_output, 1)
            d_l2 = np.dot(np.diag(relu_deriv(model.a_2)), np.dot(model.W_3.T, d_out))
            d_l1 = np.dot(np.diag(tanh_deriv(model.a_1)), np.dot(model.W_2.T, d_l2))
            delta_W_out = np.dot(d_out, model.a_2.reshape((n_hidden_2, 1)).T)
            delta_W_two = np.dot(d_l2, model.a_1.reshape((n_hidden_1, 1)).T)
            delta_W_one = np.dot(d_l1, X_batch)
            # Update weights
            model.W_3 = model.W_3 - learning_rate * delta_W_out
            model.W_2 = model.W_2 - learning_rate * delta_W_two
            model.W_1 = model.W_1 - learning_rate * delta_W_one
            if y_batch.flatten().argmax(axis=0) == y_pred.argmax(axis=0):
                predicted += 1
        accuracy = predicted / (size_X / batch_size)
        logger.info('Accuracy on epoch {}: {}'.format(epoch, accuracy))
def notification(update, context):
    """Admin-only command: broadcast the text following '/notification ' to
    every chat that has notifications enabled."""
    if int(update.message.from_user.id) not in admin_ids:
        return
    logger.info("[Command /notification from admin %s]", update.message.from_user.id)
    chats_data = dp.chat_data
    if not context.args:
        return
    # Strip the command itself (everything up to the first space) and remove
    # escaping backslashes before broadcasting.
    message = update.message.text
    message = message[message.index(" ") + 1:].replace("\\", "")
    for chat_id in chats_data:
        if chats_data[chat_id].get("enable", False):
            try_msg(context.bot,
                    chat_id=chat_id,
                    text=message,
                    parse_mode="Markdown",
                    )
def ontimeout(self, host): if self.proc is not None: self.timer.cancel() self.lock.acquire() logger.info("medusa will be stopped because of crack [%s] time out." % host) self.lock.release() if self.proc.poll() != None: try: self.proc.terminate() self.proc.kill() self.proc.wait() except Exception,e: os.kill(self.pid, signal.SIGKILL) print str(e)
def domain_analyse_start(target):
    '''
    C-range lookup and sub-domain collection for a target domain.
    Persists the scan task and every discovered sub-domain / C-range IP, and
    pushes each item to the scan nodes through ActiveMQ.
    :param target: domain name to analyse
    :return: None
    '''
    domain = target
    domain_type = get_domain_type(domain)
    domain_plugin_ctrl = DomainPluginController(domain,domain_type)
    domain_plugin_ctrl.plugin_init()
    # Send results to the scan nodes and store them in the database.
    db_core = DomainDB().getConn()
    producer = ActiveMQProducer()
    domainscan_dic = {}
    domainscan_dic['domain'] = domain
    domainscan_dic['scan_type'] = 'subdomain'
    domainscan_dic['first_time'] = getCurTime()
    # insert the scan row and keep its id as the task id for all child rows
    taskid = db_core.insert_by_dict(DOMAIN_SCAN_TABLE, domainscan_dic)
    for domain_item in domain_plugin_ctrl.domain_url:
        domainUrlDic = {}
        domainUrlDic['sid'] = taskid
        domainUrlDic['subdomain'] = domain_item
        domainUrlDic['active'] = 1
        domainUrlDic['first_time'] = getCurTime()
        db_core.insert_by_dict(DOMAIN_URL_TABLE, domainUrlDic)
        # send activemq message for this sub-domain
        domain_json = json.dumps({'type':DOMAIN_TYPE[0], 'url':domain_item, 'taskid':taskid})
        producer.send2msg(domain_json, 'scan_info')
    for ip_item in domain_plugin_ctrl.domain_ip:
        ipUrlDic = {}
        ipUrlDic['sid'] = taskid
        ipUrlDic['ips'] = ip_item
        ipUrlDic['first_time'] = getCurTime()
        db_core.insert_by_dict(DOMAIN_IP_TABLE, ipUrlDic)
        cip_json = json.dumps({'type': DOMAIN_TYPE[1], 'ip':ip_item, 'taskid':taskid})
        producer.send2msg(cip_json, 'scan_info')
    logger.info('%s domain analyse done..' % domain)
    # close the activemq producer and the database connection
    producer.close()
    db_core.close()
    # TODO: send completion email (original left this unimplemented)
    pass
def start(self, domain):
    """Brute-force sub-domains from the dictionary files and record the unique
    domains plus their C-range IPs in ``self.result``."""
    super(SubDomainFindByDit, self).start(domain)
    dnsBrute = DNSBrute(domain,
                        names_file=DOMAIN_DIC_PATH + '/domain_dic_large.txt',
                        next_sub_file=DOMAIN_DIC_PATH + '/next_sub.txt')
    dnsBrute.run()
    ip_list = []
    url_list = []
    for url, ips in dnsBrute.getAvailDomain().items():
        url_list.append(url)
        # collapse every resolved IP to its C-range representation
        ip_list.extend(getCrangeIP(ip) for ip in ips)
    self.result = {'ip': list(set(ip_list)), 'domain': list(set(url_list))}
    logger.info('subdomain by dic found domain count:%d' % len(url_list))
    super(SubDomainFindByDit, self).complete()
    return self.result
def __init_plugin(self):
    '''
    Initialise the scanner: verify the target URL is reachable, load the scan
    dictionary (and proxy list when enabled) and probe the crawler status.
    :return: True when ready to scan, None when the scan should be aborted.
    '''
    if self.__burlaccess():
        self.__load_scan_dic()  # load the scan dictionary
        if self.proxy:
            self.load_proxylist()  # load the proxy list
        self.__crawler_status()  # probe scan status (could be refined further)
        if not self.crawler_status.info:
            logger.info("[done]domain url %s scanner stop" % self.url)
            return None
        return True
    else:
        logger.info("[done]domain url %s can't access" % self.url)
        return None
def start(self, domain):
    """Check ``domain`` for a DNS zone-transfer vulnerability and collect the
    exposed sub-domains and IPs (de-duplicated) into ``self.result``."""
    super(DnsTransWork, self).start(domain)
    dnscheck = DnsEnum(domain)
    dnscheck.getEachDnsInfo()
    ip_list = []
    url_list = []
    if dnscheck.getDnsEnumRet().retcode:
        # vulnerable: every record leaked by the transfer is collected
        for item in dnscheck.getDnsEnumRet().retlist:
            url_list.append(item['domain'])
            if item['ip'] is not None:
                ip_list.append(item['ip'])
        logger.info('domain %s exists DNS domain transfer vul!' % domain)
    else:
        logger.info('domain %s Not exists DNS domain transfer vul!' % domain)
    self.result = {'ip': list(set(ip_list)), 'domain': list(set(url_list))}
    super(DnsTransWork, self).complete()
    return self.result
def run(self, *args, **kwargs):
    # Enqueue every target IP, scan them with a pool of worker threads, and
    # return the accumulated result once all threads have joined.
    for ip in self.ip:
        self.ip_que.put_nowait(ip)
    threads = []
    for num in range(self.threads_num):
        thread = threading.Thread(target=self.__scan_thread)
        threads.append(thread)
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    # NOTE(review): 'ip' here is the leaked loop variable (the last IP that was
    # queued), so only that single IP appears in the completion log -- confirm
    # whether a whole-range message was intended.
    logger.info('%s crange ip scan over!' % ip)
    return self.result
def start(self, domain):
    """Collect sub-domains and IPs from external query interfaces into
    ``self.result`` (de-duplicated)."""
    super(SubDomainFindByInterface, self).start(domain)
    ip_list = []
    url_list = []
    # The fofa interface is dead, so only the iLinks interface is queried now.
    ilink_result = LinksDomain(domain).analyse()
    ip_list.extend(ilink_result['ip'])
    url_list.extend(ilink_result['domain'])
    self.result = {'ip': list(set(ip_list)), 'domain': list(set(url_list))}
    logger.info('subdomain by interface found domain count:%d' % len(url_list))
    super(SubDomainFindByInterface, self).complete()
    return self.result
def run(self):
    # Worker loop: pull (callback, args, kwargs) jobs off the work queue until
    # it stays empty for `timeout` seconds; daemon workers block forever and
    # die with the main thread.
    while True:
        try:
            if self.daemon:
                # daemon mode: block indefinitely on the queue
                callbackfunc, args, kwargs = self.workQueue.get(block = True)
            else:
                callbackfunc, args, kwargs = self.workQueue.get(block = True, timeout = self.timeout)
            # FIX: the arguments must be UNPACKED -- the original called
            # callbackfunc(args, kwargs), passing the raw tuple and dict as
            # two positional parameters instead of the job's real arguments.
            res = callbackfunc(*args, **kwargs)
            # push any non-None result onto the result queue
            if res is not None:
                self.resultQueue.put_nowait(res)
        except Queue.Empty:
            logger.info('work queue empty!')
            break
        except:
            # log and re-raise anything else so failures are visible
            logger.error(sys.exc_info())
            raise
        time.sleep(0.3)
def run(self, *args, **kwargs):
    # Run the web dir/file crawlers concurrently with gevent and collect the
    # discovered URLs; returns None when plugin initialisation fails.
    if self.__init_plugin() is not None:
        gevent.joinall([
            gevent.spawn(self.webdir_crawler_schedu),
            gevent.spawn(self.webfile_crawler_schedu)
        ])
        result_list = []
        # More than 25 hits presumably means the server answers 200 for
        # everything, so a single placeholder entry is recorded instead of the
        # noise -- confirm the threshold's rationale.
        if self.exist_result_que.qsize() > 25:
            result_list.append({'http_code':200, 'url':self.url, 'title':''})
            logger.info('[done]domain url %s webscan to much files!' % self.url)
        else:
            # drain the queue into plain result dicts
            while not self.exist_result_que.empty():
                result = self.exist_result_que.get_nowait()
                result_list.append({'http_code':result['respinfo']['http_code'], 'url':result['url'], 'title':result['respinfo']['title']})
            logger.info('[done]domain url %s webscan over!' % self.url)
        return {'url':self.url, 'figerinfo':self.figerinfo, 'result_list':result_list}
    else:
        logger.info("domain url %s webscanner init failed!" % self.url)
        return None
def run(self):
    # Port-scan every queued C-range IP with nmap via a thread pool, persist
    # the results, then dispatch the vulnerability plugins.
    self.threadpool = ThreadPool(num_of_threads=self.threadpool_count, num_of_work=0, daemon=False, timeout=30)
    while self.cip_que.qsize() > 0:
        cip_dic = self.cip_que.get_nowait()
        self.taskid = cip_dic['taskid']
        # skip private/intranet addresses
        if not test_private_ip(cip_dic['ip']):
            logger.info('current scan ip %s' % cip_dic['ip'])
            nmap = NmapScan(ip=cip_dic['ip'], threads_num=30)
            self.threadpool.add_job(nmap.run)
        else:
            logger.info('skip private ip:%s!' % cip_dic['ip'])
        time.sleep(0.5)  # throttle job submission
    if self.threadpool is not None:
        self.threadpool.wait_for_complete()  # wait for worker threads to end
        self.insert_db()
        self.threadpool = None
    logger.info('port scan over!')
    self.vulplugin_dispatch()  # start the scan plugins
    logger.info('plugin start running......')
def execute_run(self, ip, port, taskid):
    """Queue a heartbleed check for ip:port when the port is in scope, then
    kick off asynchronous result persistence for the task."""
    if str(port) not in self.port_list:
        return
    logger.info('[Openssl] ip:%s, port:%s' % (str(ip), str(port)))
    self.threadpool.add_job(self.__test_heartbleed, ip, port)
    self.async_deal_into_db(taskid)
def execute_run(self, ip, port, taskid):
    """Queue a redis-unauth check for ip:port when the port is in scope, then
    kick off asynchronous result persistence for the task."""
    if str(port) not in self.port_list:
        return
    logger.info('[redis] ip:%s, port:%s' % (str(ip), str(port)))
    self.threadpool.add_job(self.__test_redisunauth, ip, port)
    self.async_deal_into_db(taskid)
portscan_plugin = PortScanPluginController(rediswork) #端口扫描 while True: try: message = consumer.listener.msg_que.get(block = False) scan_json = json.loads(message) if scan_json['type'] == DOMAIN_TYPE[0]: print scan_json['url'] rediswork.pushvulInfo(keyname = WEBSCAN_KEY, ip = scan_json['url'], port = 80, taskid = scan_json['taskid'], type = DOMAIN_TYPE[0]) elif scan_json['type'] == DOMAIN_TYPE[1]: print scan_json['ip'] portscan_plugin.push_ip(ip = scan_json['ip'], taskid = scan_json['taskid']) except Exception,e: if portscan_plugin.get_ip_cnt() > 0: break #logger.error(str(e)) time.sleep(0.1) logger.info('port scan plugin start working!') portscan_plugin.run() #开启端口扫描线程 if __name__ == '__main__': url = sys.argv[1] type = sys.argv[2] if type == '1': domain_analyse_start(url) else: run_portscan_plugin()
def Work():
    # Consume port-crack targets from redis (PORTCRACK_KEY) and run every
    # PortCrack plugin on them; after the queue stays empty 5 minutes, flush
    # results to the database and exit the process.
    redis = RedisWork()
    portcrackplugin_manager = PortCrackPlugin()
    portcrackplugin_manager.loadPlugins()
    portcrackplugins = portcrackplugin_manager.getPlugins()
    taskid = 0  # last task id seen; passed to async_deal_into_db at shutdown
    while True:
        print '------ port crack -------'
        port_json = redis.getvulInfo(PORTCRACK_KEY)
        if port_json is not None:
            try:
                # NOTE(review): eval on queue data executes arbitrary code if
                # redis is writable by others -- confirm the queue is trusted.
                portinfo = eval(port_json)
                print portinfo
                taskid = portinfo['taskid']
                for portcrackplugin in portcrackplugins:
                    portcrackplugin.execute_run(portinfo['ip'], portinfo['port'], taskid)
                    time.sleep(0.5)
            except Exception,e:
                # NOTE(review): the exception is swallowed without logging
                continue
        else:
            # queue empty: wait 5 minutes, then stop all worker threads
            time.sleep(300)
            for portcrackplugin in portcrackplugins:
                portcrackplugin.async_deal_into_db(taskid)
            break
    logger.info('[PortCrack exit] portcrack scan over, exit!')
    sys.exit()

if __name__ == '__main__':
    Work()
if web_json is not None: try: webinfo = eval(web_json) print webinfo taskid = webinfo["taskid"] for webplugin in webplugins: webplugin.execute_run( ip=webinfo["ip"], port=webinfo["port"], bdomain=webinfo["type"], taskid=taskid ) except Exception, e: print str(e) continue else: logger.info("[webscan empty]webpath redis empty!") time.sleep(300) for webplugin in webplugins: webplugin.wait_for_complete(0, taskid) break time.sleep(0.2) logger.info("[webscan exit]webscan scan over, exit!") # send email webpath scan over! sys.exit() if __name__ == "__main__": # if len(sys.argv) == 2: # taskid = str(sys.argv[1]) # Work(taskid) Work()