Example 1
    def hand_ip(self, serviceTypes, option='masscan'):
        ip_list = []

        for item in serviceTypes:
            ip_list.append(item["target"])
        ports = MASSCAN_DEFAULT_PORT
        result2 = {}
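        # With masscan, sweep ports quickly first, then fingerprint each open port with nmap.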
        if option == 'masscan':
            if MASSCAN_FULL_SCAN:
                ports = "1-65535"
            target = os.path.join(PATHS.OUTPUT_PATH,
                                  "target_{0}.log".format(time.time()))
            with open(target, "w+") as fp:
                fp.write('\n'.join(ip_list))
            logger.debug("ip:" + repr(ip_list))
            try:
                result = masscan(target, ports)
            except Exception as e:
                logger.error("masscan error msg:{}".format(repr(e)))
                result = None
            if result is None:
                return None
            # format:{'115.159.39.75': ['80'], '115.159.39.215': ['80', '3306'],}
            for host, open_ports in result.items():
                open_ports = list(open_ports)
                if host not in result2:
                    result2[host] = []
                task_update("running", 1)
                try:
                    result_nmap = nmapscan(host, open_ports)
                except Exception:
                    result_nmap = None
                task_update("running", -1)
                if result_nmap is None:
                    for tmp_port in open_ports:
                        result2[host].append({"port": tmp_port})
                    continue
                tmp_r = self.nmap_result_handle(result_nmap, host=host)
                result2.update(tmp_r)
        elif option == "nmap":
            logger.debug("ip:" + repr(ip_list))
            for host in ip_list:
                result_nmap = nmapscan(host, ports.split(","))
                tmp_r = self.nmap_result_handle(result_nmap, host=host)
                if tmp_r:
                    result2.update(tmp_r)

        data = {}
        for ip in result2.keys():
            # result2[ip]
            if ip not in data:
                data[ip] = {}
            d = ip_location.poc(ip)
            if d:
                data[ip]["location"] = d
            data[ip]["infos"] = result2[ip]

        collector.add_ips(data)
        for ip in result2.keys():
            collector.send_ok_ip(ip)
Example 2
def run(target):
    domains_dic = {}
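    # target may be a directory of domain files, a single file, or a bare domain string.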
    if os.path.isdir(target):
        domain_file_list = glob.glob(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), target, '*.*'))
        for domain_file in domain_file_list:
            domains_dic[os.path.basename(domain_file)] = []
            logger.sysinfo("Loading and checking domains of file %s." % domain_file)
            with open(domain_file, 'r') as f:
                for d in f.readlines():
                    domain = check_domain(d)
                    if not domain:
                        logger.error("Error domain: %s" % d)
                        continue
                    domains_dic[os.path.basename(domain_file)].append(domain)
    elif os.path.isfile(target):
        domains_dic[os.path.basename(target)] = []
        logger.sysinfo("Loading and checking domains of file %s." % target)
        with open(target, 'r') as f:
            for d in f.readlines():
                domain = check_domain(d)
                if not domain:
                    logger.error("Error domain: %s" % d)
                    continue
                domains_dic[os.path.basename(target)].append(domain)
    elif check_domain(target):
        logger.sysinfo("Loading and checking domain %s." % target)
        domains_dic[target] = [target]
    else:
        sys.exit(logger.error("Error domain: %s" % target))
    _run(domains_dic)
Example 3
 def receive_ip(self):
     while 1:
         struct = self.ip_queue.get()
         serviceType = struct.get("serviceType", 'other')
         task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
         if serviceType == "ip":
             flag = False
             self.lock.acquire()
             self.cache_ips.append(struct)
             num = len(self.cache_ips)
             if num >= NUM_CACHE_IP:
                 flag = True
                 serviceTypes = self.cache_ips
                 self.cache_ips = []
             self.lock.release()
             if not flag:
                 self.ip_queue.task_done()
                 continue
             task_update("running", 1)
             try:
                 self.hand_ip(serviceTypes)
             except Exception as e:
                 logger.error("hand ip error:{}".format(repr(e)))
                 logger.error(repr(sys.exc_info()))
             task_update("running", -1)
         self.ip_queue.task_done()
         task_update("tasks", self.queue.qsize() + self.ip_queue.qsize())
Example 4
def _run(domains_dic, vul_scan_flag):
    now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    filename = 'srcscan_subdomain_check_' + time.strftime(
        "%Y%m%d_%H%M%S", time.localtime()) + '.xlsx'
    path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
    if not os.path.exists(path):
        os.makedirs(path)
    for key in domains_dic.keys():
        domains = list(set(domains_dic[key]))
        if len(domains) > 0:
            logger.sysinfo(
                "Scanning %d domains at %s." %
                (len(domains),
                 time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            for domain in domains:
                ret = set()
                ret = subdomain_scan(domain, ret, now_time)
                title_scan(domain, ret, now_time)
                if vul_scan_flag:
                    vul_scan(domain, now_time)

            logger.sysinfo(
                "Finished scanning %d domains at %s." %
                (len(domains),
                 time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

            save(domains, path, filename, key)

        else:
            logger.error("Loading %d domains." % (len(domains)))
    send_smtp(path, filename)
Example 5
    async def run(self):
        async with aiohttp.ClientSession() as session:

            flag = await self.check_engine_available(session, self.engine)
            if not flag:
                logger.error(
                    "{engine_name} is not available, skipping!".format(
                        engine_name=self.engine_name))
                return
            logger.debug("{engine_name} is available, starting!".format(
                engine_name=self.engine_name))

            data = {'inputurl': self.target}
            async with session.post(self.base_url, proxy=self.proxy,
                                    data=data) as res:
                if res is not None:
                    try:
                        content = await res.text()
                    except Exception:
                        content = ""

                    ret = self.check_response_errors(content)
                    if not ret[0]:
                        self.deal_with_errors(ret[1])

                    self.extract(content)

            logger.sysinfo("{engine} Found {num} sites".format(
                engine=self.engine_name, num=len(self.results['subdomain'])))
            logger.debug(self.engine_name + " " +
                         str(len(self.results['subdomain'])))
Example 6
def masscan(target, ports):
    output = os.path.join(PATHS.OUTPUT_PATH,
                          "output_" + str(time.time()) + ".log")
    cmd = "masscan -p {} --rate={} --randomize-hosts -iL \"{}\" -oL \"{}\"".format(
        ports, MASSCAN_RATE, target, output)
    os.system(cmd)
    logger.debug("masscan saved output:" + output)
    open_list = {}

    with open(output, "r") as f:
        result_json = f.readlines()
    if result_json:
        try:
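            # Drop masscan's '#masscan' header and '# end' footer lines, then parse
            # records of the form "open tcp <port> <ip> <timestamp>".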
            del result_json[0]
            del result_json[-1]
            for res in result_json:
                try:
                    p = res.split()
                    ip = p[3]
                    port = p[2]
                    if ip not in open_list:
                        open_list[ip] = set()
                    open_list[ip].add(port)
                except IndexError:
                    pass

        except Exception as e:
            logger.error("masscan read faild")
    if open_list:
        return open_list
    return None
Example 7
def _init_plugins():
    # Load all plugins.
    _plugins = []
    for root, dirs, files in os.walk(PATH['plugins']):
        files = filter(lambda x: not x.startswith("__") and x.endswith(".py"),
                       files)
        for _ in files:
            if len(INCLUDE_PLUGINS) == 1 and INCLUDE_PLUGINS[0] == 'all':
                pass
            else:
                if "loader.py" not in INCLUDE_PLUGINS:
                    INCLUDE_PLUGINS.append("loader.py")
                if _ not in INCLUDE_PLUGINS:
                    continue
            if _ in EXCLUDE_PLUGINS:
                continue
            filename = os.path.join(root, _)
            mod = load_file_to_module(filename)
            try:
                mod = mod.W13SCAN()
                getattr(mod, 'name', 'unknown plugin')
                plugin = os.path.splitext(_)[0]
                plugin_type = os.path.split(root)[1]
                if getattr(mod, 'type', None) is None:
                    setattr(mod, 'type', plugin_type)
                KB["registered"][plugin] = mod
            except AttributeError:
                logger.error('Filename:{} not class "{}"'.format(_, 'W13SCAN'))
    logger.info('Load plugin:{}'.format(len(KB["registered"])))
Example 8
def errormanager(func):
    """
    Menangkap spesifikasi pengecualian
    """
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        global RETRY_COUNT

        try:
            return func(*args, **kwargs)

        except ProtocolError, e:
            # XXX: ignore this?
            pass

        except Exception, e:
            if issubclass(e.__class__, BrutemapException):
                raise e

            time.sleep(SETTING.DELAY)
            logger.error("Error occurred: %s" % str(e))
            if RETRY_COUNT != SETTING.MAX_RETRY:
                RETRY_COUNT += 1
                return decorated(*args, **kwargs)

            raise e
Example 9
 def __init__(self, target, engine_name=None, timeout=5, proxy=None, random_ua=True, random_ip=False):
     self.headers = {
         'Connection': 'keep-alive',
         'Pragma': 'no-cache',
         'Cache-Control': 'no-cache',
         'Upgrade-Insecure-Requests': '1',
         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
         'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
         'DNT': '1',
         'Accept-Encoding': 'gzip, deflate',
         'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
     }
     self.target = target
     self.logger = logger
     self.engine_name = engine_name
     self.random_ua = random_ua
     self.random_ip = random_ip
     self.results = {'subdomain':[], 'dns_domain': [], 'cdn_ip': []}
     self.queries = deque()
     self.timeout = timeout
     if conf['config']['domain']['proxy'].lower() == 'true':
         try:
             proxy = conf['config']['domain']['http_proxy']
         except:
             logger.error("Error http(s) proxy: %s or %s." % (
                 conf['config']['domain']['http_proxy'], conf['config']['domain']['https_proxy']))
     self.proxy = proxy
     self.pre_pageno = 0
     self.pre_query = ""
Example 10
    async def run(self):
        async with aiohttp.ClientSession() as session:

            flag = await self.check_engine_available(session, self.engine)
            if not flag:
                logger.error(
                    "{engine_name} is not available, skipping!".format(
                        engine_name=self.engine_name))
                return
            logger.debug("{engine_name} is available, starting!".format(
                engine_name=self.engine_name))

            data = {'inputurl': self.target}
            content = await self.get(session,
                                     self.base_url,
                                     method="POST",
                                     data=data,
                                     headers=self.headers,
                                     timeout=self.timeout,
                                     proxy=self.proxy)

            ret = self.check_response_errors(content)
            if not ret[0]:
                self.deal_with_errors(ret[1])

            self.extract(content)
            logger.sysinfo("{engine} Found {num} sites".format(
                engine=self.engine_name, num=len(self.results['subdomain'])))
            logger.debug(self.engine_name + " " +
                         str(len(self.results['subdomain'])))
Example 11
def init():
    logger.info("Initialize Git")
    process = subprocess.Popen(
        "git init %s" % (paths.GITHACK_DIST_TARGET_PATH),
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if stderr:
        logger.error("Initialize Git Error: %s" % (stderr))
Example 12
def run_threads(num_threads, thread_function, args: tuple = ()):
    threads = []
    KB["continue"] = True
    KB["console_width"] = getTerminalSize()
    KB['start_time'] = time.time()
    KB['finished'] = 0
    KB["lock"] = threading.Lock()
    KB["result"] = 0
    KB["running"] = 0

    try:
        info_msg = "Staring {0} threads".format(num_threads)
        logger.info(info_msg)

        # Start the threads
        for i in range(num_threads):
            thread = threading.Thread(target=exception_handled_function,
                                      name=str(i),
                                      args=(thread_function, args))
            thread.daemon = True
            try:
                thread.start()
            except Exception as ex:
                err_msg = "error occurred while starting new thread ('{0}')".format(
                    str(ex))
                logger.critical(err_msg)
                break

            threads.append(thread)

        # And wait for them to all finish
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.is_alive():
                    alive = True
                    time.sleep(0.1)

    except KeyboardInterrupt:
        KB['continue'] = False
        if num_threads > 1:
            logger.info("waiting for threads to finish (Ctrl+C was pressed)")
        try:
            while threading.active_count() > 1:
                pass
        except KeyboardInterrupt:
            raise

    except Exception as ex:
        logger.error("thread {0}: {1}".format(
            threading.currentThread().getName(), str(ex)))
        traceback.print_exc()
    finally:
        Share.dataToStdout('\n')
Example 13
def checkdepends():
    logger.info("Check Depends")
    process = subprocess.Popen("git --version",
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if stderr:
        logger.error(DEPENDS)
        sys.exit(1)
    logger.success("Check depends end")
Example 14
def read_domain_file(domain_file, domains_dic):
    domains_dic[os.path.basename(domain_file)] = []
    logger.sysinfo("Loading and checking domains of file %s." % domain_file)
    with open(domain_file, 'r') as f:
        for d in f.readlines():
            domain = check_domain(d)
            if not domain:
                if d.strip() != '':
                    logger.error("Error domain: %s" % d)
                continue
            domains_dic[os.path.basename(domain_file)].append(domain)
    return domains_dic
Example 15
    def recursion_deep(self):
        '''
        Crawl pages recursively according to the depth value.
        operate['db'].deep -- the current depth
        self.deep -- the depth to crawl to
        :return:
        '''
        if operate['db'].deep == 0:
            logger.info("spidering deep == 0 page")
            r = self.get_html(self.url)
            try:
                html = r['html']
            except:
                print "url input error!"
                logger.error("url error(%s)" % (self.url))
                return

            operate['db'].insert(html, self.url)
            self.r_group.append(r)
            operate['db'].deep += 1
            self.recursion_deep()
        elif operate['db'].deep > self.deep:
            logger.info('spider deep over!')
            return
        else:
            logger.info("spidering deep = %s" % operate['db'].deep)
            tmp = []
            url_group = []

            # Extract urls from the pages fetched at the previous depth
            for x in self.r_group:
                html = x['html']
                url_group.extend(self.find_url(html))
                logger.debug("from %s page find %s url" %
                             (x['url'], len(url_group)))

            # If no url was matched on any page, stop and return
            if url_group == []:
                return
            # Feed the extracted urls into the thread pool
            result_list = self._thread.my_map(url_group)
            for y in xrange(len(result_list)):
                if result_list[y]['type'] == 'html':
                    tmp.append(result_list[y])
                else:
                    logger.debug("delete the not html page (%s)" %
                                 url_group[y])

            self.r_group = tmp
            operate['db'].deep += 1
            self.recursion_deep()
Example 16
def handle(parser):
    args = parser.parse_args()
    banner()
    check_update(args)
    config_parser()
    domains_dic = {}
    # asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    if args.debug:
        logger.set_level(CUSTOM_LOGGING.DEBUG)
    nomal = args.nomal
    if args.help:
        parser.print_help()
    elif args.domain:
        domain = check_domain(args.domain)
        if not domain:
            sys.exit(logger.error("Error domain: %s" % args.domain))
        logger.sysinfo("Loading and checking domain %s." % args.domain)
        domains_dic[domain] = [domain]
        run(domains_dic, nomal)
    elif args.domain_file:
        if os.path.isdir(args.domain_file):
            domain_file_list = glob.glob(os.path.join(os.path.dirname(os.path.abspath(__file__)), "domain",'*.*'))
            for domain_file in domain_file_list:
                domains_dic[os.path.basename(domain_file)] = []
                logger.sysinfo("Loading and checking domains of file %s." % args.domain_file)
                with open(domain_file, 'r') as f:
                    for d in f.readlines():
                        domain = check_domain(d)
                        if not domain:
                            logger.error("Error domain: %s" % d)
                            continue
                        domains_dic[os.path.basename(domain_file)].append(domain)
            run(domains_dic, nomal)
        elif os.path.isfile(args.domain_file):
            domains_dic[os.path.basename(args.domain_file)] = []
            logger.sysinfo("Loading and checking domains of file %s." % args.domain_file)
            with open(args.domain_file, 'r') as f:
                for d in f.readlines():
                    domain = check_domain(d)
                    if not domain:
                        logger.error("Error domain: %s" % d)
                        continue
                    domains_dic[os.path.basename(args.domain_file)].append(domain)
            run(domains_dic, nomal)
        else:
            logger.sysinfo("Error for domain file, please check the file %s." % args.domain_file)
    else:
        parser.print_help()
Example 17
 def find_url(self, html):
     '''
     Use BeautifulSoup to find the urls in an html page.
     :param html: the html page
     :return: a list whose items are the urls found in the html
     PS: for now only urls in the href attribute of <a> tags are considered
     '''
     url_group = []
     logger.debug("start find url in a html")
     try:
         bs = BeautifulSoup(html, 'lxml')
     except Exception, e:
         logger.error("bs4(html) fail!\nthe error info is : " + str(e))
         return
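     # NOTE: the snippet is truncated in the original listing. A plausible
     # continuation (an assumption, not the verbatim source) collects the
     # href of every <a> tag, as the docstring describes:
     for a in bs.find_all('a', href=True):
         url_group.append(a['href'])
     return url_group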
Example 18
def load_file_to_module(file_path):
    if '' not in importlib.machinery.SOURCE_SUFFIXES:
        importlib.machinery.SOURCE_SUFFIXES.append('')
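    # Registering the empty suffix lets importlib load plugin files that lack a .py extension.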
    try:
        module_name = 'plugin_{0}'.format(get_filename(file_path, with_ext=False))
        spec = importlib.util.spec_from_file_location(module_name, file_path, loader=PocLoader(module_name, file_path))
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    except ImportError:
        error_msg = "load module failed! '{}'".format(file_path)
        logger.error(error_msg)
        raise
Example 19
def init_conf(path):
    logger.debug("Init tentacle config...")
    configs = {
        "basic": {
            "thread_num": "100",
            "looptimer": str(12*60*60),
            "timeout": "5",
            "user_agent": '\n'.join([
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.0.16 (.NET CLR 3.5.30729)',
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
                'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; de-de) AppleWebKit/531.22.7 (KHTML, like Gecko) Version/4.0.5 Safari/531.22.7',
                'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.4 (KHTML, like Gecko) Chrome/6.0.481.0 Safari/534.4',
                'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.1b4) Gecko/20090423 Firefox/3.5b4 (.NET CLR 3.5.30729)',
                'Mozilla/5.0 (Windows; U; Windows NT 6.0; nb-NO) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
                'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.9.0.14) Gecko/2009082505 Red Hat/3.0.14-1.el5_4 Firefox/3.0.14',
                'Mozilla/5.0 (X11; U; Linux i686; tr-TR; rv:1.9.0.10) Gecko/2009042523 Ubuntu/9.04 (jaunty) Firefox/3.0.10',
                'Opera/9.80 (Macintosh; Intel Mac OS X; U; nl) Presto/2.6.30 Version/10.61',
            ])

        },
        "smtp": {
            "mail_host": "smtp.163.com",
            "mail_port": str(465),
            "mail_user": "******",
            "mail_pass": "******",
            "sender": "*****@*****.**",
            "receivers":"[email protected],[email protected]",
        },
        "proxy": {
            "proxy": False,
            "http_proxy": "http://127.0.0.1:1080",
            "https_proxy": "https://127.0.0.1:1080"
        },
        "google_api": {
            "developer_key": "developer_key",
            "search_enging": "search_enging"
        },
        # The API sections below are left for later:
        # "zoomeye_api": {
        #     "username": "******",
        #     "password": "******"
        # },
        # "fofa_api": {
        #     "email": "*****@*****.**",
        #     "token": "*****@*****.**"
        # },
        # "shodan_api": {
        #     "token": "token@tentacle"
        # },
        # "github_api": {
        #     "token": "token@tentacle",
        # },
    }
    cf = configparser.ConfigParser()
    for section in configs.keys():
        cf[section] = configs[section]
    with open(path, 'w+') as configfile:
        cf.write(configfile)
    sys.exit(logger.error("Please set the tentacle config in submon.conf..."))
Example 20
def _init_plugins():
    # Load all plugins.
    _plugins = []
    for root, dirs, files in os.walk(PATH['plugins']):
        files = filter(lambda x: not x.startswith("__") and x.endswith(".py"),
                       files)
        for _ in files:
            if _ in EXCLUDE_PLUGINS:
                continue
            filename = os.path.join(PATH['plugins'], _)
            mod = load_file_to_module(filename)
            try:
                mod = mod.W13SCAN()
                KB["registered"][_] = mod
            except AttributeError:
                logger.error('Filename:{} not class "{}"'.format(_, 'W13SCAN'))
    logger.info('Load plugin:{}'.format(len(KB["registered"])))
Example 21
def send_smtp(path, filename):
    try:
        mail_host = conf['config']['smtp']['mail_host'].strip()
        mail_port = int(conf['config']['smtp']['mail_port'])
        mail_user = conf['config']['smtp']['mail_user']
        mail_pass = conf['config']['smtp']['mail_pass']
        timeout = int(conf['config']['basic']['timeout'])
        sender = conf['config']['smtp']['sender']
        receivers = conf['config']['smtp']['receivers'].split(',')
    except:
        logger.error(
            "Load config error: smtp, please check the config in srcscan.conf."
        )
        return

    content = '''
    Hello,

        Please find attached the srcscan subdomain scan results [%s].

                                                                                -- by srcscan
    ''' % (filename)
    message = MIMEMultipart()
    message['From'] = "srcscan<%s>" % sender
    message['To'] = ','.join(receivers)
    message['Subject'] = Header(filename, 'utf-8')
    message.attach(MIMEText(content, 'plain', 'utf-8'))

    with open(os.path.join(path, filename), 'rb') as f:
        att = MIMEText(f.read(), 'base64', 'utf-8')
        att["Content-Type"] = 'application/octet-stream'
        att.add_header("Content-Disposition",
                       "attachment",
                       filename=("utf-8", "", filename))
        message.attach(att)

    n = 3
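    # Try up to three times, sleeping 10 seconds between attempts.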
    while n > 0:
        try:
            socket.setdefaulttimeout(timeout)
            smtpObj = smtplib.SMTP_SSL(host=mail_host)
            smtpObj.connect(mail_host, mail_port)
            smtpObj.login(mail_user, mail_pass)
            smtpObj.sendmail(sender, receivers, message.as_string())
            logger.sysinfo("SMTP send success.")
            break
        except smtplib.SMTPException as e:
            logger.error("Error for SMTP: %s" % (str(e)))
        except socket.timeout as e:
            logger.error("Timeout for SMTP.")
        except Exception as e:
            print(str(e))
            logger.error(
                "Error for SMTP, please check SMTP' config in srcscan.conf.")
        time.sleep(10)
        n -= 1
Example 22
def update_program():
    git_repository = "https://github.com/orleven/srcscan.git"
    success = False
    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if not os.path.exists(os.path.join(path, ".git")):
        msg = "Have not a git repository. Please checkout the 'srcscan' repository "
        msg += "from GitHub (e.g. 'git clone --depth 1 https://github.com/orleven/srcscan.git srcscan')"
        logger.error(msg)
    else:
        msg = "Updating srcscan to the latest version from the gitHub repository."
        logger.sysinfo(msg)

        msg = "The srcscan will try to update itself using 'git' command."
        logger.sysinfo(msg)

        logger.sysinfo("Update in progress.")

    try:
        process = subprocess.Popen(
            "git checkout . && git pull %s HEAD" % git_repository,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=path.encode(locale.getpreferredencoding())
        )  # Reference: http://blog.stastnarodina.com/honza-en/spot/python-unicodeencodeerror/
        poll_process(process, True)
        stdout, stderr = process.communicate()
        stderr = stderr.decode(errors="replace") if stderr else ""
        success = not process.returncode
    except (IOError, OSError) as ex:
        success = False
        stderr = ""
        logger.error(type(ex).__name__)

    if success:
        logger.success("The latest revision '%s'" % (get_revision_number()))
    else:
        if "Not a git repository" in stderr:
            msg = "Not a valid git repository. Please checkout the 'orleven/srcscan' repository "
            msg += "from GitHub (e.g. 'git clone --depth 1 https://github.com/orleven/srcscan.git srcscan')"
            logger.error(msg)
        else:
            logger.error("Update could not be completed ('%s')" %
                         re.sub(r"\W+", " ", stderr).strip())

    if not success:
        if sys.platform == 'win32':
            msg = "for Windows platform it's recommended "
            msg += "to use a GitHub for Windows client for updating "
            msg += "purposes (http://windows.github.com/) or just "
            msg += "download the latest snapshot from "
            msg += "https://github.com/orleven/srcscan"
        else:
            msg = "For Linux platform it's required "
            msg += "to install a standard 'git' package (e.g.: 'sudo apt-get install git')"

        logger.sysinfo(msg)
Example 23
def load_string_to_module(code_string, fullname=None):
    try:
        module_name = 'pocs_{0}'.format(
            get_md5(code_string)) if fullname is None else fullname
        file_path = 'w12scan://{0}'.format(module_name)
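        # The pseudo file path only names the module; PocLoader serves the source from the string itself.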
        poc_loader = PocLoader(module_name, file_path)
        poc_loader.set_data(code_string)
        spec = importlib.util.spec_from_file_location(module_name,
                                                      file_path,
                                                      loader=poc_loader)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    except ImportError:
        error_msg = "load module '{0}' failed!".format(fullname)
        logger.error(error_msg)
        raise
Example 24
async def get_title(req_list):
    ret = []
    async with ClientSession() as session:
        for subdomain in req_list:
            try:
                logger.debug("Curling %s..." % (subdomain))
                flag = False
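                # Try http first, then https, and keep the first scheme that answers.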
                for pro in ['http://', "https://"]:
                    url = pro + subdomain + '/'
                    async with session.get(url=url) as response:
                        if response is not None:
                            try:
                                res = await response.read()
                            except:
                                res = ""
                            status = response.status
                            try:
                                res = str(res, 'utf-8')
                            except UnicodeDecodeError:
                                res = str(res, 'gbk')
                            except:
                                res = "网页编码错误"

                            m = re.search(r'<title>(.*?)</title>', res.lower())
                            if m is not None and m.group(1):
                                title = m.group(1)
                            else:
                                title = 'page has no title'

                            try:
                                length = int(
                                    response.headers['content-length'])
                            except:
                                length = len(str(response.headers)) + len(res)

                            ret.append([subdomain, url, title, status, length])
                            flag = True
                            break
                if not flag:
                    ret.append([subdomain, "", "", 0, 0])
            except Exception as e:
                logger.error(str(e))
    return ret
Example 25
    def execute(self, request: Request, response: Response):
        self.target = ''
        self.requests = request
        self.response = response
        output = None
        try:
            output = self.audit()
        except NotImplementedError:
            logger.error('Plugin: {0} does not define "{1}" mode'.format(
                self.name, 'audit'))

        except ConnectTimeout:
            retry = RETRY
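            # Re-run the audit up to RETRY times; the while/else below fires only if every retry timed out.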
            while retry > 0:
                logger.debug('Plugin: {0} timeout, start it over.'.format(
                    self.name))
                try:
                    output = self.audit()
                    break
                except ConnectTimeout:
                    logger.debug('POC: {0} time-out retry failed!'.format(
                        self.name))
                retry -= 1
            else:
                msg = "connect target '{0}' failed!".format(self.target)
                logger.error(msg)

        except HTTPError as e:
            logger.warning(
                'Plugin: {0} HTTPError occurs, start it over.'.format(
                    self.name))

        except ConnectionError as e:
            msg = "connect target '{0}' failed!".format(self.target)
            logger.error(msg)

        except TooManyRedirects as e:
            logger.error(str(e))

        except Exception as e:
            logger.error(str(e))

        return output
Example 26
def poc(target):
    '''
    This plugin separates useful fingerprint data out of the html body or the response headers.
    :param target:
    :return:
    '''
    def discern_from_header(name, discern_type, key, reg):
        if "Server" in headers:
            result.add("Server:" + headers["Server"])
        if "X-Powered-By" in headers:
            result.add("X-Powered-By:" + headers["X-Powered-By"])
        if key in headers and re.search(reg, headers[key], re.I):
            result.add(name)

    def discern_from_index(name, discern_type, key, reg):
        if re.search(reg, html, re.I):
            result.add(name)

    html = collector.get_domain_info(target, "body")
    headers = collector.get_domain_info(target, "headers")
    result = set()
    result_dict = {}
    if html and headers:
        mark_list = read_config()
        for mark_info in mark_list:
            name, discern_type, key, reg = mark_info
            if discern_type == 'headers':
                discern_from_header(name, discern_type, key, reg)
            elif discern_type == 'index':
                discern_from_index(name, discern_type, key, reg)
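    # Entries look like "Name:value"; the value may itself contain ':', so rejoin the tail after splitting.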
    for i in result:
        try:
            k, *v = i.split(":")
            v = ' '.join(v)
            # 'X-Powered-By:Servlet 2.4; JBoss-4.0.3SP1 (build: CVSTag=JBoss_4_0_3_SP1 date=200510231054)/Tomcat-5.5'"
            result_dict[k] = v
        except:
            logger.error("webeye error split:" + repr(i))
    collector.add_domain_info(target, result_dict)
Example 27
def interruptHandler(signum, frame):
    """
    Fungsi untuk menghandle sinyal interupsi dari tombol
    CTRL-C dan CTRL-Z.
    """

    if SETTING.IGNORE_INTERRUPT:
        return

    print()
    registerInterruptHandler(reset=True)

    try:
        msg = "[?] What do you want? [(C)ontinue (default) / (s)kip target / (q)uit]: "
        jawaban = (raw_input(msg) or "c").lower()
        if jawaban.startswith("c"):
            pass

        elif jawaban.startswith("s"):
            raise BrutemapSkipTargetException

        elif jawaban.startswith("q"):
            errMsg = "User quit"
            logger.error(errMsg)

            raise BrutemapQuitException

    except KeyboardInterrupt:
        print()

        errMsg = "User aborted"
        logger.error(errMsg)

        if SETTING.BRUTE_SESSION:
            SETTING.EXIT_NOW = True
            raise BrutemapStopBruteForceException

        raise BrutemapQuitException

    # XXX: when an interrupt signal is received, does the webdriver close automatically?
    reinitWebDriver()
    registerInterruptHandler()
Example 28
def initialize():
    initDir()
    initWebDriver()

    try:
        for i, url in enumerate(SETTING.TARGETS):
            i += 1
            url = str(autoCompleteUrl(url))
            infoMsg = "Target url '%s' (%d)" % (url, i)
            logger.info(infoMsg)

            try:
                registerInterruptHandler()
                checkTarget(url)

            except BrutemapSkipTargetException:
                infoMsg = "Skipping target %s" % repr(url)
                logger.info(infoMsg)

            except BrutemapNextTargetException:
                pass

            registerInterruptHandler(reset=True)
            clearData()
            reinitWebDriver(reload_url=False)

    except (BrutemapNullValueException, BrutemapQuitException):
        pass

    except KeyboardInterrupt:
        print()
        errMsg = "User aborted"
        logger.error(errMsg)

    except:
        path = saveErrorMessage()
        errMsg = "An error has occurred! look at: %s (for full error messages). " % repr(
            path)
        errMsg += "And report to: %s (thanks!)" % repr(ISSUE_LINK)
        logger.error(errMsg)

    registerInterruptHandler(reset=True)
Example 29
def taobao_api(arg):
    api = "http://ip.taobao.com/service/getIpInfo.php?ip={0}".format(arg)
    try:
        r = requests.get(api)
    except Exception as e:
        logger.error("ip_location request faild:" + str(e))
        return False
    if r.status_code != 200:
        return False
    jsonp = r.text
    data = json.loads(jsonp)
    if data.get("data", None):
        d = {
            "country_id": data["data"]["country_id"],
            "country": data["data"]["country"],
            "region": data["data"]["region"]
        }
        return d
    else:
        return False
Example 30
    def get_html(self, url):
        '''
        :return: {'type': the content type the url returned; urls usually appear in js and html pages, js is not handled yet}
        '''

        header = {
            "User-Agent":
                "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0"
        }
        result = {"type": None}
        logger.info("request a url: %s" %url)
        try:
            req = requests.get(url, headers=header, timeout=4)
        except Exception, e:
            try:
                logger.error("%s @@ requests fail and the info is %s" %(url.encode('utf-8'), e))
            except:
                print url
                print isinstance(url, unicode)
            return result
Example 31
def setTarget():
    ori_str = conf.TARGET = sys.argv[1]
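    # for/else: any alphabetic character means the target is a domain; otherwise treat it as an IP or range.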
    for each in ori_str:
        if each.isalpha():
            conf.MODE = TARGET_MODE.DOMAIN
            break
    else:
        conf.MODE = TARGET_MODE.IP
        try:
            _list = IPy.IP(ori_str)
        except Exception, e:
            sys.exit(logger.error('Invalid IP, %s' % e))