Example #1
    def proxy_iphuan(self):
        url, params = chambering("https://ip.ihuan.me/", strike=False)

        # requester returns a response object; reuse the url chambering built
        response = requester(url, params, GET=True, timeout=None)
        links = [
            link.group()
            for link in self.dic['ip_huan']['link'].finditer(response.text)
        ]

        for i in range(len(links)):
            link = self.dic['ip_huan']['sub'].sub("", links[i])

            result = requester("".join(["https://ip.ihuan.me/", link]))
            text = regex.Espace_eliminate.sub("", result.text)
            proxy_ips, proxy_ports = self.dic['ip_huan']['ip'].finditer(text),\
                                     self.dic['ip_huan']['port'].finditer(text)

            for ips, ports in zip(proxy_ips, proxy_ports):
                # both are match objects, so take .group() before substituting
                ip, port, type = ips.group(),\
                                 self.dic['ip_huan']['sub'].sub(" ", ports.group()),\
                                 "http"
                self.container.put((ip, port, type))
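
All of the proxy scrapers in this collection follow the same pattern: finditer over the page text, then zip the ip/port match streams into tuples. A minimal self-contained sketch, with a hypothetical HTML snippet and regexes standing in for the compiled patterns in self.dic['ip_huan']:

import re

# hypothetical page snippet and patterns standing in for self.dic['ip_huan']
html = '<td>1.2.3.4</td><td>8080</td><td>5.6.7.8</td><td>3128</td>'
ip_re = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}')
port_re = re.compile(r'<td>(\d{2,5})</td>')

for ip_m, port_m in zip(ip_re.finditer(html), port_re.finditer(html)):
    print((ip_m.group(), port_m.group(1), 'http'))  # same tuple shape as container.put()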
Example #2
    def execution(self):
        try:
            if self.file is not None:
                self.initislis_file()
            if self.subdomains_queue is not None:
                self.initialis_subdomain()
            if self.target is not None:
                self.initis()

            while not self.target_url.empty():
                target = self.target_url.get()
                strike_pre = assault_pre()
                strike_pre.payload_provide()

                while not target.empty():
                    original = target.get()

                    if self.domain in original:
                        url, data = chambering(original, strike=False)
                        received_ = requester(url, data, GET=True)
                        print(f"{blue_green}[+][{time}] Vulnerability scanning is being performed on {original}{end}")
                        if received_ is not None:
                            self.url_extrator(received_.text)

                    if "=" in original:
                        url, data = chambering(original, strike=False)
                        received = requester(url, data, GET=True)

                        for vul_type, category in strike_pre.get_payload_category().items():
                            for count in range(category[1].qsize()):
                                payload = category[0]()
                                url, data = chambering(original, strike=True, payload=payload, type=vul_type)

                                if vul_type in ["SQLi", "file_inclusion", "command_injection", "ssrf"]:
                                    Poisoned = requester(url, data, GET=True)

                                    # check for None before reading status_code
                                    if Poisoned is not None and Poisoned.status_code < 500 \
                                            and Poisoned.status_code != 404:
                                        if error_check(Poisoned):
                                            if receive_check(received.text, Poisoned.text, vul_type, payload):
                                                message = vul_message(vul_type, original, payload)
                                                self.logger.critical(message)

        except Exception:
            pass  # swallow errors so one bad target does not abort the whole scan
Example #3
def crawl(scheme, host, main_url, form, blindXSS, blindPayload, headers, delay,
          timeout, encoding):
    if form:
        for each in form.values():
            url = each['action']
            if url:
                # startswith checks whether the string begins with the given
                # prefix and returns True or False
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    # url[2:] is everything from the third character onward;
                    # scheme is the protocol, http or https
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    # \w matches a letter, digit, underscore, or other word character
                    url = scheme + '://' + host + '/' + url
                if url not in core.config.globalVariables['checkedForms']:
                    core.config.globalVariables['checkedForms'][url] = []
                method = each['method']
                GET = True if method == 'get' else False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                    for paramName in paramData.keys():
                        if paramName not in core.config.globalVariables[
                                'checkedForms'][url]:
                            core.config.globalVariables['checkedForms'][
                                url].append(paramName)
                            paramsCopy = copy.deepcopy(paramData)
                            paramsCopy[paramName] = xsschecker
                            response = requester(url, paramsCopy, headers, GET,
                                                 delay, timeout)
                            occurences = htmlParser(response, encoding)
                            positions = occurences.keys()
                            # verification: check whether generator() produced a payload
                            efficiencies = filterChecker(
                                url, paramsCopy, headers, GET, delay,
                                occurences, timeout, encoding)
                            vectors = generator(occurences, response.text)
                            if vectors:
                                for confidence, vects in vectors.items():
                                    try:
                                        payload = list(vects)[0]
                                        logger.vuln(
                                            'Vulnerable webpage: %s%s%s' %
                                            (green, url, end))
                                        logger.vuln(
                                            'Vector for %s%s%s: %s' %
                                            (green, paramName, end, payload))
                                        break
                                    except IndexError:
                                        pass
                            if blindXSS and blindPayload:
                                paramsCopy[paramName] = blindPayload
                                requester(url, paramsCopy, headers, GET, delay,
                                          timeout)
Example #4
def crawl(scheme, host, main_url, form, domURL, verbose, blindXSS,
          blindPayload, headers, delay, timeout, skipDOM, encoding):
    if domURL and not skipDOM:
        response = requester(domURL, {}, headers, True, delay, timeout).text
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found at %s' %
                  (good, domURL))
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    if form:
        for each in form.values():
            url = each['action']
            if url:
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    url = scheme + '://' + host + '/' + url
                method = each['method']
                GET = True if method == 'get' else False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                    for paramName in paramData.keys():
                        paramsCopy = copy.deepcopy(paramData)
                        paramsCopy[paramName] = xsschecker
                        response = requester(url, paramsCopy, headers, GET,
                                             delay, timeout)
                        parsedResponse = htmlParser(response, encoding)
                        occurences = parsedResponse[0]
                        positions = parsedResponse[1]
                        efficiencies = filterChecker(url, paramsCopy, headers,
                                                     GET, delay, occurences,
                                                     timeout, encoding)
                        vectors = generator(occurences, response.text)
                        if vectors:
                            for confidence, vects in vectors.items():
                                try:
                                    payload = list(vects)[0]
                                    print('%s Vulnerable webpage: %s%s%s' %
                                          (good, green, url, end))
                                    print(
                                        '%s Vector for %s%s%s: %s' %
                                        (good, green, paramName, end, payload))
                                    break
                                except IndexError:
                                    pass
                        if blindXSS and blindPayload:
                            paramsCopy[paramName] = blindPayload
                            requester(url, paramsCopy, headers, GET, delay,
                                      timeout)
Example #5
    def execution(self):        # run the attack
        try:
            if self.file is not None:       # load the targets
                self.initislis_file()
            if self.subdomains_queue is not None:
                self.initialis_subdomain()
            if self.target is not None:
                self.initis()

            while not self.target_url.empty():
                target = self.target_url.get()

                while not target.empty():
                    original = target.get()

                    if self.domain in original:     # the target belongs to the given domain, e.g. baidu.com/a/b/text?a=2&b=21 belongs to baidu.com
                        url, data = chambering(original, strike=False)
                        received_ = requester(url, data, GET=True, cookie=self.cookie, proxy=self.proxy)
                        if received_ is not None and received_.status_code == 403:  # switch proxy if the current one is banned
                            if self.proxy_queue is not None and not self.proxy_queue.empty():
                                self.proxy = get_proxy(self.proxy_queue)
                        print(f"{blue_green}[+][{time}] Vulnerability scanning is being performed on {original}{end}")
                        if received_ is not None:
                            self.url_extrator(received_.text)   # extract more URLs from this page and add them to the targets

                    if "=" in original:     # the URL carries query parameters
                        url, data = chambering(original, strike=False)
                        strike_pre = assault_pre()      # instantiate the payload pre-processing class
                        strike_pre.payload_provide()    # load the payloads
                        received = requester(url, data, GET=True, cookie=self.cookie, proxy=self.proxy)

                        for vul_type, category in strike_pre.get_payload_category().items():
                            for count in range(category[1].qsize()):    # category[1] is the payload queue
                                payload = category[0]()                 # category[0] is the method that walks the attack queue
                                url, data = chambering(original, strike=True, payload=payload, type=vul_type)

                                if vul_type in ["SQLi", "XSS", "file_inclusion", "command_injection", "ssrf"]:
                                    Poisoned = requester(url, data, GET=True, cookie=self.cookie, proxy=self.proxy)

                                    if Poisoned is not None and Poisoned.status_code < 400:  # the attack got a response
                                        if error_check(Poisoned.text):  # the page exists
                                            if attack_check(received.text, Poisoned.text, vul_type, payload):  # the pages differ
                                                message = vul_message(vul_type, original, payload)    # report the finding
                                                self.logger.critical(message)

        except Exception:
            pass
Example #6
File: arjun.py Project: w9w/Arjun
def initialize(url, include, headers, GET, delay, paramList, threadCount):
    url = stabilize(url)
    if not url:
        return {}
    else:
        firstResponse = requester(url, include, headers, GET, delay)

        originalFuzz = randomString(6)
        data = {originalFuzz: originalFuzz[::-1]}
        data.update(include)
        response = requester(url, data, headers, GET, delay)
        reflections = response.text.count(originalFuzz[::-1])

        originalResponse = response.text
        originalCode = response.status_code

        newLength = len(response.text)
        plainText = removeTags(originalResponse)
        plainTextLength = len(plainText)

        factors = {'sameHTML': False, 'samePlainText': False}
        if len(firstResponse.text) == len(originalResponse):
            factors['sameHTML'] = True
        elif len(removeTags(firstResponse.text)) == len(plainText):
            factors['samePlainText'] = True

        heuristic(firstResponse.text, paramList)

        fuzz = randomString(8)
        data = {fuzz: fuzz[::-1]}
        data.update(include)

        toBeChecked = slicer(paramList, 50)
        foundParamsTemp = []
        while True:
            toBeChecked = narrower(toBeChecked, url, include, headers, GET,
                                   delay, originalResponse, originalCode,
                                   reflections, factors, threadCount)
            toBeChecked = unityExtracter(toBeChecked, foundParamsTemp)
            if not toBeChecked:
                break

        foundParams = []

        for param in foundParamsTemp:
            exists = quickBruter([param], originalResponse, originalCode,
                                 reflections, factors, include, delay, headers,
                                 url, GET)
            if exists:
                foundParams.append(param)

        for each in foundParams:
            print('%s?%s' % (url, each))
        return foundParams
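
The slicer/narrower/unityExtracter loop above is a group-testing search: split parameter chunks in half and keep only the halves the server reacts to. A self-contained sketch of that idea, where responds() is a hypothetical oracle standing in for the requester/compare round-trip:

SECRET = {'debug', 'token'}  # hypothetical params the server actually reads

def responds(chunk):
    return any(p in SECRET for p in chunk)

def narrow(chunks, found):
    out = []
    for chunk in chunks:
        if not responds(chunk):
            continue  # nothing in this half changes the response
        if len(chunk) == 1:
            found.append(chunk[0])  # a verified single parameter
        else:
            mid = len(chunk) // 2
            out.extend([chunk[:mid], chunk[mid:]])
    return out

chunks = [['id', 'debug', 'page', 'q', 'token', 'lang']]
found = []
while chunks:
    chunks = narrow(chunks, found)
print(found)  # ['debug', 'token']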
Example #7
def crawl(scheme, host, main_url, form, blindXSS, blindPayload, headers, delay,
          timeout, encoding):
    if form:
        for each in form.values():
            url = each['action']
            if url:
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    url = scheme + '://' + host + '/' + url
                if url not in core.config.globalVariables['checkedForms']:
                    core.config.globalVariables['checkedForms'][url] = []
                method = each['method']
                GET = True if method == 'get' else False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                    for paramName in paramData.keys():
                        if paramName not in core.config.globalVariables[
                                'checkedForms'][url]:
                            core.config.globalVariables['checkedForms'][
                                url].append(paramName)
                            paramsCopy = copy.deepcopy(paramData)
                            paramsCopy[paramName] = xsschecker
                            response = requester(url, paramsCopy, headers, GET,
                                                 delay, timeout)
                            parsedResponse = htmlParser(response, encoding)
                            occurences = parsedResponse[0]
                            positions = parsedResponse[1]
                            efficiencies = filterChecker(
                                url, paramsCopy, headers, GET, delay,
                                occurences, timeout, encoding)
                            vectors = generator(occurences, response.text)
                            if vectors:
                                for confidence, vects in vectors.items():
                                    try:
                                        payload = list(vects)[0]
                                        logger.vuln(
                                            'Vulnerable webpage: %s%s%s' %
                                            (green, url, end))
                                        logger.vuln(
                                            'Vector for %s%s%s: %s' %
                                            (green, paramName, end, payload))
                                        break
                                    except IndexError:
                                        pass
                            if blindXSS and blindPayload:
                                paramsCopy[paramName] = blindPayload
                                requester(url, paramsCopy, headers, GET, delay,
                                          timeout)
Example #8
def fuzzer(url, params, headers, GET, delay, timeout, WAF, encoding):
    fuzzer_report = list()
    for fuzz in fuzzes:
        report = dict()
        t = delay + randint(delay, delay * 2) + counter(fuzz)
        sleep(t)
        try:
            if encoding:
                fuzz = encoding(unquote(fuzz))
                report['encoding'] = str(encoding)
            data = replaceValue(params, xsschecker, fuzz, copy.deepcopy)
            response = requester(url, data, headers, GET, delay / 2, timeout)
        except:
            logger.error('WAF is dropping suspicious requests.')
            if delay == 0:
                logger.info('Delay has been increased to %s6%s seconds.' %
                            (green, end))
                delay += 6
            limit = (delay + 1) * 50
            timer = -1
            while timer < limit:
                logger.info(
                    '\rFuzzing will continue after %s%i%s seconds.\t\t\r' %
                    (green, limit, end))
                limit -= 1
                sleep(1)
            try:
                requester(url, params, headers, GET, 0, 10)
                logger.good(
                    'Pheww! Looks like sleeping for %s%i%s seconds worked!' %
                    (green, ((delay + 1) * 2), end))
            except:
                logger.error(
                    '\nLooks like WAF has blocked our IP Address. Sorry!')
                break
        if encoding:
            fuzz = encoding(fuzz)
        if fuzz.lower() in response.text.lower(
        ):  # if fuzz string is reflected in the response
            result = ('%s[passed]  %s' % (green, end))
            result_report = 'passed'
        # if the server returned an error (Maybe WAF blocked it)
        elif str(response.status_code)[:1] != '2':
            result = ('%s[blocked] %s' % (red, end))
            result_report = 'blocked'
        else:  # if the fuzz string was not reflected in the response completely
            result = ('%s[filtered]%s' % (yellow, end))
            result_report = 'filtered'
        logger.info('%s %s' % (result, fuzz))
        report['fuzz_string'] = fuzz
        report['status'] = result_report
        fuzzer_report.append(report)
    return fuzzer_report
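
The passed/blocked/filtered decision above reduces to two checks on the response. A runnable sketch with a canned response object standing in for the live request:

class FakeResponse:  # canned stand-in for the requester() return value
    def __init__(self, text, status_code):
        self.text, self.status_code = text, status_code

def classify(fuzz, response):
    if fuzz.lower() in response.text.lower():
        return 'passed'    # fuzz string reflected intact
    if str(response.status_code)[:1] != '2':
        return 'blocked'   # non-2xx: the WAF probably dropped it
    return 'filtered'      # 2xx, but the string was stripped or mangled

print(classify('<svg/onload=x>', FakeResponse('echo: <svg/onload=x>', 200)))  # passed
print(classify('<svg/onload=x>', FakeResponse('Forbidden', 403)))             # blocked
print(classify('<svg/onload=x>', FakeResponse('echo: svg onload x', 200)))    # filtered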
Example #9
def crawl(scheme, host, main_url, form, domURL, verbose, blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding):
    if domURL and not skipDOM:
        response = requester(domURL, {}, headers, True, delay, timeout).text
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found at %s' %
                  (good, domURL))
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    if form:
        for each in form.values():
            url = each['action']
            if url:
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    url = scheme + '://' + host + '/' + url
                method = each['method']
                GET = True if method == 'get' else False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                    for paramName in paramData.keys():
                        paramsCopy = copy.deepcopy(paramData)
                        paramsCopy[paramName] = xsschecker
                        response = requester(
                            url, paramsCopy, headers, GET, delay, timeout)
                        parsedResponse = htmlParser(response, encoding)
                        occurences = parsedResponse[0]
                        positions = parsedResponse[1]
                        efficiencies = filterChecker(
                            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
                        vectors = generator(occurences, response.text)
                        if vectors:
                            for confidence, vects in vectors.items():
                                try:
                                    payload = list(vects)[0]
                                    print('%s Vulnerable webpage: %s%s%s' %
                                          (good, green, url, end))
                                    print('%s Vector for %s%s%s: %s' %
                                          (good, green, paramName, end, payload))
                                    break
                                except IndexError:
                                    pass
                        if blindXSS and blindPayload:
                            paramsCopy[paramName] = blindPayload
                            requester(url, paramsCopy, headers,
                                      GET, delay, timeout)
Example #10
def bruter(request, factors, params, mode='bruteforce'):
    if mem.var['kill']:
        return []
    response = requester(request, params)
    conclusion = error_handler(response, factors)
    if conclusion == 'retry':
        response = requester(request, params)
    elif conclusion == 'kill':
        return []
    comparison_result = compare(response, factors, params)
    if mode == 'verify':
        return comparison_result[0]
    return comparison_result[1]
Example #11
def estimate_payload_char(payload_chars, chars_type, url, data, test_param,
                          context_info, GET, header):
    '''
    :param payload_chars: payload characters to try
    :param chars_type: distinguishes breakers from exploiters
    :param url:
    :param data: substitutable parameters; a list for path substitution, a dict for parameter substitution
    :param test_param: the parameter currently under test
    :param context_info: context information for the tested parameter within the response
    :param GET: True means the GET request method
    :return:
    '''
    estimate_res = {"avail_chars": [], "sore": 0, "output_zone": ""}
    for char in payload_chars:
        scout_str = gen_scout_str()
        check_str = gen_check_str(scout_str, char)
        if isinstance(data, list):  # path handling
            repace_url = url  # keep the original url so it is not replaced more than once
            repace_url = repace_url.replace(test_param, check_str)
            resp = requester(repace_url,
                             data=None,
                             GET=GET,
                             headers=header,
                             delay=0,
                             timeout=30)
        else:
            check_params = copy.deepcopy(data)
            check_params[test_param] = check_str
            resp = requester(url,
                             data=check_params,
                             GET=GET,
                             headers=header,
                             delay=0,
                             timeout=30)
        text = resp.text
        text_list = text.split('\n')
        start_lineno = context_info['start_position'][0]
        try:
            end_lineno = context_info['end_position'][0]
        except KeyError:
            end_lineno = start_lineno
        output_zone = '\n'.join(text_list[start_lineno - 1:end_lineno])
        res = output_zone.find(check_str)  # some characters get escaped in the string, so a regex cannot be used
        if res != -1:
            estimate_res["avail_chars"].append(char)
            estimate_res["sore"] += PAYLOAD_CHARS_[
                context_info["context"]][chars_type][char]
            estimate_res["output_zone"] = output_zone  # keep the last usable reflection location
    return estimate_res
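
The character probe above boils down to wrapping each candidate character in marker strings, injecting it, and checking whether it comes back intact. A self-contained sketch, with fake_reflect as a hypothetical server that HTML-encodes angle brackets:

import random, string

def gen_scout_str(n=6):
    return ''.join(random.choices(string.ascii_lowercase, k=n))

def surviving_chars(chars, reflect):
    alive = []
    for char in chars:
        scout = gen_scout_str()
        check = scout + char + scout      # same shape as gen_check_str()
        response = reflect(check)         # stand-in for the requester round-trip
        if check in response:             # find(), not a regex, as noted above
            alive.append(char)
    return alive

def fake_reflect(s):  # hypothetical server that HTML-encodes angle brackets
    return '<p>' + s.replace('<', '&lt;').replace('>', '&gt;') + '</p>'

print(surviving_chars(['<', '>', '"', "'"], fake_reflect))  # ['"', "'"]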
Example #12
def fuzzer(url, params, headers, GET, delay, timeout, WAF, encoding):
    # iterate over the fuzz strings, substituting each into the parameters
    for fuzz in fuzzes:
        t = delay + randint(delay, delay * 2) + counter(fuzz)
        sleep(t)
        try:
            if encoding:
                fuzz = encoding(unquote(fuzz))
            data = replaceValue(params, xsschecker, fuzz, copy.deepcopy)
            # send the request carrying the fuzz string
            response = requester(url, data, headers, GET, delay / 2, timeout)
        except:
            # an exception here means the WAF dropped the malicious request
            logger.error('WAF is dropping suspicious requests.')
            # wait a while, then retry once to see whether the IP is blocked; stop fuzzing if it is
            if delay == 0:
                logger.info('Delay has been increased to %s6%s seconds.' %
                            (green, end))
                delay += 6
            limit = (delay + 1) * 50
            timer = -1
            while timer < limit:
                logger.info(
                    '\rFuzzing will continue after %s%i%s seconds.\t\t\r' %
                    (green, limit, end))
                limit -= 1
                sleep(1)
            try:
                requester(url, params, headers, GET, 0, 10)
                logger.good(
                    'Pheww! Looks like sleeping for %s%i%s seconds worked!' %
                    (green, ((delay + 1) * 2), end))
            except:
                logger.error(
                    '\nLooks like WAF has blocked our IP Address. Sorry!')
                break
        if encoding:
            fuzz = encoding(fuzz)
        if fuzz.lower() in response.text.lower():
            # the fuzz string is reflected in the response
            result = ('%s[passed]  %s' % (green, end))
        elif str(response.status_code)[:1] != '2':
            # the server returned an error (maybe the WAF blocked it)
            result = ('%s[blocked] %s' % (red, end))
        else:
            # the fuzz string was not reflected completely
            result = ('%s[filtered]%s' % (yellow, end))
        logger.info('%s %s' % (result, fuzz))
Example #13
def analyse(url, GET, data=None, PATH=False, header=HEADER):
    param_msg = {}  # collects the context/position info for each parameter
    # inputs: GET, PATH, url, data
    # work out the parameters to send (data) and the url to request
    if GET:
        if PATH:  # take the substitutable components from the url path
            data = get_valid_paths(url)
        else:  # take the substitutable components from the url query
            url_parse_result = urlparse(url)
            # e.g. ParseResult(scheme='http', netloc='192.168.1.46',
            #      path='/dvwa/vulnerabilities/xss_r/', params='', query='name=hi', fragment='')
            query = url_parse_result.query
            if query == "":
                query = url_parse_result.path.split("/")[-1]
            data = get_query_dict(query)  # parameter key-value pairs
            url = get_url(url, GET)  # the requests library takes url and params separately, so strip the query from the url
    # for POST, data is already the data to send

    for param in data:

        scout_params = copy.deepcopy(data)
        scout_str = gen_scout_str()  # marker string
        if PATH:  # url path parameters
            repace_url = url  # keep the original url unchanged so it is not replaced more than once
            repace_url = repace_url.replace(param, scout_str)
            resp = requester(repace_url,
                             headers=header,
                             data=None,
                             GET=GET,
                             delay=0,
                             timeout=30)
        else:  # GET/POST parameters
            scout_params[param] = scout_str
            resp = requester(url,
                             data=scout_params,
                             headers=header,
                             GET=GET,
                             delay=0,
                             timeout=30)

        text = resp.text
        parser = HtmlParser(target=scout_str)
        parser.feed(text)
        logger.info("Context of parameter {}: {}".format(param, parser.context))
        logger.red_line()

        param_msg.update({param: parser.context})
    # logger.info("param_msg:%s" % str(param_msg))
    get_effective_chars(url, data, GET, param_msg, header=header)
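
analyse() is essentially: inject a unique marker per parameter, then inspect where it lands in the response. A crude self-contained sketch of the context detection on a canned page (the real code uses an HtmlParser subclass):

page = '<input value="MARKER123"><script>var q = "MARKER456";</script>'

def context_of(marker, html):
    i = html.find(marker)
    if i == -1:
        return 'not reflected'
    before = html[:i]
    if before.rstrip().endswith(('="', "='")):
        return 'attribute value'
    last_open = before.rfind('<script')
    if last_open != -1 and '</script' not in before[last_open:]:
        return 'script string'
    return 'html text'

print(context_of('MARKER123', page))  # attribute value
print(context_of('MARKER456', page))  # script string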
Example #14
def fuzzer(url, params, headers, GET, delay, timeout, WAF, encoding):
    for fuzz in fuzzes:
        t = delay + randint(delay, delay * 2) + counter(fuzz)
        sleep(t)
        paramsCopy = copy.deepcopy(params)
        try:
            if encoding:
                fuzz = encoding(unquote(fuzz))
            data = replacer(paramsCopy, xsschecker, fuzz)
            response = requester(url, data, headers, GET, delay / 2, timeout)
        except:
            print('\n%s WAF is dropping suspicious requests.' % bad)
            if delay == 0:
                print('%s Delay has been increased to %s6%s seconds.' %
                      (info, green, end))
                delay += 6
            limit = (delay + 1) * 50
            timer = -1
            while timer < limit:
                print('\r%s Fuzzing will continue after %s%i%s seconds.\t\t' %
                      (info, green, limit, end),
                      end='\r')
                limit -= 1
                sleep(1)
            try:
                requester(url, params, headers, GET, 0, 10)
                print(
                    '\n%s Pheww! Looks like sleeping for %s%i%s seconds worked!'
                    % (good, green, (delay + 1) * 2, end))
            except:
                print(
                    '\n%s Looks like WAF has blocked our IP Address. Sorry!' %
                    bad)
                break
        if encoding:
            fuzz = encoding(fuzz)
        if fuzz.lower() in response.text.lower(
        ):  # if fuzz string is reflected in the response
            result = ('%s[passed]  %s' % (green, end))
        elif str(
                response.status_code
        )[:1] != '2':  # if the server returned an error (Maybe WAF blocked it)
            result = ('%s[blocked] %s' % (red, end))
        else:  # if the fuzz string was not reflected in the response completely
            result = ('%s[filtered]%s' % (yellow, end))
        print('%s %s' % (result, fuzz))
Example #15
def jscanner(url):
    """Extract endpoints from JavaScript code."""
    response = requester(
        url,
        main_url,
        delay,
        cook,
        headers,
        timeout,
        host,
        proxies,
        user_agents,
        failed,
        processed,
    )
    # Extract URLs/endpoints
    matches = rendpoint.findall(response)
    # Iterate over the matches, match is a tuple
    for match in matches:
        # Combining the items because one of them is always empty
        match = match[0] + match[1]
        # Making sure it's not some JavaScript code
        if not re.search(r'[}{><"\']', match) and not match == "/":
            verb("JS endpoint", match)
            endpoints.add(match)
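
The endpoint extraction is just a regex pass over the fetched JavaScript plus a sanity filter. A sketch with a hypothetical pattern standing in for rendpoint and a canned script standing in for the response:

import re

script = 'fetch("/api/v1/users"); var u = "https://example.com/login"; if (a > b) {}'
# hypothetical stand-in for the precompiled rendpoint pattern
rendpoint = re.compile(r'["\'](/[\w/.-]+|https?://[\w./-]+)["\']')

endpoints = set()
for match in rendpoint.finditer(script):
    candidate = match.group(1)
    if not re.search(r'[}{><"\']', candidate) and candidate != '/':  # same sanity filter as above
        endpoints.add(candidate)
print(sorted(endpoints))  # ['/api/v1/users', 'https://example.com/login']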
Example #16
def shodan(ip):
    result = {ip: {}}
    response = requester('https://api.shodan.io/shodan/host/%s?key=%s' %
                         (ip, core.memory.config['shodan_api_key']))
    data = response.json()['data']
    core.memory.global_vars['shodan_queries'] += 1
    if data:
        for each in data:
            port = each['port']
            result[ip][port] = {}
            software, version = None, None
            if 'product' in each:
                software = each['product']
                result[ip][port]['software'] = software
            else:
                result[ip][port]['software'] = ''
            if 'cpe' in each:
                cpes = each['cpe']
                for cpe in cpes:
                    if software in cpe:
                        result[ip][port]['cpe'] = cpe
                        break
                else:
                    result[ip][port]['cpe'] = cpes[0]
            else:
                result[ip][port]['cpe'] = ''
            cpe_boolean = False
            if result[ip][port]['cpe']:
                cpe_boolean = True
            if 'version' in each:
                version = each['version']
                if cpe_boolean and cpe.count(':') > 3:
                    version = cpe.split(':')[-1]
                result[ip][port]['version'] = version
            elif cpe_boolean and cpe.count(':') > 3:
                result[ip][port]['version'] = cpe.split(':')[-1]
            else:
                result[ip][port]['version'] = ''
            if 'vulns' in each:
                cache(software, version, 'dummy', '')
            elif software and version:
                if cpe_boolean:
                    for cpe in cpes:
                        software = cpe
                        if cpe.count(':') > 3:
                            version = cpe.split(':')[-1]
                        is_vuln = vulners(software, version, cpe=cpe_boolean)
                        if is_vuln:
                            message = '%s %s running on %s:%s is outdated' % (
                                each['product'], version, ip, each['port'])
                            notify('[Vuln] %s' % message)
                            print('%s %s' % (good, message))
                else:
                    is_vuln = vulners(software, version, cpe=cpe_boolean)
                    if is_vuln:
                        message = '%s %s running on %s:%s is outdated' % (
                            each['product'], version, ip, each['port'])
                        notify('[Vuln] %s' % message)
                        print('%s %s' % (good, message))
    return result
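
The parsing logic above, run on a canned banner instead of a live API call; the fields mirror one entry of response.json()['data'] from Shodan's /shodan/host/{ip} endpoint:

# a canned banner whose fields mirror one Shodan data entry
banner = {'port': 443, 'product': 'nginx', 'version': '1.14.0',
          'cpe': ['cpe:/a:nginx:nginx:1.14.0']}

entry = {'software': banner.get('product', ''),
         'cpe': banner['cpe'][0] if banner.get('cpe') else ''}
if entry['cpe'].count(':') > 3:
    # prefer the version embedded in the cpe string, as above
    entry['version'] = entry['cpe'].split(':')[-1]
else:
    entry['version'] = banner.get('version', '')
print({banner['port']: entry})  # {443: {'software': 'nginx', 'cpe': '...', 'version': '1.14.0'}}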
Example #17
def wafDetector(url, params, headers, GET, delay, timeout):
    with open('./db/wafSignatures.json', 'r') as file:
        wafSignatures = json.load(file)
    noise = '<script>alert("XSS")</script>'  # a payload which is noisy enough to provoke the WAF
    params['xss'] = noise
    response = requester(url, params, headers, GET, delay,
                         timeout)  # Opens the noise injected payload
    page = response.text
    code = str(response.status_code)
    headers = str(response.headers)
    if int(code) >= 400:
        bestMatch = [0, None]
        for wafName, wafSignature in wafSignatures.items():
            score = 0
            pageSign = wafSignature['page']
            codeSign = wafSignature['code']
            headersSign = wafSignature['headers']
            if pageSign:
                if re.search(pageSign, page, re.I):
                    score += 1
            if codeSign:
                if re.search(codeSign, code, re.I):
                    score += 0.5
            if headersSign:
                if re.search(headersSign, headers, re.I):
                    score += 1
            if score > bestMatch[0]:
                del bestMatch[:]
                bestMatch.extend([score, wafName])
        if bestMatch[0] != 0:
            return bestMatch[1]
        else:
            return None
    else:
        return None
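
The detection above is pure signature scoring: page, status-code, and header regexes each add to a score and the best-scoring WAF name wins. A self-contained sketch with two hypothetical signatures inlined in place of ./db/wafSignatures.json:

import re

# two hypothetical signatures in place of ./db/wafSignatures.json
wafSignatures = {
    'Cloudflare': {'page': r'Attention Required', 'code': r'403', 'headers': r'cf-ray'},
    'GenericWAF': {'page': r'request blocked', 'code': r'406', 'headers': ''},
}

def match_waf(page, code, headers):
    best = (0, None)
    for name, sig in wafSignatures.items():
        score = 0
        if sig['page'] and re.search(sig['page'], page, re.I):
            score += 1
        if sig['code'] and re.search(sig['code'], code, re.I):
            score += 0.5  # status codes are weak evidence, same weighting as above
        if sig['headers'] and re.search(sig['headers'], headers, re.I):
            score += 1
        if score > best[0]:
            best = (score, name)
    return best[1]

print(match_waf('Attention Required! | Cloudflare', '403', 'CF-RAY: 1a2b'))  # Cloudflare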
Example #18
def updater():
    """Update the current installation.

    git clones the latest version and merges it with the current directory.
    """
    print('%s Checking for updates' % run)
    # Changes must be separated by ;
    changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
    latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
    # Just a hack to see if a new version is available
    if changes not in latest_commit:
        changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
        # Splitting the changes to form a list
        changelog = changelog.group(1).split(';')
        print('%s A new version of Photon is available.' % good)
        print('%s Changes:' % info)
        for change in changelog: # print changes
            print('%s>%s %s' % (green, end, change))

        current_path = os.getcwd().split('/') # if you know it, you know it
        folder = current_path[-1] # current directory name
        path = '/'.join(current_path) # current directory path
        choice = input('%s Would you like to update? [Y/n] ' % que).lower()

        if choice != 'n':
            print('%s Updating Photon' % run)
            os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
                      % (folder))
            os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
                      % (path, folder, path, path, folder))
            print('%s Update successful!' % good)
    else:
        print('%s Photon is up to date!' % good)
Example #19
 def rec(target):
     processed.add(target)
     printableTarget = '/'.join(target.split('/')[3:])
     if len(printableTarget) > 40:
         printableTarget = printableTarget[-40:]
     else:
         printableTarget = (printableTarget + (' ' *
                                               (40 - len(printableTarget))))
     print('%s Parsing %s' % (run, printableTarget), end='\r')
     url = getUrl(target, True)
     params = getParams(target, '', True)
     if '=' in target:  # if there's a = in the url, there should be GET parameters
         inps = []
         for name, value in params.items():
             inps.append({'name': name, 'value': value})
         forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
     response = requester(url, params, headers, True, delay, timeout).text
     forms.append(zetanize(response))
     matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
     for link in matches:  # iterate over the matches
         # remove everything after a "#" to deal with in-page anchors
         link = link.split('#')[0]
         if link[:4] == 'http':
             if link.startswith(main_url):
                 storage.add(link)
         elif link[:2] == '//':
             if link.split('/')[2].startswith(host):
                 storage.add(schema + link)
         elif link[:1] == '/':
             storage.add(main_url + link)
         else:
             storage.add(main_url + '/' + link)
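
The branch ladder rec() uses to absolutize links re-implements relative URL resolution; the standard library's urljoin covers the same cases:

from urllib.parse import urljoin

main_url = 'http://example.com'
for link in ('http://example.com/a', '//example.com/b', '/c', 'd#frag'):
    print(urljoin(main_url + '/', link.split('#')[0]))
# -> http://example.com/a, http://example.com/b, http://example.com/c, http://example.com/d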
Example #20
    def generator_proxies(self):

        for name in self.list_name:
            if name in self.dic:
                url, params = chambering(origin_proxies[name], strike=False)
                result = requester(url, params, GET=True, timeout=None)
                if result is None:  # skip sources that did not respond
                    continue
                response = regex.Espace_eliminate.sub("", result.text)
                ips, ports, types = self.dic[name]['ip'].finditer(response),\
                                    self.dic[name]['port'].finditer(response),\
                                    self.dic[name]['type'].finditer(response)

                for i, j, k in zip(ips, ports, types):

                    ip = self.dic[name]['sub'].sub(" ", i.group())
                    port = self.dic[name]['sub'].sub(" ", j.group())
                    type = self.dic[name]['sub'].sub(" ", k.group())

                    if Filter.filter(ip, self.filter_proxy):
                        proxy = eval(regex.Espace_eliminate.sub("", str((ip, port, type.lower()))))
                        self.logger.info(f"ip : {proxy[0]} port : {proxy[1]} type : {proxy[2]}")
                        self.container.put(proxy)
Example #21
 def rec(target):
     processed.add(target)
     print('%s Parsing %s' % (run, target))
     url = getUrl(target, '', True)
     params = getParams(target, '', True)
     if '=' in target:
         inps = []
         for name, value in params.items():
             inps.append({'name': name, 'value': value})
         forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
     response = requester(url, params, headers, True, 0).text
     forms.append(zetanize(response))
     matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
     for link in matches:  # iterate over the matches
         link = link.split(
             '#'
         )[0]  # remove everything after a "#" to deal with in-page anchors
         if link[:4] == 'http':
             if link.startswith(main_url):
                 storage.add(link)
         elif link[:2] == '//':
             if link.split('/')[2].startswith(host):
                 storage.add(schema + link)
         elif link[:1] == '/':
             storage.add(main_url + link)
         else:
             storage.add(main_url + '/' + link)
Example #22
def checker(url, params, headers, GET, delay, payload, positions, timeout, encoding):
    checkString = 'st4r7s' + payload + '3nd'
    if encoding:
        checkString = encoding(unquote(checkString))
    response = requester(url, replaceValue(
        params, xsschecker, checkString, copy.deepcopy), headers, GET, delay, timeout).text.lower()
    reflectedPositions = []
    for match in re.finditer('st4r7s', response):
        reflectedPositions.append(match.start())
    filledPositions = fillHoles(positions, reflectedPositions)
    # Iterating over the reflections
    num = 0
    efficiencies = []
    for position in filledPositions:
        allEfficiencies = []
        try:
            reflected = response[reflectedPositions[num]
                :reflectedPositions[num]+len(checkString)]
            efficiency = fuzz.partial_ratio(reflected, checkString.lower())
            allEfficiencies.append(efficiency)
        except IndexError:
            pass
        if position:
            reflected = response[position:position+len(checkString)]
            if encoding:
                checkString = encoding(checkString.lower())
            efficiency = fuzz.partial_ratio(reflected, checkString)
            if reflected[:-2] == ('\\%s' % checkString.replace('st4r7s', '').replace('3nd', '')):
                efficiency = 90
            allEfficiencies.append(efficiency)
            efficiencies.append(max(allEfficiencies))
        else:
            efficiencies.append(0)
        num += 1
    return list(filter(None, efficiencies))
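
The efficiency numbers come from fuzzywuzzy's partial_ratio, which scores how intact the marker payload survived in the response (100 means fully reflected). Assuming the same fuzzywuzzy package the example already relies on for fuzz:

from fuzzywuzzy import fuzz

checkString = 'st4r7s<script>3nd'
reflected_ok = 'st4r7s<script>3nd'
reflected_encoded = 'st4r7s&lt;script&gt;3nd'
print(fuzz.partial_ratio(reflected_ok, checkString.lower()))       # 100: fully reflected
print(fuzz.partial_ratio(reflected_encoded, checkString.lower()))  # lower: the payload got encoded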
Example #23
 def rec(target):
     processed.add(target)
     urlPrint = (target + (' ' * 60))[:60]
     print('%s Parsing %-40s' % (run, urlPrint), end='\r')
     url = getUrl(target, True)
     params = getParams(target, '', True)
     if '=' in target:  # if there's a = in the url, there should be GET parameters
         inps = []
         for name, value in params.items():
             inps.append({'name': name, 'value': value})
         forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
     raw_response = requester(url, params, True)
     response = raw_response.text
     js = js_extractor(response)
     scripts = script_extractor(response)
     for each in retirejs(url, response, checkedScripts):
         all_outdated_js.append(each)
     all_techs.extend(wappalyzer(raw_response, js, scripts))
     parsed_response = zetanize(response)
     forms.append(parsed_response)
     matches = re.finditer(
         r'<[aA][^>]*?(?:href|HREF)=[\'"`]?([^>]*?)[\'"`]?>', response)
     for link in matches:  # iterate over the matches
         # remove everything after a "#" to deal with in-page anchors
         link = link.group(1).split('#')[0]
         this_url = handle_anchor(target, link)
         if urlparse(this_url).netloc == host:
             storage.add(this_url)
Example #24
 def rec(target):
     processed.add(target)
     printableTarget = '/'.join(target.split('/')[3:])
     if len(printableTarget) > 40:
         printableTarget = printableTarget[-40:]
     else:
         printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
     print ('%s Parsing %s' % (run, printableTarget), end='\r')
     url = getUrl(target, True)
     params = getParams(target, '', True)
     if '=' in target:  # if there's a = in the url, there should be GET parameters
         inps = []
         for name, value in params.items():
             inps.append({'name': name, 'value': value})
         forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
     response = requester(url, params, headers, True, delay, timeout).text
     forms.append(zetanize(response))
     matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
     for link in matches:  # iterate over the matches
         # remove everything after a "#" to deal with in-page anchors
         link = link.split('#')[0]
         if link[:4] == 'http':
             if link.startswith(main_url):
                 storage.add(link)
         elif link[:2] == '//':
             if link.split('/')[2].startswith(host):
                 storage.add(schema + link)
         elif link[:1] == '/':
             storage.add(main_url + link)
         else:
             storage.add(main_url + '/' + link)
Example #25
def singleFuzz(target, paramData, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay,
                                 timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET,
                      delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    for paramName in params.keys():
        print('%s Fuzzing parameter: %s' % (info, paramName))
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
Example #26
def singleFuzz(target, paramData, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {},
                                 headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    for paramName in params.keys():
        print('%s Fuzzing parameter: %s' % (info, paramName))
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET,
               delay, timeout, WAF, encoding)
Example #27
    def generator_proxies(self):
        ''' Fetch usable proxies from the default proxy sources defined in list_name '''
        for name in self.list_name:
            if name in self.dic:
                url, params = chambering(origin_proxies[name], strike=False)
                result = requester(url, params, GET=True, timeout=None)
                if result is None:
                    continue
                response = regex.Espace_eliminate.sub("", result.text)
                ips, ports, types = self.dic[name]['ip'].finditer(response),\
                                    self.dic[name]['port'].finditer(response),\
                                    self.dic[name]['type'].finditer(response)

                for i, j, k in zip(ips, ports, types):

                    ip = self.dic[name]['sub'].sub(" ", i.group())
                    port = self.dic[name]['sub'].sub(" ", j.group())
                    type = self.dic[name]['sub'].sub(" ", k.group())

                    if Filter.filter(ip,
                                     self.filter_proxy):  # not a special file link, so add it to the proxy pool
                        proxy = eval(
                            regex.Espace_eliminate.sub(
                                "", str((ip, port, type.lower()))))
                        # a safer way to turn the string back into a tuple:
                        # temp = r.replace('(', '').replace(')', '')
                        # a = tuple([int(i) for i in temp.split(',')])
                        self.logger.info(
                            f"ip : {proxy[0]} port : {proxy[1]} type : {proxy[2]}"
                        )
                        self.container.put(proxy)
Example #28
def bruteforcer(target, paramData, payloadList, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    verboseOutput(params, 'params', verbose)
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            print ('%s Bruteforcing %s[%s%s%s]%s: %i/%i' % (run, green, end, paramName, green, end, progress, len(payloadList)), end='\r')
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers,
                                 GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                print('%s %s' % (good, payload))
            progress += 1
    print ()
Example #29
def bruteforcer(target, paramData, payloadList, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Parsed host to bruteforce: {}'.format(host))
    url = get_url(target, GET)
    logger.debug('Parsed url to bruteforce: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Bruteforcer params:', params)
    if not params:
        logger.error('No parameters to test.')
        quit()
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            logger.run('Bruteforcing %s[%s%s%s]%s: %i/%i\r' %
                       (green, end, paramName, green, end, progress, len(payloadList)))
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers,
                                 GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                logger.info('%s %s' % (good, payload))
            progress += 1
    logger.no_format('')
Example #30
def bruteforcer(target, paramData, payloadList, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    verboseOutput(params, 'params', verbose)
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            print('%s Progress: %i/%i' % (run, progress, len(payloadList)),
                  end='\r')
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay,
                                 timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                print('%s %s' % (good, payload))
            progress += 1
    print('')
Example #31
    def proxy_ip3366(self):

        for page in range(1,11):
            try:
                url, params = chambering(f"http://www.ip3366.net/?stype=1&page={page}", strike=False)
                result = requester(url,params,GET=True,timeout=None)
                text = regex.Espace_eliminate.sub("",result.text)

                proxy_ips, proxy_ports, proxy_types = self.dic['ip3366']['ip'].finditer(text),\
                                                      self.dic['ip3366']['port'].finditer(text),\
                                                      self.dic['ip3366']['type'].finditer(text)

                for ips, ports, types in zip(proxy_ips, proxy_ports, proxy_types):

                    ip, port, type = self.dic['ip3366']['sub'].sub(" ",ips.group()),\
                                     self.dic['ip3366']['sub'].sub(" ",ports.group()),\
                                     self.dic['ip3366']['sub'].sub(" ",types.group())

                    proxy = eval(regex.Espace_eliminate.sub("", str((ip,port,type.lower()))))

                    self.logger.info(f"ip : {proxy[0]} port : {proxy[1]} type : {proxy[2]}")

                    self.container.put(proxy)
            except Exception:
                pass  # skip pages that fail to download or parse
Example #32
def rec(url):
    processed.add(url)
    urlPrint = (url + (' ' * 60))[:60]
    print('%s Parsing %-40s' % (run, urlPrint), end='\r')
    url = getUrl(url, '', True)
    params = getParams(url, '', True)
    if '=' in url:
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append(
            {url: {0: {'action': url, 'method': 'get', 'inputs': inps}}})
    response = requester(url, params, headers, True, 0).text
    forms.append({url: zetanize(url, response)})
    matches = findall(
        r'<[aA][^>]*?(href|HREF)=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link[1].split('#')[0].lstrip(' ')
        if link[:4] == 'http':
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                storage.add(scheme + '://' + link)
        elif link[:1] == '/':
            storage.add(remove_file(url) + link)
        else:
            usable_url = remove_file(url)
            if usable_url.endswith('/'):
                storage.add(usable_url + link)
            else:  # the link cannot start with '/' here, that case was caught above
                storage.add(usable_url + '/' + link)
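rec never recurses itself; it only grows the storage frontier while processed records visited URLs. A driver loop in the same spirit (the two set names come from the snippet, the loop itself is illustrative):

while True:
    frontier = storage - processed  # discovered but not yet parsed
    if not frontier:
        break
    for url in frontier:
        rec(url)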
Example #33
0
def singleFuzz(target, paramData, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith("http"):
        try:
            response = requester("https://" + target, {}, headers, GET, delay, timeout)
            target = "https://" + target
        except:
            target = "http://" + target
    logger.debug("Single Fuzz target: {}".format(target))
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug("Single fuzz host: {}".format(host))
    url = getUrl(target, GET)
    logger.debug("Single fuzz url: {}".format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json("Single fuzz params:", params)
    if not params:
        logger.error("No parameters to test.")
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout
    )
    if WAF:
        logger.error("WAF detected: %s%s%s" % (green, WAF, end))
    else:
        logger.good("WAF Status: %sOffline%s" % (green, end))

    for paramName in params.keys():
        logger.info("Fuzzing parameter: %s" % paramName)
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
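An illustrative call, assuming the definition above; every argument value here is hypothetical.

# An empty paramData string selects GET mode; encoding=None disables payload encoding.
singleFuzz('example.com/page?item=1', '', None,
           {'User-Agent': 'Mozilla/5.0'}, delay=0, timeout=10)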
Example #34
0
def quickBruter(params, originalResponse, originalCode, reflections, factors,
                include, delay, headers, url, GET):
    joined = joiner(params, include)
    newResponse = requester(url, joined, headers, GET, delay)
    if newResponse.status_code == 429:
        if core.config.globalVariables['stable']:
            print('%s Hit rate limit, stabilizing the connection..' % info)
            time.sleep(30)
            return params
        else:
            print(
                '%s Target has rate limiting in place, please use --stable switch'
                % bad)
            raise ConnectionError
    if newResponse.status_code != originalCode:
        return params
    elif factors['sameHTML'] and len(
            newResponse.text) != (len(originalResponse)):
        return params
    elif factors['samePlainText'] and len(removeTags(originalResponse)) != len(
            removeTags(newResponse.text)):
        return params
    else:
        for param, value in joined.items():
            if param not in include and newResponse.text.count(
                    value) != reflections:
                return params
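quickBruter returns the tested chunk when the joined request deviates from the baseline and falls through (returning None) otherwise, which enables a divide-and-conquer search: probe parameters in bulk, then split any chunk that reacts. A recursive driver sketch under that assumption, not the tool's exact implementation:

def narrow_down(params, *args):
    # args mirrors quickBruter's remaining positional parameters
    if not quickBruter(params, *args):
        return []               # chunk behaves exactly like the baseline
    if len(params) == 1:
        return params           # isolated a single response-changing parameter
    mid = len(params) // 2
    return narrow_down(params[:mid], *args) + narrow_down(params[mid:], *args)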
Example #35
0
def updater():
    """Update the current installation.

    git clones the latest version and merges it with the current directory.
    """
    print('%s Checking for updates' % run)
    # Changes must be separated by ;
    changes = "cloning (mirroring) feature;fixed sitemap.xml parsing;reuse tcp connection to boost speed;handle redirect loops;csv export support;other minor bug fixes"
    latest_commit = requester(
        'https://raw.githubusercontent.com/s0md3v/Photon/master/photon.py',
        host='github.com').text
    # Just a hack to see if a new version is available
    if changes not in latest_commit:
        changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
        # Splitting the changes to form a list
        changelog = changelog.group(1).split(';')
        print('%s A new version of Photon is available.' % good)
        print('%s Changes:' % info)
        for change in changelog:  # print changes
            print('%s>%s %s' % (green, end, change))

        current_path = os.getcwd().split('/')  # if you know it, you know it
        folder = current_path[-1]  # current directory name
        path = '/'.join(current_path)  # current directory path
        choice = input('%s Would you like to update? [Y/n] ' % que).lower()

        if choice != 'n':
            print('%s Updating Photon' % run)
            os.system('git clone --quiet https://github.com/s0md3v/Photon %s' %
                      (folder))
            os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' %
                      (path, folder, path, path, folder))
            print('%s Update successful!' % good)
    else:
        print('%s Photon is up to date!' % good)
Example #36
0
def checky(param, paraNames, url, headers, GET, delay, timeout):
    if param not in paraNames:
        response = requester(url, {param: xsschecker},
                             headers, GET, delay, timeout).text
        if '\'%s\'' % xsschecker in response or '"%s"' % xsschecker in response or ' %s ' % xsschecker in response:
            paraNames[param] = ''
            print('%s Valid parameter found : %s%s%s' %
                  (good, green, param, end))
Example #37
0
def fuzzer(url, params, headers, GET, delay, timeout, WAF, encoding):
    for fuzz in fuzzes:
        t = delay + randint(delay, delay * 2) + counter(fuzz)
        sleep(t)
        try:
            if encoding:
                fuzz = encoding(unquote(fuzz))
            data = replaceValue(params, xsschecker, fuzz, copy.deepcopy)
            response = requester(url, data, headers, GET, delay/2, timeout)
        except:
            print('\n%s WAF is dropping suspicious requests.' % bad)
            if delay == 0:
                print('%s Delay has been increased to %s6%s seconds.' %
                      (info, green, end))
                delay += 6
            limit = (delay + 1) * 50
            timer = -1
            while timer < limit:
                print('\r%s Fuzzing will continue after %s%i%s seconds.\t\t' % (info, green, limit, end), end='\r')
                limit -= 1
                sleep(1)
            try:
                requester(url, params, headers, GET, 0, 10)
                print('\n%s Pheww! Looks like sleeping for %s%i%s seconds worked!' % (
                    good, green, (delay + 1) * 2, end))
                continue  # the original request failed, so there is no response to check for this fuzz
            except:
                print('\n%s Looks like WAF has blocked our IP Address. Sorry!' % bad)
                break
        if encoding:
            fuzz = encoding(fuzz)
        if fuzz.lower() in response.text.lower():  # if fuzz string is reflected in the response
            result = ('%s[passed]  %s' % (green, end))
        # if the server returned an error (Maybe WAF blocked it)
        elif str(response.status_code)[:1] != '2':
            result = ('%s[blocked] %s' % (red, end))
        else:  # if the fuzz string was not reflected in the response completely
            result = ('%s[filtered]%s' % (yellow, end))
        print('%s %s' % (result, fuzz))
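The sleep heuristic above calls a counter() helper; a plausible implementation scores a payload by its count of special characters, so noisier fuzz strings wait longer between requests. A sketch under that assumption, not the helper's verbatim source:

import re

def counter(string):
    # count everything that is neither whitespace nor a word character
    return len(re.sub(r'\s|\w', '', string))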
Example #38
0
def jscanner(url):
    """Extract endpoints from JavaScript code."""
    response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
    # Extract URLs/endpoints
    matches = rendpoint.findall(response)
    # Iterate over the matches, match is a tuple
    for match in matches:
        # Combining the items because one of them is always empty
        match = match[0] + match[1]
        # Making sure it's not some JavaScript code
        if not re.search(r'[}{><"\']', match) and not match == '/':
            verb('JS endpoint', match)
            endpoints.add(match)
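rendpoint is a precompiled pattern from the tool's config. The match[0] + match[1] trick works because the pattern has two alternative capture groups, only one of which matches. A stand-in with the same shape (an assumption, not the exact pattern):

import re

rendpoint = re.compile(r'[\'"](/[^\'"\s]+)[\'"]|[\'"](https?://[^\'"\s]+)[\'"]')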
Example #39
0
def extractor(url):
    """Extract details from the response body."""
    response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
    if clone:
        mirror(url, response)
    matches = rhref.findall(response)
    for link in matches:
        # Remove everything after a "#" to deal with in-page anchors
        link = link[1].replace('\'', '').replace('"', '').split('#')[0]
        # Checks if the URLs should be crawled
        if is_link(link, processed, files):
            if link[:4] == 'http':
                if link.startswith(main_url):
                    verb('Internal page', link)
                    internal.add(link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    verb('Internal page', link)
                    internal.add(schema + '://' + link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:1] == '/':
                verb('Internal page', link)
                internal.add(remove_file(url) + link)
            else:
                verb('Internal page', link)
                usable_url = remove_file(url)
                if usable_url.endswith('/'):
                    internal.add(usable_url + link)
                else:  # the link cannot start with '/' here, that case was caught above
                    internal.add(usable_url + '/' + link)

    if not only_urls:
        intel_extractor(url, response)
        js_extractor(response)
    if args.regex and not supress_regex:
        regxy(args.regex, response, supress_regex, custom)
    if api:
        matches = rentropy.findall(response)
        for match in matches:
            if entropy(match) >= 4:
                verb('Key', match)
                keys.add(url + ': ' + match)
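The key hunt above gates candidates on entropy(match) >= 4, i.e. at least four bits of Shannon entropy per character, which filters plain words from random-looking API keys. A self-contained sketch of such a helper; the real one may differ:

import math
from collections import Counter

def entropy(string):
    """Shannon entropy in bits per character."""
    length = len(string)
    return -sum((count / length) * math.log2(count / length)
                for count in Counter(string).values())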
Example #40
0
def arjun(url, GET, headers, delay, timeout):
    paraNames = {}
    response = requester(url, {}, headers, GET, delay, timeout).text
    matches = re.findall(
        r'<input.*?name=\'(.*?)\'.*?>|<input.*?name="(.*?)".*?>', response)
    for match in matches:
        try:
            # the regex has two alternative groups; one of them is always empty
            foundParam = match[0] or match[1]
        except UnicodeDecodeError:
            continue
        print('%s Heuristics found a potentially valid parameter: %s%s%s. Prioritizing it.' % (
            good, green, foundParam, end))
        if foundParam in blindParams:
            blindParams.remove(foundParam)
            blindParams.insert(0, foundParam)
    threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
    futures = (threadpool.submit(checky, param, paraNames, url,
                                 headers, GET, delay, timeout) for param in blindParams)
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        if i + 1 == len(blindParams) or (i + 1) % threadCount == 0:
            print('%s Progress: %i/%i' % (info, i + 1, len(blindParams)), end='\r')
    return paraNames
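An illustrative call, assuming the definition above plus the module globals it touches (blindParams, threadCount, xsschecker); the URL and header values are hypothetical.

found = arjun('http://example.com/page', True,
              {'User-Agent': 'Mozilla/5.0'}, delay=0, timeout=10)
print(found)  # dict keyed by parameter names that reflect in the response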
Example #41
0
def wafDetector(url, params, headers, GET, delay, timeout):
    with open('./db/wafSignatures.json', 'r') as file:
        wafSignatures = json.load(file)
    # a payload which is noisy enough to provoke the WAF
    noise = '<script>alert("XSS")</script>'
    params['xss'] = noise
    # Opens the noise injected payload
    response = requester(url, params, headers, GET, delay, timeout)
    page = response.text
    code = str(response.status_code)
    headers = str(response.headers)
    if int(code) >= 400:
        bestMatch = [0, None]
        for wafName, wafSignature in wafSignatures.items():
            score = 0
            pageSign = wafSignature['page']
            codeSign = wafSignature['code']
            headersSign = wafSignature['headers']
            if pageSign:
                if re.search(pageSign, page, re.I):
                    score += 1
            if codeSign:
                if re.search(codeSign, code, re.I):
                    score += 0.5  # increase the overall score by a smaller amount because http codes aren't strong indicators
            if headersSign:
                if re.search(headersSign, headers, re.I):
                    score += 1
            # if the overall score of the waf is higher than the previous one
            if score > bestMatch[0]:
                del bestMatch[:]  # delete the previous one
                bestMatch.extend([score, wafName])  # and add this one
        if bestMatch[0] != 0:
            return bestMatch[1]
        else:
            return None
    else:
        return None
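Each entry in wafSignatures.json carries the three regex fields the loop above reads, with body and header matches weighted 1 and status-code matches 0.5. A hypothetical entry, shaped to match those reads:

wafSignatures = {
    "Example WAF": {
        "page": "blocked by example",  # regex against the response body (weight 1)
        "code": "403",                 # regex against the status code (weight 0.5)
        "headers": "X-Example-WAF"     # regex against the headers (weight 1)
    }
}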
Example #42
0
def scan(target, paramData, verbose, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {},
                                 headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(
            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                printVector = vect
                progress += 1
                print('%s Progress: %i/%i' % (run, progress, total), end='\r')
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(
                        url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, 100))
                        print('%s Confidence: %i' % (info, 10))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
        print('')
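An illustrative top-level call, assuming the definition above and XSStrike's globals; every argument value is hypothetical.

scan('http://example.com/search?q=test', '', verbose=False, encoding=None,
     headers={'User-Agent': 'Mozilla/5.0'}, delay=0, timeout=10,
     skipDOM=False, find=False, skip=True)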