コード例 #1
0
ファイル: filterChecker.py プロジェクト: repo-backup/XSStrike
def filterChecker(url, params, headers, GET, delay, occurences, timeout):
    """Score each reflection context against the break-out strings it needs.

    For every reflection in *occurences*, collects the strings
    ("environments") required to escape its context, asks ``checker`` how
    efficiently each one survives the target's filters, and stores the
    per-environment efficiency map on the occurence under ``'score'``.

    Args:
        url, params, headers, GET, delay, timeout: forwarded to checker().
        occurences: dict of reflection records; each value carries a
            'context' tuple (location, environment, ..., attribute) and a
            'position'.

    Returns:
        The same *occurences* dict, mutated with a 'score' dict per entry.
    """
    positions = {}
    # < and > can be abused in every context, so always probe them.
    environments = {'<', '>'}
    sortedEfficiencies = {i: {} for i in range(len(occurences))}
    for i, occurence in enumerate(occurences.values()):
        environments.add(occurence['context'][1])
        location = occurence['context'][0]
        attribute = occurence['context'][3]
        positions[str(i)] = occurence['position']
        if location == 'comment':
            environments.add('-->')  # needed to break out of an HTML comment
        elif location == 'script':
            environments.add('</scRipT/>')  # mixed case dodges naive filters
        elif attribute == 'srcdoc':
            # srcdoc accepts HTML-entity-encoded markup, so probe the
            # entity-encoded forms of < and > as well.
            environments.add('&lt;')
            environments.add('&gt;')
    for environment in environments:
        if environment == '':
            # An empty environment trivially passes every filter.
            efficiencies = [100] * len(occurences)
        else:
            efficiencies = checker(url, params, headers, GET, delay,
                                   environment, positions, timeout)
            # Pad with zeros so every occurence receives a score even when
            # checker returns fewer results than there are occurences.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
        for i, efficiency in enumerate(efficiencies):
            # checker may return more results than occurences; ignore extras.
            if i in sortedEfficiencies:
                sortedEfficiencies[i][environment] = efficiency
    for efficiency, occurence in zip(sortedEfficiencies.values(),
                                     occurences.values()):
        occurence['score'] = efficiency
    return occurences
コード例 #2
0
ファイル: filterChecker.py プロジェクト: bikrambox/pushupB
def filterChecker(url, params, headers, GET, delay, occurences, timeout,
                  encoding):
    """Score each reflection context against the break-out strings it needs.

    Derives the set of probe strings ("environments") from every
    reflection's context and quote details, measures each one's filter
    survival via ``checker``, and records the results in each occurence's
    ``'score'`` dict.

    Returns the mutated *occurences* dict.
    """
    positions = occurences.keys()
    # adding < > to environments anyway because they can be used in all contexts
    environments = {'<', '>'}
    for i in occurences:
        occurences[i]['score'] = {}
        context = occurences[i]['context']
        if context == 'comment':
            environments.add('-->')  # needed to escape an HTML comment
        elif context == 'script':
            environments.add(occurences[i]['details']['quote'])
            environments.add('</scRipT/>')  # mixed case dodges naive filters
        elif context == 'attribute':
            if occurences[i]['details']['type'] == 'value':
                if occurences[i]['details'][
                        'name'] == 'srcdoc':  # srcdoc attribute accepts html data with html entity encoding
                    environments.add('&lt;')  # so let's add the html entity
                    environments.add('&gt;')  # encoded versions of < and >
            if occurences[i]['details']['quote']:
                environments.add(occurences[i]['details']['quote'])
    for environment in environments:
        # A falsy environment (empty/None quote) has nothing to probe.
        if environment:
            efficiencies = checker(url, params, headers, GET, delay,
                                   environment, positions, timeout, encoding)
            # Pad with zeros so every occurence receives a score.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
            for occurence, efficiency in zip(occurences, efficiencies):
                occurences[occurence]['score'][environment] = efficiency
    return occurences
コード例 #3
0
def filterChecker(url, params, headers, GET, delay, occurences):
    """Score each reflection context against the break-out strings it needs.

    Collects context-specific probe strings ("environments"), measures each
    one's filter survival via ``checker``, and stores the per-environment
    efficiency map on each occurence under ``'score'``.

    Returns the mutated *occurences* dict.
    """
    # < and > can be abused in every context, so always probe them.
    environments = {'<', '>'}
    sortedEfficiencies = {i: {} for i in range(len(occurences))}
    for occurence in occurences.values():
        environments.add(occurence['context'][1])
        location = occurence['context'][0]
        if location == 'comment':
            environments.add('-->')  # needed to escape an HTML comment
        elif location == 'script':
            environments.add('</scRipT/>')  # mixed case dodges naive filters
    for environment in environments:
        if environment == '':
            # An empty environment trivially passes every filter.
            efficiencies = [100] * len(occurences)
        else:
            efficiencies = checker(url, params, headers, GET, delay, environment)
            # Pad with zeros so every occurence gets an entry, even when
            # checker returns fewer (or no) results than occurences.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
        for i, efficiency in enumerate(efficiencies):
            # checker may return more results than occurences; ignore extras.
            if i in sortedEfficiencies:
                sortedEfficiencies[i][environment] = efficiency
    for efficiency, occurence in zip(sortedEfficiencies.values(), occurences.values()):
        occurence['score'] = efficiency
    return occurences
コード例 #4
0
def filterChecker(url, params, headers, GET, delay, occurences, timeout,
                  encoding):
    """Score each reflection context against the break-out strings it needs.

    Collects the probe strings ("environments") each reflection's context
    requires, measures their filter survival via ``checker``, and stores the
    per-environment efficiency map on each occurence under ``'score'``.

    Returns the mutated *occurences* dict.
    """
    positions = {}
    # adding < > to environments anyway because they can be used in all contexts
    environments = {'<', '>'}
    sortedEfficiencies = {i: {} for i in range(len(occurences))}
    for i, occurence in enumerate(occurences.values()):
        environments.add(occurence['context'][1])
        location = occurence['context'][0]
        try:
            # context[3] is either a {name: value} dict or a plain string.
            attributeName = list(occurence['context'][3].keys())[0]
        except AttributeError:
            attributeName = occurence['context'][3]
        positions[str(i)] = occurence['position']
        if location == 'comment':
            environments.add('-->')  # needed to escape an HTML comment
        elif location == 'script':
            environments.add('</scRipT/>')  # mixed case dodges naive filters
        elif attributeName == 'srcdoc':  # srcdoc attribute accepts html data with html entity encoding
            environments.add('&lt;')  # so let's add the html entity
            environments.add('&gt;')  # encoded versions of < and >

    for environment in environments:
        if environment == '':
            # An empty environment trivially passes every filter.
            efficiencies = [100] * len(occurences)
        else:
            efficiencies = checker(url, params, headers, GET, delay,
                                   environment, positions, timeout, encoding)
            # Pad with zeros so every occurence receives a score.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
        for i, efficiency in enumerate(efficiencies):
            # setdefault replaces the old bare try/except and also covers
            # checker returning more results than there are occurences.
            sortedEfficiencies.setdefault(i, {})[environment] = efficiency
    for efficiency, occurence in zip(sortedEfficiencies.values(),
                                     occurences.values()):
        occurence['score'] = efficiency
    return occurences
コード例 #5
0
ファイル: filterChecker.py プロジェクト: robdollard/XSStrike
def filterChecker(url, params, headers, GET, delay, occurences, timeout, encoding):
    """Probe which filter break-out strings survive for each reflection.

    Builds the set of context-escape strings ("environments") needed by each
    reflection, asks ``checker`` how efficiently each survives, and attaches
    the resulting environment -> efficiency map to every occurence as
    ``'score'``.

    Returns the mutated *occurences* dict.
    """
    positions = {}
    # adding < > to environments anyway because they can be used in all contexts
    environments = {'<', '>'}
    sortedEfficiencies = {i: {} for i in range(len(occurences))}
    for i, occurence in enumerate(occurences.values()):
        environments.add(occurence['context'][1])
        location = occurence['context'][0]
        try:
            # context[3] may be a {name: value} dict or a bare string.
            attributeName = list(occurence['context'][3].keys())[0]
        except AttributeError:
            attributeName = occurence['context'][3]
        positions[str(i)] = occurence['position']
        if location == 'comment':
            environments.add('-->')  # required to close an HTML comment
        elif location == 'script':
            environments.add('</scRipT/>')  # mixed case dodges naive filters
        elif attributeName == 'srcdoc':  # srcdoc attribute accepts html data with html entity encoding
            environments.add('&lt;')  # so let's add the html entity
            environments.add('&gt;')  # encoded versions of < and >

    for environment in environments:
        if environment == '':
            # An empty probe string trivially passes every filter.
            efficiencies = [100] * len(occurences)
        else:
            efficiencies = checker(
                url, params, headers, GET, delay, environment, positions, timeout, encoding)
            # Zero-pad so each occurence gets a value for this environment.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
        for i, efficiency in enumerate(efficiencies):
            # setdefault replaces the former bare try/except and tolerates
            # checker returning more results than there are occurences.
            sortedEfficiencies.setdefault(i, {})[environment] = efficiency
    for efficiency, occurence in zip(sortedEfficiencies.values(), occurences.values()):
        occurence['score'] = efficiency
    return occurences
コード例 #6
0
def filterChecker(url, params, headers, GET, delay, occurences, timeout,
                  encoding):
    """Score every reflection context against the probe strings it needs.

    Derives break-out strings ("environments") from each reflection's
    context and quoting details, measures their filter survival via
    ``checker``, and records the results in each occurence's "score" dict.

    Returns the mutated *occurences* dict.
    """
    positions = occurences.keys()
    # adding < > to environments anyway because they can be used in all contexts
    environments = {"<", ">"}
    for i in occurences:
        occurences[i]["score"] = {}
        context = occurences[i]["context"]
        if context == "comment":
            environments.add("-->")  # needed to escape an HTML comment
        elif context == "script":
            environments.add(occurences[i]["details"]["quote"])
            environments.add("</scRipT/>")  # mixed case dodges naive filters
        elif context == "attribute":
            if occurences[i]["details"]["type"] == "value":
                if (
                        occurences[i]["details"]["name"] == "srcdoc"
                ):  # srcdoc attribute accepts html data with html entity encoding
                    environments.add("&lt;")  # so let's add the html entity
                    environments.add("&gt;")  # encoded versions of < and >
            if occurences[i]["details"]["quote"]:
                environments.add(occurences[i]["details"]["quote"])
    for environment in environments:
        # A falsy environment (empty/None quote) has nothing to probe.
        if environment:
            efficiencies = checker(
                url,
                params,
                headers,
                GET,
                delay,
                environment,
                positions,
                timeout,
                encoding,
            )
            # Pad with zeros so every occurence receives a score.
            efficiencies.extend([0] * (len(occurences) - len(efficiencies)))
            for occurence, efficiency in zip(occurences, efficiencies):
                occurences[occurence]["score"][environment] = efficiency
    return occurences
コード例 #7
0
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    """Scan *target* for reflected XSS in every discovered parameter.

    Flow: normalize the URL scheme, optionally check for DOM sinks, detect a
    WAF, then for each parameter: confirm reflection, score the reflection
    contexts (filterChecker), generate payload vectors, and verify each
    vector either by reflection efficiency (confidence 10) or via
    browserEngine. Interactive: may call input()/quit().

    Args:
        target: base URL (scheme optional).
        paramData: POST body data; its presence switches the scan to POST.
        encoding: optional payload-encoding callable (falsy to disable).
        headers, delay, timeout: forwarded to the HTTP helpers.
        skipDOM: skip the static DOM-sink check when truthy.
        find: brute-force parameters with arjun() when truthy.
        skip: don't prompt to continue after a confirmed payload.
    """
    # POST mode is implied by the presence of request body data.
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            # Prefer https; fall back to http if the https probe fails.
            response = requester('https://' + target, {},
                                 headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        # Parameter discovery replaces whatever was parsed from the URL.
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))

    for paramName in params.keys():
        # Work on a copy so only the parameter under test carries the probe.
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        # htmlParser returns (occurences, positions) for this variant.
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        logger.debug('Scan occurences: {}'.format(occurences))
        positions = parsedResponse[1]
        logger.debug('Scan positions: {}'.format(positions))
        if not occurences:
            logger.error('No reflection found')
            continue
        else:
            logger.info('Reflections found: %i' % len(occurences))

        logger.run('Analysing reflections')
        efficiencies = filterChecker(
            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        logger.info('Payloads generated: %i' % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    # Path-injection mode: slashes must be URL-encoded.
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if confidence == 10:
                    # High-confidence vectors are verified purely by
                    # reflection efficiency, no browser needed.
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(
                        url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    # Backslash-led vectors tolerate a slightly lower score.
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                else:
                    # Lower-confidence vectors get verified in a headless
                    # browser; skip tag-based vectors the engine can't judge.
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % 100)
                        logger.info('Confidence: %i' % 10)
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
        logger.no_format('')
コード例 #8
0
ファイル: xsstrike.py プロジェクト: w7yuu/XSStrike
def singleTarget(target, paramData, verbose, encoding):
    """Scan a single target URL for reflected XSS.

    Flow: normalize the URL scheme, optionally run the DOM-sink check,
    detect a WAF, optionally fuzz, then for each parameter: confirm
    reflection, score reflection contexts (filterChecker), generate payload
    vectors, and report those whose reflection efficiency passes the
    thresholds. Interactive: may call input()/quit(). Relies on
    module-level globals (headers, delay, timeout, skipDOM, fuzz, args, ...).

    Args:
        target: base URL (scheme optional).
        paramData: POST body data; its presence switches the scan to POST.
        verbose: verbosity flag forwarded to verboseOutput().
        encoding: optional payload-encoding callable (falsy to disable).
    """
    # POST mode is implied by the presence of request body data.
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if target.startswith('http'):
        target = target
    else:
        try:
            # Prefer https; fall back to http if the https probe fails.
            response = requester('https://' + target, {}, headers, GET, delay,
                                 timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if args.find:
        # Parameter discovery replaces whatever was parsed from the URL.
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET,
                      delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    if fuzz:
        # Fuzzing mode short-circuits the normal scan entirely.
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF,
                   encoding)
        quit()

    for paramName in params.keys():
        # Work on a copy so only the parameter under test carries the probe.
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        # htmlParser returns (occurences, positions) for this variant.
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total),
                      end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay,
                                       vect, positions, timeout, encoding)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                # Backslash-led vectors tolerate a slightly lower score.
                if bestEfficiency == 100 or (vect[0] == '\\'
                                             and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
                    if not args.skip:
                        choice = input(
                            '%s Would you like to continue scanning? [y/N] ' %
                            que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
コード例 #9
0
def singleTarget(target, paramData):
    """Scan a single target URL for reflected XSS.

    Flow: normalize the URL scheme, optionally run the DOM-sink check,
    detect a WAF, optionally fuzz, then for each parameter: confirm
    reflection, score reflection contexts (filterChecker), generate payload
    vectors, and report those whose reflection efficiency passes the
    thresholds, optionally opening a PoC in the browser. Interactive: may
    call input()/quit(). Relies on module-level globals (headers, delay,
    skipDOM, fuzz, args, skipPOC, ...).

    Args:
        target: base URL (scheme optional).
        paramData: POST body data; its presence switches the scan to POST.
    """
    # POST mode is implied by the presence of request body data.
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if target.startswith('http'):
        target = target
    else:
        try:
            # Prefer https; fall back to http if the https probe fails.
            response = requests.get('https://' + target)
            target = 'https://' + target
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            target = 'http://' + target
    try:
        response = requests.get(target).text
        if not skipDOM:
            print('%s Checking for DOM vulnerabilities' % run)
            if dom(response):
                print('%s Potentially vulnerable objects found' % good)
    except Exception as e:
        print('%s Unable to connect to the target' % bad)
        print('%s Error: %s' % (bad, e))
        quit()
    host = urlparse(target).netloc  # Extracts host out of the url
    url = getUrl(target, paramData, GET)
    params = getParams(target, paramData, GET)
    if args.find:
        # Parameter discovery replaces whatever was parsed from the URL.
        params = arjun(url, GET, headers, delay)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET,
                      delay)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    if fuzz:
        # Fuzzing mode short-circuits the normal scan entirely.
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, WAF)
        quit()

    for paramName in params.keys():
        # Work on a copy so only the parameter under test carries the probe.
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay).text
        occurences = htmlParser(response)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total),
                      end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay,
                                       vect)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                # Backslash-led vectors tolerate a slightly lower score.
                if bestEfficiency == 100 or (vect[0] == '\\'
                                             and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    # Fixed typo: was 'Cofidence'
                    print('%s Confidence: %i' % (info, confidence))
                    if GET:
                        # Open a proof-of-concept URL unless it would need
                        # escaping the flat query string can't express.
                        flatParams = flattenParams(paramName, paramsCopy, vect)
                        if '"' not in flatParams and '}' not in flatParams and not skipPOC:
                            webbrowser.open(url + flatParams)
                    choice = input(
                        '%s Would you like to continue scanning? [y/N] ' %
                        que).lower()
                    if choice != 'y':
                        quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    # Fixed typo: was 'Cofidence'
                    print('%s Confidence: %i' % (info, confidence))
コード例 #10
0
ファイル: scan.py プロジェクト: CrimsonJacket/ProjektFAST
def scan(target, form, paramData, encoding, headers, delay, timeout, skipDOM,
         skip):
    """Scan one form endpoint of *target* and collect working payloads.

    For each parameter name in *paramData*: confirm reflection, score
    reflection contexts (filterChecker), generate payload vectors, and keep
    every vector whose reflection efficiency passes the thresholds.

    Args:
        target: base URL (scheme optional).
        form: form path appended to *target* to build the scan URL.
        paramData: iterable of parameter names to test (values start empty).
        encoding: optional payload-encoding callable, forwarded to parsers.
        headers, delay, timeout: forwarded to the HTTP helpers.
        skipDOM, skip: accepted but unused in this variant.

    Returns:
        (url, vuln_params) where vuln_params maps each parameter name to the
        list of payloads that passed.
    """
    # This variant always scans with GET; POST is never used.
    GET, POST = (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            # Prefer https; fall back to http if the https probe fails.
            response = requester('https://' + target, {}, headers, GET, delay,
                                 timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text

    url = target + form + "/"

    # All candidate parameters start with empty values.
    params = {p: "" for p in paramData}

    vuln_params = {}

    for paramName in params.keys():
        logger.info(f"Target URL: {target+form}")
        logger.info(f"Target Param: {paramName}")

        # Work on a copy so only the parameter under test carries the probe.
        paramsCopy = copy.deepcopy(params)

        paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)

        logger.debug(f"Response: {response.text}")

        # NOTE(review): here htmlParser's result is used directly as the
        # occurences dict and its keys as positions — unlike the variants
        # where it returns an (occurences, positions) tuple; confirm against
        # this project's htmlParser.
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug('Scan occurences: {}'.format(occurences))
        if not occurences:
            logger.error('No reflection found')
            continue
        else:
            logger.info('   - Reflections found: %i' % len(occurences))

        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        # logger.info('Payloads generated: %i' % total)
        progress = 0

        payloads_used = []

        for confidence, vects in vectors.items():
            for vect in vects:
                loggerVector = vect
                progress += 1
                logger.run(
                    f'Analysing Reflections - Progress: {progress}/{total}\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay,
                                       vect, positions, timeout, encoding)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                # Backslash-led vectors tolerate a slightly lower score.
                if bestEfficiency == 100 or (vect[0] == '\\'
                                             and bestEfficiency >= 95):
                    payloads_used.append(loggerVector)

        logger.red_line(level='good')
        vuln_params.update({paramName: payloads_used})

    # logger.info(f"{url, vuln_params}")
    return url, vuln_params
コード例 #11
0
ファイル: xsstrike.py プロジェクト: spongyfrog/XSStrike
 total = 0
 for v in vectors.values():
     total += len(v)
 if total == 0:
     print('%s No vectors were crafted' % bad)
     continue
 print('%s Payloads generated: %i' % (info, total))
 progress = 0
 for confidence, vects in vectors.items():
     for vect in vects:
         progress += 1
         print('%s Payloads tried [%i/%i]' % (run, progress, total),
               end='\r')
         if not GET:
             vect = unquote(vect)
         efficiencies = checker(url, paramsCopy, headers, GET, delay, vect)
         if not efficiencies:
             for i in range(len(occurences)):
                 efficiencies.append(0)
         bestEfficiency = max(efficiencies)
         if bestEfficiency == 100 or (vect[0] == '\\'
                                      and bestEfficiency >= 95):
             print(('%s-%s' % (red, end)) * 60)
             print('%s Payload: %s' % (good, vect))
             print('%s Efficiency: %i' % (info, bestEfficiency))
             print('%s Cofidence: %i' % (info, confidence))
             if GET:
                 flatParams = flattenParams(paramName, paramsCopy, vect)
                 if '"' not in flatParams and '}' not in flatParams:
                     webbrowser.open(url + flatParams)
             choice = input(
コード例 #12
0
ファイル: scan.py プロジェクト: riszkymf/XSStrike
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find,
         skip):
    """Scan every parameter of *target* for reflected XSS and return a report.

    Parameters:
        target (str): URL to scan; ``http(s)://`` is prepended when missing.
        paramData: POST body data; a truthy value switches the scan to POST.
        encoding: optional callable used to encode payloads before sending.
        headers (dict): HTTP headers sent with every request.
        delay (int): seconds to wait between requests.
        timeout (int): per-request timeout in seconds.
        skipDOM (bool): skip the DOM-XSS source check when True.
        find (bool): discover parameters with arjun instead of the URL/body.
        skip (bool): never prompt the user after a confirmed payload.

    Returns:
        dict: ``potential_vulnerabilities`` (DOM findings) and
        ``parameter_reports`` — a list with one report dict per tested
        parameter, or an explanatory string when there was nothing to test.
    """
    reports = {}
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay,
                                 timeout)
            target = 'https://' + target
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any request failure means fall back to http.
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    vulnerable_code = list()
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                vulnerable_code.append(line)
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    potential_vulnerabilities = [{"code": vulnerable_code}]
    reports["potential_vulnerabilities"] = potential_vulnerabilities
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        reports['parameter_reports'] = "No parameters to test."
        return reports
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET,
                      delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    paramReports = list()
    for paramName in params.keys():
        paramReport = {"parameter": paramName, "encoding": None,
                       "reflection": None}
        # Bug fix: collect the report immediately so `continue` paths and
        # every parameter (not just the last one) end up in the result; the
        # dict is mutated in place below.
        paramReports.append(paramReport)
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
            paramReport['encoding'] = str(encoding)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug('Scan occurences: {}'.format(occurences))
        if not occurences:
            logger.error('No reflection found')
            paramReport['reflection'] = "No reflection found"
            continue
        logger.info('Reflections found: %i' % len(occurences))
        paramReport['reflection'] = len(occurences)

        logger.run('Analysing reflections')
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = sum(len(v) for v in vectors.values())
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        logger.info('Payloads generated: %i' % total)
        paramReport['payloads_generated'] = total
        payloadLists = list()
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                payloaditem = {}
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if not GET:
                    vect = unquote(vect)
                try:
                    efficiencies = checker(url, paramsCopy, headers, GET,
                                           delay, vect, positions, timeout,
                                           encoding)
                except Exception as e:
                    # Bug fix: record the failure in the report instead of
                    # discarding it (previously only a bare print("ERROR")).
                    payloaditem['error'] = str(e)
                    payloaditem['payload'] = loggerVector
                    payloadLists.append(payloaditem)
                    logger.error('Checker failed for payload %s: %s'
                                 % (loggerVector, e))
                    continue
                if not efficiencies:
                    # No result from the checker counts as zero efficiency
                    # for every reflection.
                    efficiencies = [0] * len(occurences)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\'
                                             and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                    if not skip:
                        choice = input(
                            '%s Would you like to continue scanning? [y/N] ' %
                            que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                payloaditem['payload'] = loggerVector
                payloaditem['efficiency'] = bestEfficiency
                payloaditem['confidence'] = confidence
                payloadLists.append(payloaditem)
                # Demoted from a stray print() to a debug log line.
                logger.debug('Payload report: {}'.format(payloaditem))
        paramReport['payload_reports'] = payloadLists
        logger.no_format('')
    # Bug fix: previously only the LAST parameter's report dict was stored
    # here (assignment sat outside the loop and `paramReports` was unused).
    reports['parameter_reports'] = paramReports
    return reports
# Code example #13 (0)
# File: scan.py -- Project: m00tiny/V3n0M-Scanner
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find,
         skip):
    """Interactively scan every parameter of *target* for reflected XSS.

    Findings and progress are emitted through ``logger``; nothing is
    returned. On a confirmed payload the user is prompted to continue
    unless *skip* is set.

    Args:
        target: URL to scan; ``http(s)://`` is prepended when missing.
        paramData: POST body data; a truthy value switches the scan to POST.
        encoding: optional callable used to encode payloads before sending.
        headers: HTTP headers sent with every request.
        delay: seconds to wait between requests.
        timeout: per-request timeout in seconds.
        skipDOM: skip the DOM-XSS source check when True.
        find: discover parameters with arjun instead of parsing the URL/body.
        skip: never prompt the user after a confirmed payload.
    """
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith("http"):
        try:
            response = requester("https://" + target, {}, headers, GET, delay,
                                 timeout)
            target = "https://" + target
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any request failure means fall back to http.
            target = "http://" + target
    logger.debug("Scan target: {}".format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text

    if not skipDOM:
        logger.run("Checking for DOM vulnerabilities")
        highlighted = dom(response)
        if highlighted:
            logger.good("Potentially vulnerable objects found")
            logger.red_line(level="good")
            for line in highlighted:
                logger.no_format(line, level="good")
            logger.red_line(level="good")
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug("Host to scan: {}".format(host))
    url = getUrl(target, GET)
    logger.debug("Url to scan: {}".format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json("Scan parameters:", params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error("No parameters to test.")
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET,
                      delay, timeout)
    if WAF:
        logger.error("WAF detected: %s%s%s" % (green, WAF, end))
    else:
        logger.good("WAF Status: %sOffline%s" % (green, end))

    for paramName in params.keys():
        # Fresh copy per parameter so one parameter's payload never leaks
        # into the next request.
        paramsCopy = copy.deepcopy(params)
        logger.info("Testing parameter: %s" % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug("Scan occurences: {}".format(occurences))
        if not occurences:
            logger.error("No reflection found")
            continue
        else:
            logger.info("Reflections found: %i" % len(occurences))

        logger.run("Analysing reflections")
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        logger.debug("Scan efficiencies: {}".format(efficiencies))
        logger.run("Generating payloads")
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error("No vectors were crafted.")
            continue
        logger.info("Payloads generated: %i" % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables["path"]:
                    # Path-style injection: keep slashes URL-encoded.
                    vect = vect.replace("/", "%2F")
                loggerVector = vect
                progress += 1
                logger.run("Progress: %i/%i\r" % (progress, total))
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(
                    url,
                    paramsCopy,
                    headers,
                    GET,
                    delay,
                    vect,
                    positions,
                    timeout,
                    encoding,
                )
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                # 100 means a perfect reflection; backslash payloads are
                # accepted at >= 95 as well.
                if bestEfficiency == 100 or (vect[0] == "\\"
                                             and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
                    if not skip:
                        choice = input(
                            "%s Would you like to continue scanning? [y/N] " %
                            que).lower()
                        if choice != "y":
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
        logger.no_format("")
# Code example #14 (0)
# File: scan.py -- Project: robdollard/XSStrike
def scan(target, paramData, verbose, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {},
                                 headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))

    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(
            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                printVector = vect
                progress += 1
                print ('%s Progress: %i/%i' % (run, progress, total), end='\r')
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(
                        url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, 100))
                        print('%s Confidence: %i' % (info, 10))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
        print ('')