Example #1
import string
import stringdist

def result():
    global text
    global final_transcript
    # Normalize case and strip punctuation before comparing the two transcripts
    text = text.lower()
    final_transcript = final_transcript.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))
    final_transcript = final_transcript.translate(str.maketrans('', '', string.punctuation))
    # Turn the normalized edit distance into a similarity percentage
    grade = str(int((1 - stringdist.rdlevenshtein_norm(text, final_transcript)) * 100)) + "%"
    print(grade)
    return grade
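A minimal, self-contained sketch of the same idea (not from the original project, and without the global state): lowercase both strings, strip punctuation, and convert the normalized restricted Damerau-Levenshtein distance into a similarity percentage.

import string
import stringdist

def similarity_percent(a, b):
    # Hypothetical helper mirroring result() above, without globals
    table = str.maketrans('', '', string.punctuation)
    a, b = a.lower().translate(table), b.lower().translate(table)
    return str(int((1 - stringdist.rdlevenshtein_norm(a, b)) * 100)) + "%"

print(similarity_percent("Hello, world!", "hello world"))  # "100%"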
Example #2
from stringdist import rdlevenshtein_norm

def rdlevenshtein_norm_to_array(string, array_of_strings):
    # Map the normalized distance from `string` onto every entry of the list
    g = lambda string_2: rdlevenshtein_norm(string, string_2)
    return list(map(g, array_of_strings))
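A hypothetical call to the helper above; each value is the edit distance divided by the length of the longer string.

print(rdlevenshtein_norm_to_array('kitten', ['kitten', 'mitten', 'sitting']))
# roughly [0.0, 0.17, 0.43]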
Example #3
def test_rdlevenshtein_norm_transposition(self):
    """It should return the right normalized distance when a transposition is involved"""
    self.assertEqual(rdlevenshtein_norm('abced', 'abcde'), 0.2)
Example #4
def test_rdlevenshtein_norm_substitution(self):
    """It should return the right normalized distance when a substitution is involved"""
    self.assertEqual(rdlevenshtein_norm('abcd!', 'abcde'), 0.2)
Example #5
def test_rdlevenshtein_norm_matching(self):
    """It should return the right normalized distance when the strings match"""
    self.assertEqual(rdlevenshtein_norm('abcde', 'abcde'), 0)
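Taken together, these tests pin down the normalization: a short sketch (independent of the test class) showing that rdlevenshtein_norm is the restricted Damerau-Levenshtein distance divided by the length of the longer string.

import stringdist

# An adjacent transposition ('ed' -> 'de') counts as a single edit in the
# restricted Damerau-Levenshtein distance, so 1 edit over 5 characters gives 0.2.
assert stringdist.rdlevenshtein('abced', 'abcde') == 1
assert stringdist.rdlevenshtein_norm('abced', 'abcde') == 0.2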
Example #6
File: Analysis.py Project: yskPs/XSRFProbe
def Analysis():
    '''
    The main idea behind this is to observe and analyse
    the patterns in which the CSRF tokens are generated by the server.
    '''
    ctr = 0  # Counter variable set to 0
    # Checking if the no of tokens is greater than 1
    if len(REQUEST_TOKENS) > 1:
        verbout(color.RED, '\n +--------------+')
        verbout(color.RED, ' |   Analysis   |')
        verbout(color.RED, ' +--------------+\n')
        print(GR + 'Proceeding for post-scan analysis of tokens gathered...')
        verbout(
            G, 'A total of %s tokens were discovered during the scan' %
            (len(REQUEST_TOKENS)))
        # The idea behind this is to generate all possible combinations (not
        # considering permutations) from the given list of discovered tokens
        # and generate anti-CSRF token generation pattern.
        for tokenx1, tokenx2 in itertools.combinations(REQUEST_TOKENS, 2):
            try:
                verbout(
                    GR,
                    'Analysing 2 Anti-CSRF Tokens from gathered requests...')
                verbout(color.CYAN,
                        ' [+] First Token: ' + color.BLUE + tokenx1)
                verbout(
                    color.ORANGE, ' [+] Shannon Entropy: ' + color.GREEN +
                    '%s' % (calcEntropy(tokenx1)))
                verbout(color.CYAN,
                        ' [+] Second Token: ' + color.BLUE + tokenx2)
                verbout(
                    color.ORANGE, ' [+] Shannon Entropy: ' + color.GREEN +
                    '%s' % (calcEntropy(tokenx2)))
                # Calculating the edit distance via the restricted Damerau-Levenshtein algorithm
                m = stringdist.rdlevenshtein(tokenx1, tokenx2)
                verbout(
                    color.CYAN, ' [+] Edit Distance Calculated: ' +
                    color.GREY + str(m))
                # Now it's time to detect the alignment ratio
                n = stringdist.rdlevenshtein_norm(tokenx1, tokenx2)
                verbout(
                    color.CYAN,
                    ' [+] Alignment Ratio Calculated: ' + color.GREY + str(n))
                # If both tokens have the same length
                if len(tokenx1) == len(tokenx2):
                    verbout(
                        C, 'Token length calculated is same: ' + color.ORANGE +
                        'Each %s bytes' % len(byteString(tokenx1)))
                else:
                    verbout(
                        C, 'Token length calculated is different: ' +
                        color.ORANGE + 'By %s bytes' %
                        (len(byteString(tokenx1)) - len(byteString(tokenx2))))
                time.sleep(0.5)
                # In my experience with web security assessments, often the Anti-CSRF token
                # is composed of two parts, one of them remains static while the other one dynamic.
                #
                # For example, if the Anti-CSRF Token for one request is “837456mzy29jkd911139” and
                # the other is “837456mzy29jkd337221”, the “837456mzy29jkd” part of the token remains
                # the same in both requests.
                #
                # The main idea behind this is to detect the static and dynamic part via DL Algorithm
                # as discussed above by calculating edit distance.
                p = sameSequence(tokenx1, tokenx2)
                tokenx01 = tokenx1.replace(p, '')
                tokenx02 = tokenx2.replace(p, '')
                if n == 0.5 or m == len(tokenx1) / 2:
                    verbout(
                        GR,
                        'The tokens are composed of 2 parts (one static and other dynamic)... '
                    )
                    verbout(
                        C, 'Static Part : ' + color.GREY + p + color.END +
                        ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(
                        O, 'Dynamic Part of Token 0x1: ' + color.GREY +
                        tokenx01 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx01)))
                    verbout(
                        O, 'Dynamic Part of Token 0x2: ' + color.GREY +
                        tokenx02 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx02)))
                    if len(tokenx1) / 2 <= 6:
                        verbout(
                            color.RED,
                            ' [-] Post-Analysis reveals that token might be ' +
                            color.BR + ' VULNERABLE ' + color.END + '!')
                        print(color.RED +
                              ' [+] Possible CSRF Vulnerability Detected!')
                        print(color.ORANGE + ' [!] Vulnerability Type: ' +
                              color.BR + ' Weak Dynamic Part of Tokens ' +
                              color.END)
                        print(color.GREY + ' [+] Tokens can easily be ' +
                              color.RED + 'Forged by Bruteforcing/Guessing' +
                              color.END + '!\n')
                        VulnLogger(
                            'Analysis',
                            'Tokens can easily be Forged by Bruteforcing/Guessing.',
                            '[i] Token 1: ' + tokenx1 + '\n[i] Token 2: ' +
                            tokenx2)
                elif n < 0.5 or m < len(tokenx1) / 2:
                    verbout(
                        R, 'Token distance calculated is ' + color.RED +
                        'less than 0.5!')
                    verbout(
                        C, 'Static Part : ' + color.GREY + p + color.END +
                        ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(
                        O, 'Dynamic Part of Token 0x1: ' + color.GREY +
                        tokenx01 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx01)))
                    verbout(
                        O, 'Dynamic Part of Token 0x2: ' + color.GREY +
                        tokenx02 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx02)))
                    verbout(
                        color.RED,
                        ' [-] Post-Analysis reveals that token might be ' +
                        color.BR + ' VULNERABLE ' + color.END + '!')
                    print(color.GREEN +
                          ' [+] Possible CSRF Vulnerability Detected!')
                    print(color.ORANGE + ' [!] Vulnerability Type: ' +
                          color.BR + ' Weak Dynamic Part of Tokens ' +
                          color.END)
                    print(color.GREY + ' [+] Tokens can easily be ' +
                          color.RED + 'Forged by Bruteforcing/Guessing' +
                          color.END + '!\n')
                    VulnLogger(
                        'Analysis',
                        'Tokens can easily be Forged by Bruteforcing/Guessing.',
                        '[i] Token 1: ' + tokenx1 + '\n[i] Token 2: ' +
                        tokenx2)
                else:
                    verbout(
                        R, 'Token distance calculated is ' + color.GREEN +
                        'greater than 0.5!')
                    verbout(
                        C, 'Static Part : ' + color.GREY + p + color.END +
                        ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(
                        O, 'Dynamic Part of Token 0x1: ' + color.GREY +
                        tokenx01 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx01)))
                    verbout(
                        O, 'Dynamic Part of Token 0x2: ' + color.GREY +
                        tokenx02 + color.END + ' | Length: ' + color.CYAN +
                        str(len(tokenx02)))
                    verbout(
                        color.GREEN,
                        ' [+] Post-Analysis reveals that tokens are ' +
                        color.BG + ' NOT VULNERABLE ' + color.END + '!')
                    print(color.ORANGE + ' [!] Vulnerability Mitigation: ' +
                          color.BG + ' Strong Dynamic Part of Tokens ' +
                          color.END)
                    print(color.GREY + ' [+] Tokens ' + color.GREEN +
                          'Cannot be Forged by Bruteforcing/Guessing' +
                          color.END + '!\n')
                    NovulLogger(
                        'Analysis',
                        'Tokens cannot be Forged by Bruteforcing/Guessing.')
                time.sleep(1)
            except KeyboardInterrupt:
                continue
        print(C + 'Post-Scan Analysis Completed!')
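A minimal sketch (not part of XSRFProbe) of the static/dynamic split that Analysis() reasons about, using the example tokens from the comment above; the longest common prefix stands in for the project's sameSequence() helper, whose exact behaviour may differ.

import os.path
import stringdist

tokenx1 = '837456mzy29jkd911139'
tokenx2 = '837456mzy29jkd337221'

m = stringdist.rdlevenshtein(tokenx1, tokenx2)       # 6: the last six characters differ
n = stringdist.rdlevenshtein_norm(tokenx1, tokenx2)  # 6 / 20 = 0.3

static = os.path.commonprefix([tokenx1, tokenx2])    # '837456mzy29jkd' (the static part)
dynamic1 = tokenx1[len(static):]                     # '911139'
dynamic2 = tokenx2[len(static):]                     # '337221'

print(m, n, static, dynamic1, dynamic2)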
Example #7
import stringdist

def fitness_(passwd, testWord):
    # Higher score means a closer match: 100 for identical strings
    score = stringdist.rdlevenshtein_norm(passwd, testWord)
    return (1 - score) * 100
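Hypothetical usage: identical strings score 100, and each differing character lowers the score in proportion to the longer string's length.

print(fitness_('secret', 'secret'))  # 100.0
print(fitness_('secret', 'secrex'))  # (1 - 1/6) * 100, roughly 83.3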
Example #8
        sys.exit(0)
    with open(sys.argv[1], 'r') as sf:
        flows = json.load(sf)
        sites = {fl[0] for fl in flows}
        sites.update({fl[1] for fl in flows})

        with open(sys.argv[2], 'r') as lt:
            topo = yaml.safe_load(lt)
            nets = [
                n for n in topo['nodes'] if topo['nodes'][n]['type'] == 'edge'
            ]

            site_map = {}
            for s in sites:
                dists = np.array(
                    [stringdist.rdlevenshtein_norm(s, n) for n in nets])
                i = dists.argmin()
                site_map[s] = nets[i]

            traffic_flows = [{
                'src_ip': random_ip(topo['nodes'][site_map[fl[0]]]['ip-prefixes']),
                'dst_ip': random_ip(topo['nodes'][site_map[fl[1]]]['ip-prefixes']),
                'start_time': fl[2],
                'end_time': fl[3],
                'volume': fl[4],
            } for fl in flows]
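A minimal sketch of the nearest-name matching used above, with made-up site and node names (the real values come from the JSON flow file and the YAML topology).

import numpy as np
import stringdist

sites = ['newyork-dc', 'london-pop']
nets = ['newyork-edge', 'london-edge', 'tokyo-edge']

site_map = {}
for s in sites:
    dists = np.array([stringdist.rdlevenshtein_norm(s, n) for n in nets])
    site_map[s] = nets[dists.argmin()]

print(site_map)  # each site maps to the edge node whose name is closest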