def Origin(url):
    """
    Check if the remote web application verifies the Origin header
    before processing the HTTP request.
    """
    verbout(color.RED, '\n +-------------------------------------+')
    verbout(color.RED, ' |   Origin Based Request Validation   |')
    verbout(color.RED, ' +-------------------------------------+\n')
    # Make the request normally and get the content.
    verbout(O, 'Making request on normal basis...')
    req0x01 = Get(url)
    global HEADER_VALUES
    # Set a fake Origin along with a User-Agent (pretending to be a
    # legitimate request from a browser).
    verbout(GR, 'Setting generic headers...')
    gen_headers = HEADER_VALUES
    gen_headers['Origin'] = ORIGIN_URL
    # We put the cookie in the request, if a cookie was supplied. :D
    if COOKIE_VALUE:
        gen_headers['Cookie'] = ','.join(cookie for cookie in COOKIE_VALUE)
    # Make the request with a different Origin header and get the content.
    verbout(O, 'Making request with ' + color.CYAN + 'Tampered Origin Header' + color.END + '...')
    req0x02 = Get(url, headers=gen_headers)
    HEADER_VALUES.pop('Origin', None)
    # Compare the lengths of the two responses. If both content lengths are
    # the same, the site does not validate the Origin header before
    # processing the HTTP request, which makes it more likely to be
    # vulnerable to CSRF attacks.
    #
    # IMPORTANT NOTE: Checking the Origin header does NOT protect the
    # application against all cases of CSRF, but it is a very good first
    # step. To exploit CSRF in an application protected this way, an
    # intruder would have to identify other vulnerabilities, such as XSS
    # or open redirects, on the same domain.
    #
    # TODO: This algorithm has lots of room for improvement.
    if len(req0x01.content) != len(req0x02.content):
        verbout(color.GREEN, ' [+] Endpoint ' + color.ORANGE + 'Origin Validation' + color.GREEN + ' Present!')
        print(color.GREEN + ' [-] Heuristics reveal endpoint might be ' + color.BG + ' NOT VULNERABLE ' + color.END + '...')
        print(color.ORANGE + ' [+] Mitigation Method: ' + color.BG + ' Origin Based Request Validation ' + color.END + '\n')
        NovulLogger(url, 'Presence of Origin Header based request validation.')
        return True
    else:
        verbout(R, 'Endpoint ' + color.RED + 'Origin Validation Not Present' + color.END + '!')
        verbout(R, 'Heuristics reveal endpoint might be ' + color.BY + ' VULNERABLE ' + color.END + ' to Origin Based CSRFs...')
        print(color.CYAN + ' [+] Possible CSRF Vulnerability Detected : ' + color.GREY + url + '!')
        print(color.ORANGE + ' [!] Possible Vulnerability Type: ' + color.BY + ' No Origin Based Request Validation ' + color.END + '\n')
        VulnLogger(url, 'No Origin Header based request validation presence.',
                   '[i] Response Headers: ' + str(req0x02.headers))
        return False
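
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of XSRFProbe's API): the heuristic above boils
# down to sending the same GET request twice, once with a forged Origin header,
# and comparing response lengths. The snippet below shows that idea with plain
# `requests`; the function name `origin_check_sketch` and the forged origin
# value are assumptions made for demonstration only.
def origin_check_sketch(url, forged_origin='https://attacker.example'):
    import requests
    baseline = requests.get(url, timeout=10)                       # normal request
    forged = requests.get(url, headers={'Origin': forged_origin},  # tampered Origin
                          timeout=10)
    # Equal content lengths suggest the server ignored the Origin header.
    return len(baseline.content) == len(forged.content)
# ----------------------------------------------------------------------------
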
def SameSite(url):
    '''
    This function parses and verifies the cookies carrying SameSite flags.
    '''
    verbout(color.RED, '\n +------------------------------------+')
    verbout(color.RED, ' |   Cross Origin Cookie Validation   |')
    verbout(color.RED, ' +------------------------------------+\n')
    # Some flags we'll need later...
    foundx1 = 0x00
    foundx2 = 0x00
    foundx3 = 0x00
    # Step 1: First we check whether the server returns any SameSite flag
    # on cookies when the Referer is the same as the netloc.
    verbout(color.GREY, ' [+] Lets examine how the server reacts to the same referer...')
    gen_headers = HEADER_VALUES
    gen_headers['User-Agent'] = USER_AGENT or RandomAgent()
    verbout(GR, 'Setting Referer header same as host...')
    # Setting the netloc as the referer for the first check.
    gen_headers['Referer'] = urlsplit(url).netloc
    if COOKIE_VALUE:
        for cook in COOKIE_VALUE:
            gen_headers['Cookie'] = cook
    getreq = Get(url, headers=gen_headers)  # Making the request
    head = getreq.headers
    for h in head:
        # if search('cookie', h, I) or search('set-cookie', h, I):
        if 'cookie' in h.lower():
            verbout(G, 'Found cookie header value...')
            cookieval = head[h]
            verbout(color.ORANGE, ' [+] Cookie Received: ' + color.CYAN + str(cookieval))
            m = cookieval.split(';')
            verbout(GR, 'Examining Cookie...')
            for q in m:
                if search('SameSite', q, I):
                    verbout(G, 'SameSite Flag ' + color.ORANGE + ' detected on cookie!')
                    foundx1 = 0x01
                    q = q.split('=')[1].strip()
                    verbout(C, 'Cookie: ' + color.ORANGE + q)
                    break
        else:
            foundx3 = 0x02
    if foundx1 == 0x01:
        verbout(R, ' [+] Endpoint ' + color.ORANGE + 'SameSite Flag Cookie Validation' + color.END + ' Present!')
    # Step 2: Now we check the security mechanism when the Referer is
    # different, i.e. the request originates from a URL other than the
    # host. (This time without the Cookie assigned.)
    verbout(color.GREY, ' [+] Lets examine how the server reacts to a fake external referer...')
    gen_headers = HEADER_VALUES
    gen_headers['User-Agent'] = USER_AGENT or RandomAgent()  # Setting user-agents
    # Assigning a fake referer for the second check, but no cookie.
    gen_headers['Referer'] = REFERER_URL
    getreq = Get(url, headers=gen_headers)
    head = getreq.headers  # Getting headers from the response
    for h in head:
        # if search('cookie', h, I) or search('set-cookie', h, I):
        if 'cookie' in h.lower():
            verbout(G, 'Found cookie header value...')
            cookieval = head[h]
            verbout(color.ORANGE, ' [+] Cookie Received: ' + color.CYAN + str(cookieval))
            m = cookieval.split(';')
            verbout(GR, 'Examining Cookie...')
            for q in m:
                if search('SameSite', q, I):
                    verbout(G, 'SameSite Flag ' + color.ORANGE + ' detected on cookie!')
                    foundx2 = 0x01
                    q = q.split('=')[1].strip()
                    verbout(C, 'Cookie: ' + color.ORANGE + q)
                    break
        else:
            foundx3 = 0x02
    if foundx2 == 0x01:
        verbout(R, ' [+] Endpoint ' + color.ORANGE + 'SameSite Flag Cookie Validation' + color.END + ' Present!')
    # Step 3: And finally comes the most important step. Lets see how the
    # site reacts to a valid cookie (supplied by the user) coming from a
    # different site, i.e. Referer set to something other than the host.
    # This is the most crucial part of the detection.
    #
    # TODO: Improve the detection logic.
    verbout(color.GREY, ' [+] Lets examine how the server reacts to a valid cookie from a different referer...')
    gen_headers = HEADER_VALUES
    gen_headers['User-Agent'] = USER_AGENT or RandomAgent()
    # Assigning a fake referer for the third request, this time with the cookie. ;)
    gen_headers['Referer'] = REFERER_URL
    if COOKIE_VALUE:
        for cook in COOKIE_VALUE:
            gen_headers['Cookie'] = cook
    getreq = Get(url, headers=gen_headers)
    head = getreq.headers
    for h in head:
        # if search('cookie', h, I) or search('set-cookie', h, I):
        if 'cookie' in h.lower():
            verbout(G, 'Found cookie header value...')
            cookieval = head[h]
            verbout(color.ORANGE, ' [+] Cookie Received: ' + color.CYAN + str(cookieval))
            m = cookieval.split(';')
            verbout(GR, 'Examining Cookie...')
            for q in m:
                if search('SameSite', q, I):
                    verbout(G, 'SameSite Flag ' + color.ORANGE + ' detected on cookie on Cross Origin Request!')
                    foundx3 = 0x01
                    q = q.split('=')[1].strip()
                    verbout(C, 'Cookie: ' + color.ORANGE + q)
                    break
        else:
            foundx3 = 0x02
    if foundx3 == 0x01:
        verbout(R, 'Endpoint ' + color.ORANGE + 'SameSite Flag Cookie Validation' + color.END + ' is Present!')
    if (foundx1 == 0x01 and foundx3 == 0x00) and (foundx2 == 0x00 or foundx2 == 0x01):
        print(color.GREEN + ' [+] Endpoint ' + color.BG + ' NOT VULNERABLE to ANY type of CSRF attacks! ' + color.END)
        print(color.GREEN + ' [+] Protection Method Detected : ' + color.BG + ' SameSite Flag on Cookies ' + color.END)
        NovulLogger(url, 'SameSite Flag set on Cookies on Cross-Origin Requests.')
        # If a SameSite flag is set on cookies, the application is practically
        # immune to CSRF attacks unless there is some XSS on it. So for now the
        # job of this application is done. We need to confirm before we quit.
        oq = input(color.BLUE + ' [+] Continue scanning? (y/N) :> ')
        if oq.lower().startswith('n'):
            sys.exit('\n' + R + 'Shutting down XSRFProbe...\n')
    elif foundx1 == 0x02 and foundx2 == 0x02 and foundx3 == 0x02:
        print(color.GREEN + ' [+] Endpoint ' + color.BG + ' NOT VULNERABLE ' + color.END + color.GREEN + ' to CSRF attacks!')
        print(color.GREEN + ' [+] Type: ' + color.BG + ' No Cookie Set while Cross Origin Requests ' + color.END)
        NovulLogger(url, 'No cookie set on Cross-Origin Requests.')
    else:
        verbout(R, 'Endpoint ' + color.ORANGE + 'Cross Origin Cookie Validation' + color.END + ' Not Present!')
        verbout(R, 'Heuristic(s) reveal endpoint might be ' + color.BY + ' VULNERABLE ' + color.END + ' to CSRFs...')
        print(color.CYAN + ' [+] Possible CSRF Vulnerability Detected : ' + color.GREY + url + '!')
        print(color.ORANGE + ' [!] Possible Vulnerability Type: ' + color.BY + ' No Cross Origin Cookie Validation Presence ' + color.END)
        VulnLogger(url, 'No Cookie Validation on Cross-Origin Requests.', '[i] Headers: ' + str(head))
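
# ----------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the tool's API): the core of
# the SameSite check is simply inspecting Set-Cookie response headers for a
# `SameSite` attribute. A minimal standalone version using `requests` could
# look like this; `has_samesite_cookie` is a hypothetical helper name.
def has_samesite_cookie(url):
    import requests
    resp = requests.get(url, timeout=10)
    # Walk every Set-Cookie header the server sent back.
    for header, value in resp.headers.items():
        if 'set-cookie' in header.lower():
            # Attributes are semicolon separated, e.g. "id=abc; SameSite=Lax".
            for attrib in value.split(';'):
                if attrib.strip().lower().startswith('samesite'):
                    return True
    return False
# ----------------------------------------------------------------------------
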
def Engine():  # lets begin it!
    os.system('clear')  # Clear the terminal :p
    banner()  # Print the banner
    banabout()  # The second banner
    web, fld = inputin()  # Take the input
    form1 = testFormx1()  # Get form 1 ready
    form2 = testFormx2()  # Get form 2 ready
    # For the cookies that we encounter during requests...
    Cookie0 = http.cookiejar.CookieJar()  # First as User1
    Cookie1 = http.cookiejar.CookieJar()  # Then as User2
    resp1 = build_opener(HTTPCookieProcessor(Cookie0))  # Process cookies
    resp2 = build_opener(HTTPCookieProcessor(Cookie1))  # Process cookies
    actionDone = []  # init to the done stuff
    csrf = ''  # no token initialised / invalid token
    ref_detect = 0x00  # Null char flag
    ori_detect = 0x00  # Null char flag
    form = Debugger.Form_Debugger()  # init to the form parser + token generator
    bs1 = BeautifulSoup(form1).findAll('form', action=True)[0]  # make sure the stuff works properly
    bs2 = BeautifulSoup(form2).findAll('form', action=True)[0]  # same as above
    init1 = web  # First init
    resp1.open(init1)  # Make request as User1
    resp2.open(init1)  # Make request as User2
    # There are 2 different modes of scanning and crawling here:
    #   1st -> Testing a single endpoint without the --crawl flag.
    #   2nd -> Testing all endpoints with the --crawl flag.
    try:
        # Implementing the first mode. [NO CRAWL]
        if not CRAWL_SITE:
            url = web
            response = Get(url).text
            try:
                verbout(O, 'Trying to parse response...')
                soup = BeautifulSoup(response)  # Parser init
            except HTMLParser.HTMLParseError:
                verbout(R, 'BeautifulSoup Error: ' + url)
            i = 0  # Init user number
            if REFERER_ORIGIN_CHECKS:
                # Referer based checks if True...
                verbout(O, 'Checking endpoint request validation via ' + color.GREY + 'Referer' + color.END + ' Checks...')
                if Referer(url):
                    ref_detect = 0x01
                verbout(O, 'Confirming the vulnerability...')
                # We have finished with Referer based checks, lets go for Origin based ones...
                verbout(O, 'Confirming endpoint request validation via ' + color.GREY + 'Origin' + color.END + ' Checks...')
                if Origin(url):
                    ori_detect = 0x01
            # Now lets get the forms...
            verbout(O, 'Retrieving all forms on ' + color.GREY + url + color.END + '...')
            for m in Debugger.getAllForms(soup):  # iterating over all forms extracted
                verbout(O, 'Testing form:\n' + color.CYAN)
                formPrettify(m.prettify())
                verbout('', '')
                FORMS_TESTED.append('(i) ' + url + ':\n\n' + m.prettify() + '\n')
                try:
                    if m['action']:
                        pass
                except KeyError:
                    m['action'] = '/' + url.rsplit('/', 1)[1]
                    ErrorLogger(url, 'No standard form "action".')
                action = Parser.buildAction(url, m['action'])  # get all forms which have the 'action' attribute
                if not action in actionDone and action != '':  # if url returned is neither null nor a duplicate...
                    # If form submission is kept to True
                    if FORM_SUBMISSION:
                        try:
                            # The idea here is to make requests pretending to be 3 different
                            # users. A series of requests will be targeted against the site
                            # with different identities. Refer to the XSRFProbe wiki for more info.
                            #
                            # NOTE: Slow connections may cause read timeouts which may result in AttributeError
                            result, genpoc = form.prepareFormInputs(m)  # prepare inputs as user 1
                            r1 = Post(url, action, result)  # make request with token values generated as user1
                            result, genpoc = form.prepareFormInputs(m)  # prepare inputs as user 2
                            r2 = Post(url, action, result)  # again make request with token values generated as user2
                            # Go for cookie based checks
                            if COOKIE_BASED:
                                Cookie(url, r1)
                            # Go for token based entropy checks...
                            try:
                                if m['name']:
                                    query, token = Entropy(result, url, r1.headers, m.prettify(), m['action'], m['name'])
                            except KeyError:
                                query, token = Entropy(result, url, r1.headers, m.prettify(), m['action'])
                            # Now its time to detect the encoding type (if any) of the Anti-CSRF token.
                            fnd, detct = Encoding(token)
                            if fnd == 0x01 and detct:
                                VulnLogger(url, 'Token is a string encoded value which can probably be decrypted.', '[i] Encoding: ' + detct)
                            else:
                                NovulLogger(url, 'Anti-CSRF token is not a string encoded value.')
                            # Go for token parameter tamper checks.
                            if (query and token):
                                txor = Tamper(url, action, result, r2.text, query, token)
                                o2 = Get(url).text  # make request as user2
                                try:
                                    form2 = Debugger.getAllForms(BeautifulSoup(o2))[i]  # user2 gets his form
                                except IndexError:
                                    verbout(R, 'Form Index Error')
                                    ErrorLogger(url, 'Form Index Error.')
                                    continue  # Making sure the program won't end here (dirty fix :( )
                                verbout(GR, 'Preparing form inputs...')
                                contents2, genpoc = form.prepareFormInputs(form2)  # prepare form 3 as user3
                                r3 = Post(url, action, contents2)  # make request as user3 with user3's form
                                if (POST_BASED) and ((not query) or (txor)):
                                    try:
                                        if m['name']:
                                            PostBased(url, r1.text, r2.text, r3.text, m['action'], result, genpoc, m.prettify(), m['name'])
                                    except KeyError:
                                        PostBased(url, r1.text, r2.text, r3.text, m['action'], result, genpoc, m.prettify())
                                else:
                                    print(color.GREEN + ' [+] The form was requested with an Anti-CSRF token.')
                                    print(color.GREEN + ' [+] Endpoint ' + color.BG + ' NOT VULNERABLE ' + color.END + color.GREEN + ' to POST-Based CSRF Attacks!')
                                    NovulLogger(url, 'Not vulnerable to POST-Based CSRF Attacks.')
                        except HTTPError as msg:  # if runtime exception...
                            verbout(R, 'Exception : ' + msg.__str__())  # again exception :(
                            ErrorLogger(url, msg)
                actionDone.append(action)  # add the stuff done
                i += 1  # Increase user iteration
        else:
            # Implementing the 2nd mode [CRAWLING AND SCANNING].
            verbout(GR, "Initializing crawling and scanning...")
            crawler = Crawler.Handler(init1, resp1)  # Init the Crawler handler
            while crawler.noinit():  # Until no urls are left
                url = next(crawler)  # Go for the next!
                print(C + 'Testing :> ' + color.CYAN + url)  # Display the url it is crawling
                try:
                    soup = crawler.process(fld)  # Start the parser
                    if not soup:
                        continue  # Making sure not to end the program yet...
                    i = 0  # Set count = 0 (user number 0, which will be subsequently incremented)
                    if REFERER_ORIGIN_CHECKS:
                        # Referer based checks if True...
                        verbout(O, 'Checking endpoint request validation via ' + color.GREY + 'Referer' + color.END + ' Checks...')
                        if Referer(url):
                            ref_detect = 0x01
                        verbout(O, 'Confirming the vulnerability...')
                        # We have finished with Referer based checks, lets go for Origin based ones...
                        verbout(O, 'Confirming endpoint request validation via ' + color.GREY + 'Origin' + color.END + ' Checks...')
                        if Origin(url):
                            ori_detect = 0x01
                    # Now lets get the forms...
                    verbout(O, 'Retrieving all forms on ' + color.GREY + url + color.END + '...')
                    for m in Debugger.getAllForms(soup):  # iterating over all forms extracted
                        FORMS_TESTED.append('(i) ' + url + ':\n\n' + m.prettify() + '\n')
                        try:
                            if m['action']:
                                pass
                        except KeyError:
                            m['action'] = '/' + url.rsplit('/', 1)[1]
                            ErrorLogger(url, 'No standard "action" attribute.')
                        action = Parser.buildAction(url, m['action'])  # get all forms which have the 'action' attribute
                        if not action in actionDone and action != '':  # if url returned is neither null nor a duplicate...
                            # If form submission is kept to True
                            if FORM_SUBMISSION:
                                try:
                                    result, genpoc = form.prepareFormInputs(m)  # prepare inputs as user 1
                                    r1 = Post(url, action, result)  # make request with token values generated as user1
                                    result, genpoc = form.prepareFormInputs(m)  # prepare inputs as user 2
                                    r2 = Post(url, action, result)  # again make request with token values generated as user2
                                    if COOKIE_BASED:
                                        Cookie(url, r1)
                                    # Go for token based entropy checks...
                                    try:
                                        if m['name']:
                                            query, token = Entropy(result, url, r1.headers, m.prettify(), m['action'], m['name'])
                                    except KeyError:
                                        query, token = Entropy(result, url, r1.headers, m.prettify(), m['action'])
                                        ErrorLogger(url, 'No standard form "name".')
                                    # Now its time to detect the encoding type (if any) of the Anti-CSRF token.
                                    fnd, detct = Encoding(token)
                                    if fnd == 0x01 and detct:
                                        VulnLogger(url, 'String encoded token value. Token might be decrypted.', '[i] Encoding: ' + detct)
                                    else:
                                        NovulLogger(url, 'Anti-CSRF token is not a string encoded value.')
                                    # Go for token parameter tamper checks.
                                    if (query and token):
                                        txor = Tamper(url, action, result, r2.text, query, token)
                                        o2 = Get(url).text  # make request as user2
                                        try:
                                            form2 = Debugger.getAllForms(BeautifulSoup(o2))[i]  # user2 gets his form
                                        except IndexError:
                                            verbout(R, 'Form Index Error')
                                            ErrorLogger(url, 'Form Index Error.')
                                            continue  # making sure the program won't end here (dirty fix :( )
                                        verbout(GR, 'Preparing form inputs...')
                                        contents2, genpoc = form.prepareFormInputs(form2)  # prepare form 3 as user3
                                        r3 = Post(url, action, contents2)  # make request as user3 with user3's form
                                        if (POST_BASED) and ((query == '') or (txor == True)):
                                            try:
                                                if m['name']:
                                                    PostBased(url, r1.text, r2.text, r3.text, m['action'], result, genpoc, m.prettify(), m['name'])
                                            except KeyError:
                                                PostBased(url, r1.text, r2.text, r3.text, m['action'], result, genpoc, m.prettify())
                                        else:
                                            print(color.GREEN + ' [+] The form was requested with an Anti-CSRF token.')
                                            print(color.GREEN + ' [+] Endpoint ' + color.BG + ' NOT VULNERABLE ' + color.END + color.GREEN + ' to POST-Based CSRF Attacks!')
                                            NovulLogger(url, 'Not vulnerable to POST-Based CSRF Attacks.')
                                except HTTPError as msg:  # if runtime exception...
                                    verbout(color.RED, ' [-] Exception : ' + color.END + msg.__str__())  # again exception :(
                                    ErrorLogger(url, msg)
                        actionDone.append(action)  # add the stuff done
                        i += 1  # Increase user iteration
                except URLError as e:  # if again...
                    verbout(R, 'Exception at : ' + url)  # again exception -_-
                    time.sleep(0.4)
                    verbout(O, 'Moving on...')
                    ErrorLogger(url, e)
                    continue  # make sure it doesn't stop at exceptions
                # This error usually happens when sites are protected by a load
                # balancer, e.g. Cloudflare. These domains return a 403 Forbidden
                # response in various contexts, for example when making reverse
                # DNS queries.
                except HTTPError as e:
                    if str(e.code) == '403':
                        verbout(R, 'HTTP Authentication Error!')
                        verbout(R, 'Error Code : ' + O + str(e.code))
                        ErrorLogger(url, e)
                        quit()
        GetLogger()  # The scanning has finished, so now we can log out all the links ;)
        print('\n' + G + "Scan completed!" + '\n')
        Analysis()  # For post-scan analysis
    except KeyboardInterrupt as e:  # In case the user wants to exit :') (while crawling)
        verbout(R, 'User Interrupt!')
        time.sleep(1.5)
        Analysis()  # For post-scan analysis
        print(R + 'Aborted!')  # say goodbye
        ErrorLogger('KeyBoard Interrupt', 'Aborted')
        GetLogger()  # The scanning was interrupted, so now we can log out all the links ;)
        sys.exit(1)
    except Exception as e:
        verbout(R, e.__str__())
        ErrorLogger(url, e)
        GetLogger()
        sys.exit(1)
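
# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not the tool's Parser.buildAction):
# the per-form loop above boils down to extracting every <form>, resolving its
# action attribute against the page URL, and then submitting generated inputs.
# A stripped-down version of the extraction step could look like this.
def extract_form_actions_sketch(page_url, html):
    from urllib.parse import urljoin
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    actions = []
    for frm in soup.find_all('form'):
        # Fall back to the page itself when the form has no action attribute.
        action = frm.get('action') or page_url
        actions.append(urljoin(page_url, action))  # resolve relative actions
    return actions
# ----------------------------------------------------------------------------
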
def Entropy(req, url, headers, form, m_action, m_name=''):
    """
    This function compares tokens and calculates Shannon Entropy to judge
    the strength of Anti-CSRF tokens in POST based requests.
    """
    found = 0x00
    # The minimum length of a CSRF token should be 6 bytes.
    min_length = 6
    # I have never seen a CSRF token longer than 256 bytes, so the main
    # concept here is doubling that and checking to make sure we don't
    # examine parameters which are files in multipart uploads or similar.
    #
    # Multipart uploads usually have a trailing sequence of characters
    # which could be mistaken for a CSRF token. This is a very important
    # step with respect to decreasing [[ False Positives ]].
    max_length = 256 * 2
    # The Shannon Entropy calculated for a particular CSRF token should be
    # at least 3.0. If the token entropy is less than that, the request can
    # easily be forged, making the application vulnerable even in the
    # presence of a CSRF token.
    min_entropy = 3.0
    # Check for common CSRF token names.
    _q, para = Token(req, headers)
    if _q is None or para is None:
        VulnLogger(url, 'Form Requested Without Anti-CSRF Token.',
                   '[i] Form Requested: ' + form + '\n[i] Request Query: ' + req.__str__())
        return '', ''
    verbout(color.RED, '\n +------------------------------+')
    verbout(color.RED, ' |   Token Strength Detection   |')
    verbout(color.RED, ' +------------------------------+\n')
    for para in REQUEST_TOKENS:
        # Converting the token to a raw string, since some special
        # characters might mess with the Shannon Entropy calculation.
        value = r'%s' % para
        verbout(color.CYAN, ' [!] Testing Anti-CSRF Token: ' + color.ORANGE + '%s' % (value))
        # Check length
        if len(value) <= min_length:
            print(color.RED + ' [-] CSRF Token Length less than 6 bytes. ' + color.ORANGE + 'Token value can be guessed/bruteforced...')
            print(color.ORANGE + ' [-] Endpoint likely ' + color.BR + ' VULNERABLE ' + color.END + color.ORANGE + ' to CSRF Attacks...')
            print(color.RED + ' [!] Vulnerability Type: ' + color.BR + ' Very Short/No Anti-CSRF Tokens ' + color.END)
            VulnLogger(url, 'Very Short Anti-CSRF Tokens.', 'Token: ' + value)
        if len(value) >= max_length:
            print(color.ORANGE + ' [+] CSRF Token Length greater than ' + color.CYAN + '256 bytes. ' + color.GREEN + 'Token value cannot be guessed/bruteforced...')
            print(color.GREEN + ' [+] Endpoint likely ' + color.BG + ' NOT VULNERABLE ' + color.END + color.GREEN + ' to CSRF Attacks...')
            print(color.GREEN + ' [!] CSRF Mitigation Method: ' + color.BG + ' Long Anti-CSRF Tokens ' + color.END)
            NovulLogger(url, 'Long Anti-CSRF tokens with Good Strength.')
            found = 0x01
        # Checking entropy
        verbout(O, 'Proceeding to calculate ' + color.GREY + 'Shannon Entropy' + color.END + ' of the audited token...')
        entropy = calcEntropy(value)
        verbout(GR, 'Calculating Entropy...')
        verbout(color.BLUE, ' [+] Entropy Calculated: ' + color.CYAN + str(entropy))
        if entropy >= min_entropy:
            verbout(color.ORANGE, ' [+] Anti-CSRF Token Entropy Calculated is ' + color.BY + ' GREATER than 3.0 ' + color.END + '... ')
            print(color.ORANGE + ' [+] Endpoint ' + color.BY + ' PROBABLY NOT VULNERABLE ' + color.END + color.ORANGE + ' to CSRF Attacks...')
            print(color.ORANGE + ' [!] CSRF Mitigation Method: ' + color.BY + ' High Entropy Anti-CSRF Tokens ' + color.END)
            NovulLogger(url, 'High Entropy Anti-CSRF Tokens.')
            found = 0x01
        else:
            verbout(color.RED, ' [-] Anti-CSRF Token Entropy Calculated is ' + color.BY + ' LESS than 3.0 ' + color.END + '... ')
            print(color.RED + ' [-] Endpoint likely ' + color.BR + ' VULNERABLE ' + color.END + color.RED + ' to CSRF Attacks in spite of CSRF Tokens...')
            print(color.RED + ' [!] Vulnerability Type: ' + color.BR + ' Low Entropy Anti-CSRF Tokens ' + color.END)
            VulnLogger(url, 'Low Entropy Anti-CSRF Tokens.', 'Token: ' + value)
    if found == 0x00:
        if m_name:
            print(color.RED + '\n +---------+')
            print(color.RED + ' |   PoC   |')
            print(color.RED + ' +---------+\n')
            print(color.BLUE + ' [+] URL : ' + color.CYAN + url)
            print(color.CYAN + ' [+] Name : ' + color.ORANGE + m_name)
            print(color.GREEN + ' [+] Action : ' + color.ORANGE + m_action)
        else:  # if the m_name value isn't there :(
            print(color.RED + '\n +---------+')
            print(color.RED + ' |   PoC   |')
            print(color.RED + ' +---------+\n')
            print(color.BLUE + ' [+] URL : ' + color.CYAN + url)
            print(color.GREEN + ' [+] Action : ' + color.ORANGE + m_action)
        # Print out the params
        print(color.ORANGE + ' [+] Query : ' + color.GREY + urllib.parse.urlencode(req))
        print('')
    return (_q, para)  # Return the query parameter and the anti-CSRF token
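
# ----------------------------------------------------------------------------
# Illustrative sketch (an assumed standalone re-implementation, not the tool's
# calcEntropy): Shannon entropy over the characters of a token,
# H = -sum(p_c * log2(p_c)), where p_c is the relative frequency of each
# character. A short random-looking token scores noticeably higher than "aaaaaa".
def shannon_entropy_sketch(token):
    import math
    from collections import Counter
    if not token:
        return 0.0
    counts = Counter(token)
    total = len(token)
    # Sum the per-character contributions -p * log2(p).
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

# Example: shannon_entropy_sketch('aaaaaa') -> 0.0, while a token such as
# 'f3a9c1b7' yields roughly 3.0, which is why 3.0 is used as the cut-off above.
# ----------------------------------------------------------------------------
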
def Tamper(url, action, req, body, query, para):
    '''
    The main idea here is to tamper with the Anti-CSRF tokens found and
    check the content lengths of the responses for related vulnerabilities.
    '''
    verbout(color.RED, '\n +---------------------------------------+')
    verbout(color.RED, ' |   Anti-CSRF Token Tamper Validation   |')
    verbout(color.RED, ' +---------------------------------------+\n')
    # Null char flags (hex)
    flagx1, destx1 = 0x00, 0x00
    flagx2, destx2 = 0x00, 0x00
    flagx3, destx3 = 0x00, 0x00
    verbout(GR, 'Proceeding for CSRF attack via Anti-CSRF token tampering...')
    # First of all lets get our token from the request
    if para == '':
        return True
    # Converting the token to a raw string, since some special
    # characters might mess with the operation.
    value = r'%s' % para
    copy = req
    # Alright, lets start...
    # [Step 1]: First we take the token, replace a particular character at a
    # specific position (here the 4th position) and test the response body.
    #
    # Required check to make sure the character at that position isn't the
    # same character we are going to replace it with.
    verbout(GR, 'Tampering Token by ' + color.GREY + 'index replacement' + color.END + '...')
    if value[3] != 'a':
        tampvalx1 = replaceStrIndex(value, 3, 'a')
    else:
        tampvalx1 = replaceStrIndex(value, 3, 'x')
    verbout(color.BLUE, ' [+] Original Token: ' + color.CYAN + value)
    verbout(color.BLUE, ' [+] Tampered Token: ' + color.CYAN + tampvalx1)
    # Lets build up the request...
    req[query] = tampvalx1
    resp = Post(url, action, req)
    # If there is a 40x (Not Found) or a 50x (Internal Error) response, we
    # assume the tamper did not work. :( But if there is a 20x (Accepted)
    # or a 30x (Redirection) response, then we know it worked.
    #
    # Also, if the previous request has the same content length as this
    # tampered request, then we have the vulnerability.
    #
    # NOTE: This algorithm has lots of room for improvement.
    if str(resp.status_code).startswith('2'):
        destx1 = 0x01
    if not any(search(s, resp.text, I) for s in TOKEN_ERRORS):
        destx2 = 0x01
    if len(body) == len(resp.text):
        destx3 = 0x01
    if ((destx1 == 0x01 and destx2 == 0x01) or (destx3 == 0x01)):
        verbout(color.RED, ' [-] Anti-CSRF Token tamper by ' + color.GREY + 'index replacement' + color.RED + ' returns valid response!')
        flagx1 = 0x01
        VulnLogger(url, 'Anti-CSRF Token tamper by index replacement returns valid response.',
                   '[i] POST Query: ' + req.__str__())
    else:
        verbout(color.RED, ' [+] Token tamper in request does not return valid response!')
        NovulLogger(url, 'Anti-CSRF Token tamper by index replacement does not return valid response.')
    # [Step 2]: Second we take the token, remove a character at a specific
    # position and test the response body.
    verbout(GR, 'Tampering Token by ' + color.GREY + 'index removal' + color.END + '...')
    tampvalx2 = replaceStrIndex(value, 3)
    verbout(color.BLUE, ' [+] Original Token: ' + color.CYAN + value)
    verbout(color.BLUE, ' [+] Tampered Token: ' + color.CYAN + tampvalx2)
    # Lets build up the request...
    req[query] = tampvalx2
    resp = Post(url, action, req)
    # If there is a 40x (Not Found) or a 50x (Internal Error) response, we
    # assume the tamper did not work. :( But if there is a 20x (Accepted)
    # or a 30x (Redirection) response, then we know it worked.
    #
    # NOTE: This algorithm has lots of room for improvement.
    if str(resp.status_code).startswith('2'):
        destx1 = 0x02
    if not any(search(s, resp.text, I) for s in TOKEN_ERRORS):
        destx2 = 0x02
    if len(body) == len(resp.text):
        destx3 = 0x02
    if ((destx1 == 0x02 and destx2 == 0x02) or destx3 == 0x02):
        verbout(color.RED, ' [-] Anti-CSRF Token tamper by ' + color.GREY + 'index removal' + color.RED + ' returns valid response!')
        flagx2 = 0x01
        VulnLogger(url, 'Anti-CSRF Token tamper by index removal returns valid response.',
                   '[i] POST Query: ' + req.__str__())
    else:
        verbout(color.RED, ' [+] Token tamper in request does not return valid response!')
        NovulLogger(url, 'Anti-CSRF Token tamper by index removal does not return valid response.')
    # [Step 3]: Third we remove the whole Anti-CSRF token from the request
    # and test the response body.
    verbout(GR, 'Tampering Token by ' + color.GREY + 'Token removal' + color.END + '...')
    # Removing the anti-CSRF token from the request
    del req[query]
    verbout(color.GREY, ' [+] Removed token parameter from request!')
    # Lets build up the request...
    resp = Post(url, action, req)
    # If there is a 40x (Not Found) or a 50x (Internal Error) response, we
    # assume the tamper did not work. :( But if there is a 20x (Accepted)
    # or a 30x (Redirection) response, then we know it worked.
    #
    # NOTE: This algorithm has lots of room for improvement.
    if str(resp.status_code).startswith('2'):
        destx1 = 0x03
    if not any(search(s, resp.text, I) for s in TOKEN_ERRORS):
        destx2 = 0x03
    if len(body) == len(resp.text):
        destx3 = 0x03
    if ((destx1 == 0x03 and destx2 == 0x03) or destx3 == 0x03):
        verbout(color.RED, ' [-] Anti-CSRF' + color.GREY + ' Token removal' + color.RED + ' returns valid response!')
        flagx3 = 0x01
        VulnLogger(url, 'Anti-CSRF Token removal returns valid response.',
                   '[i] POST Query: ' + req.__str__())
    else:
        verbout(color.RED, ' [+] Token tamper in request does not return valid response!')
        NovulLogger(url, 'Anti-CSRF Token removal does not return valid response.')
    # If any two of the forgeries worked...
    if ((flagx1 == 0x01 and flagx2 == 0x01) or (flagx1 == 0x01 and flagx3 == 0x01)
            or (flagx2 == 0x01 and flagx3 == 0x01)):
        verbout(color.RED, ' [+] The tampered token value works! Endpoint ' + color.BR + ' VULNERABLE to Replay Attacks ' + color.END + '!')
        verbout(color.ORANGE, ' [-] The Tampered Anti-CSRF Token requested does NOT return a 40x or 50x response! ')
        print(color.RED + ' [-] Endpoint ' + color.BR + ' CONFIRMED VULNERABLE ' + color.END + color.RED + ' to Request Forgery Attacks...')
        print(color.ORANGE + ' [!] Vulnerability Type: ' + color.BR + ' Non-Unique Anti-CSRF Tokens in Requests ' + color.END + '\n')
        VulnLogger(url, 'Anti-CSRF Tokens are not Unique. Token Reuse detected.', '[i] Request: ' + str(copy))
        return True
    else:
        print(color.RED + ' [-] The Tampered Anti-CSRF Token requested returns a 40x or 50x response... ')
        print(color.GREEN + ' [-] Endpoint ' + color.BG + ' NOT VULNERABLE ' + color.END + color.ORANGE + ' to CSRF Attacks...')
        print(color.ORANGE + ' [!] CSRF Mitigation Method: ' + color.BG + ' Unique Anti-CSRF Tokens ' + color.END + '\n')
        NovulLogger(url, 'Unique Anti-CSRF Tokens. No token reuse.')
        return False
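
# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, shown only to make the tampering
# steps above concrete): replacing or dropping a single character of a token
# at a given index, which is what Steps 1 and 2 rely on.
def tamper_token_sketch(token, index=3, repl=None):
    # Out-of-range indices leave the token untouched.
    if index >= len(token):
        return token
    if repl is None:
        # Index removal: drop the character at `index`.
        return token[:index] + token[index + 1:]
    # Index replacement: substitute the character at `index`.
    return token[:index] + repl + token[index + 1:]

# Example: tamper_token_sketch('abcdef', 3, 'x') -> 'abcxef'
#          tamper_token_sketch('abcdef', 3)      -> 'abcef'
# ----------------------------------------------------------------------------
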
def Persistence(url, postq):
    '''
    The main idea here is to check for Cookie Persistence.
    '''
    verbout(color.RED, '\n +-----------------------------------+')
    verbout(color.RED, ' |   Cookie Persistence Validation   |')
    verbout(color.RED, ' +-----------------------------------+\n')
    verbout(GR, 'Proceeding to test for ' + color.GREY + 'Cookie Persistence' + color.END + '...')
    time.sleep(0.7)
    found = 0x00
    # Now let the real test begin...
    #
    # [Step 1]: Lets examine whether the cookies set by the server are persistent
    # or not. For this we'll have to parse the cookies set by the server and check
    # the time at which each cookie expires. Lets do it!
    #
    # First its time for GET type requests. Lets prepare our request.
    cookies = []
    verbout(C, 'Proceeding to test cookie persistence via ' + color.CYAN + 'Prepared GET Requests' + color.END + '...')
    gen_headers = HEADER_VALUES
    gen_headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'
    # Use the user-supplied cookie if one was given.
    if COOKIE_VALUE:
        for cookie in COOKIE_VALUE:
            gen_headers['Cookie'] = cookie
    verbout(GR, 'Making the request...')
    req = Get(url, headers=gen_headers)
    if req.cookies:
        for cook in req.cookies:
            if cook.expires:
                print(color.GREEN + ' [+] Persistent Cookies found in Response Headers!')
                print(color.GREY + ' [+] Cookie: ' + color.CYAN + cook.__str__())
                # cookie.expires returns a timestamp value. I didn't know that. :(
                # Spent over 2+ hours scratching my head over this, until I stumbled
                # upon a StackOverflow answer comment. So to decode it, we need to
                # convert it to a human-readable format.
                print(color.GREEN + ' [+] Cookie Expiry Period: ' + color.ORANGE + datetime.fromtimestamp(cook.expires).__str__())
                found = 0x01
                VulnLogger(url, 'Persistent Session Cookies Found.', '[i] Cookie: ' + req.headers.get('Set-Cookie'))
            else:
                NovulLogger(url, 'No Persistent Session Cookies.')
    if found == 0x00:
        verbout(R, 'No persistent session cookies identified on GET Type Requests!')
    verbout(C, 'Proceeding to test cookie persistence on ' + color.CYAN + 'POST Requests' + color.END + '...')
    # Now its time for POST based requests.
    #
    # NOTE: As a standard practice, every web application should supply a cookie
    # upon a POST query. It might or might not do so for GET requests.
    if postq.cookies:
        for cookie in postq.cookies:
            if cookie.expires:
                print(color.GREEN + ' [+] Persistent Cookies found in Response Headers!')
                print(color.GREY + ' [+] Cookie: ' + color.CYAN + cookie.__str__())
                # Again, convert the expiry timestamp to a human-readable format.
                print(color.GREEN + ' [+] Cookie Expiry Period: ' + color.ORANGE + datetime.fromtimestamp(cookie.expires).__str__())
                found = 0x01
                VulnLogger(url, 'Persistent Session Cookies Found.', '[i] Cookie: ' + postq.headers.get('Set-Cookie'))
                print(color.ORANGE + ' [!] Probable Insecure Practice: ' + color.BY + ' Persistent Session Cookies ' + color.END)
            else:
                NovulLogger(url, 'No Persistent Cookies.')
    if found == 0x00:
        verbout(R, 'No persistent session cookies identified upon POST Requests!')
        print(color.ORANGE + ' [+] Endpoint might be ' + color.BY + ' NOT VULNERABLE ' + color.END + color.ORANGE + ' to CSRF attacks!')
        print(color.ORANGE + ' [+] Detected : ' + color.BY + ' No Persistent Cookies ' + color.END)
    # [Step 2]: The idea here is to try to identify cookie persistence by observing
    # variations in the Set-Cookie header when different User-Agents are used. For
    # this test we have chosen 5 different, commonly used user-agents (as below)
    # and then we observe how the Set-Cookie header varies between them.
    #
    # We'll run this method only when we haven't identified persistence with the
    # previous algorithm.
    if found != 0x01:
        verbout(C, 'Proceeding to test cookie persistence via ' + color.CYAN + 'User-Agent Alteration' + color.END + '...')
        user_agents = {
            'Chrome on Windows 8.1':
                'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36',
            'Safari on iOS':
                'Mozilla/5.0 (iPhone; CPU iPhone OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4',
            'IE6 on Windows XP':
                'Mozilla/5.0 (Windows; U; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
            'Opera on Windows 10':
                'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991',
            'Chrome on Android':
                'Mozilla/5.0 (Linux; U; Android 2.3.1; en-us; MID Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1'
        }
        verbout(GR, 'Setting custom generic headers...')
        gen_headers = HEADER_VALUES
        resps = []  # Collected Set-Cookie values, one per User-Agent
        for name, agent in user_agents.items():
            verbout(C, 'Using User-Agent : ' + color.CYAN + name)
            verbout(GR, 'Value : ' + color.ORANGE + agent)
            gen_headers['User-Agent'] = agent
            if COOKIE_VALUE:
                for cookie in COOKIE_VALUE:
                    gen_headers['Cookie'] = cookie
            req = Get(url, headers=gen_headers)
            # We append to this list only when a Set-Cookie header is being supplied.
            if req.headers.get('Set-Cookie'):
                resps.append(req.headers.get('Set-Cookie'))
        if resps:
            if checkDuplicates(resps):
                verbout(G, 'Set-Cookie header does not change with varied User-Agents...')
                verbout(color.ORANGE, ' [+] Possible persistent session cookies found...')
                print(color.RED + ' [+] Possible CSRF Vulnerability Detected : ' + color.ORANGE + url + '!')
                print(color.ORANGE + ' [!] Probable Insecure Practice: ' + color.BY + ' Persistent Session Cookies ' + color.END)
                VulnLogger(url, 'Persistent Session Cookies Found.', '[i] Cookie: ' + req.headers.get('Set-Cookie'))
            else:
                verbout(G, 'Set-Cookie header changes with varied User-Agents...')
                verbout(R, 'No possible persistent session cookies found...')
                verbout(color.ORANGE, ' [+] Endpoint ' + color.BY + ' PROBABLY NOT VULNERABLE ' + color.END + color.ORANGE + ' to CSRF attacks!')
                verbout(color.ORANGE, ' [+] Application Practice Method Detected : ' + color.BY + ' No Persistent Cookies ' + color.END)
                NovulLogger(url, 'No Persistent Cookies.')
        else:
            verbout(R, 'No cookies are being set on any requests.')
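
# ----------------------------------------------------------------------------
# Illustrative sketch (standalone, hypothetical helper name): the persistence
# check above is essentially "does any cookie carry an expiry far in the
# future?". With `requests`, cookie.expires is a Unix timestamp, so the test
# reduces to a comparison against the current time.
def persistent_cookies_sketch(url):
    import time
    import requests
    persistent = []
    resp = requests.get(url, timeout=10)
    for cook in resp.cookies:
        # Session cookies have no expiry; persistent ones expire in the future.
        if cook.expires and cook.expires > time.time():
            persistent.append((cook.name, cook.expires))
    return persistent
# ----------------------------------------------------------------------------
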
def Analysis():
    '''
    The main idea here is to observe and analyse the patterns in which
    the Anti-CSRF tokens are generated by the server.
    '''
    ctr = 0  # Counter variable set to 0
    # Checking if the number of tokens gathered is greater than 1
    if len(REQUEST_TOKENS) > 1:
        verbout(color.RED, '\n +--------------+')
        verbout(color.RED, ' |   Analysis   |')
        verbout(color.RED, ' +--------------+\n')
        print(GR + 'Proceeding for post-scan analysis of tokens gathered...')
        verbout(G, 'A total of %s tokens were discovered during the scan' % (len(REQUEST_TOKENS)))
        # The idea here is to generate all possible combinations (not
        # considering permutations) from the list of discovered tokens
        # and derive the anti-CSRF token generation pattern.
        for tokenx1, tokenx2 in itertools.combinations(REQUEST_TOKENS, 2):
            try:
                verbout(GR, 'Analysing 2 Anti-CSRF Tokens from gathered requests...')
                verbout(color.CYAN, ' [+] First Token: ' + color.BLUE + tokenx1)
                verbout(color.ORANGE, ' [+] Shannon Entropy: ' + color.GREEN + '%s' % (calcEntropy(tokenx1)))
                verbout(color.CYAN, ' [+] Second Token: ' + color.BLUE + tokenx2)
                verbout(color.ORANGE, ' [+] Shannon Entropy: ' + color.GREEN + '%s' % (calcEntropy(tokenx2)))
                # Calculating the edit distance via the Damerau-Levenshtein algorithm
                m = stringdist.rdlevenshtein(tokenx1, tokenx2)
                verbout(color.CYAN, ' [+] Edit Distance Calculated: ' + color.GREY + str(m))
                # Now its time to calculate the alignment ratio
                n = stringdist.rdlevenshtein_norm(tokenx1, tokenx2)
                verbout(color.CYAN, ' [+] Alignment Ratio Calculated: ' + color.GREY + str(n))
                # If both tokens have the same length...
                if len(tokenx1) == len(tokenx2):
                    verbout(C, 'Token length calculated is same: ' + color.ORANGE + 'Each %s bytes' % len(byteString(tokenx1)))
                else:
                    verbout(C, 'Token length calculated is different: ' + color.ORANGE + 'By %s bytes' % (len(byteString(tokenx1)) - len(byteString(tokenx2))))
                time.sleep(0.5)
                # In my experience with web security assessments, the Anti-CSRF token
                # is often composed of two parts, one of which remains static while
                # the other is dynamic.
                #
                # For example, if the Anti-CSRF token for one request is
                # "837456mzy29jkd911139" and for another it is "837456mzy29jkd337221",
                # the "837456mzy29jkd" part of the token remains the same in both.
                #
                # The main idea here is to detect the static and dynamic parts via the
                # DL algorithm discussed above, by calculating the edit distance.
                p = sameSequence(tokenx1, tokenx2)
                tokenx01 = tokenx1.replace(p, '')
                tokenx02 = tokenx2.replace(p, '')
                if n == 0.5 or m == len(tokenx1) / 2:
                    verbout(GR, 'The tokens are composed of 2 parts (one static and the other dynamic)... ')
                    verbout(C, 'Static Part : ' + color.GREY + p + color.END + ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(O, 'Dynamic Part of Token 0x1: ' + color.GREY + tokenx01 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx01)))
                    verbout(O, 'Dynamic Part of Token 0x2: ' + color.GREY + tokenx02 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx02)))
                    if len(tokenx1) / 2 <= 6:
                        verbout(color.RED, ' [-] Post-Analysis reveals that token might be ' + color.BR + ' VULNERABLE ' + color.END + '!')
                        print(color.RED + ' [+] Possible CSRF Vulnerability Detected!')
                        print(color.ORANGE + ' [!] Vulnerability Type: ' + color.BR + ' Weak Dynamic Part of Tokens ' + color.END)
                        print(color.GREY + ' [+] Tokens can easily be ' + color.RED + 'Forged by Bruteforcing/Guessing' + color.END + '!\n')
                        VulnLogger('Analysis', 'Tokens can easily be Forged by Bruteforcing/Guessing.',
                                   '[i] Token 1: ' + tokenx1 + '\n[i] Token 2: ' + tokenx2)
                elif n < 0.5 or m < len(tokenx1) / 2:
                    verbout(R, 'Token distance calculated is ' + color.RED + 'less than 0.5!')
                    verbout(C, 'Static Part : ' + color.GREY + p + color.END + ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(O, 'Dynamic Part of Token 0x1: ' + color.GREY + tokenx01 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx01)))
                    verbout(O, 'Dynamic Part of Token 0x2: ' + color.GREY + tokenx02 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx02)))
                    verbout(color.RED, ' [-] Post-Analysis reveals that token might be ' + color.BR + ' VULNERABLE ' + color.END + '!')
                    print(color.RED + ' [+] Possible CSRF Vulnerability Detected!')
                    print(color.ORANGE + ' [!] Vulnerability Type: ' + color.BR + ' Weak Dynamic Part of Tokens ' + color.END)
                    print(color.GREY + ' [+] Tokens can easily be ' + color.RED + 'Forged by Bruteforcing/Guessing' + color.END + '!\n')
                    VulnLogger('Analysis', 'Tokens can easily be Forged by Bruteforcing/Guessing.',
                               '[i] Token 1: ' + tokenx1 + '\n[i] Token 2: ' + tokenx2)
                else:
                    verbout(R, 'Token distance calculated is ' + color.GREEN + 'greater than 0.5!')
                    verbout(C, 'Static Part : ' + color.GREY + p + color.END + ' | Length: ' + color.CYAN + str(len(p)))
                    verbout(O, 'Dynamic Part of Token 0x1: ' + color.GREY + tokenx01 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx01)))
                    verbout(O, 'Dynamic Part of Token 0x2: ' + color.GREY + tokenx02 + color.END + ' | Length: ' + color.CYAN + str(len(tokenx02)))
                    verbout(color.GREEN, ' [+] Post-Analysis reveals that tokens are ' + color.BG + ' NOT VULNERABLE ' + color.END + '!')
                    print(color.ORANGE + ' [!] Vulnerability Mitigation: ' + color.BG + ' Strong Dynamic Part of Tokens ' + color.END)
                    print(color.GREY + ' [+] Tokens ' + color.GREEN + 'Cannot be Forged by Bruteforcing/Guessing' + color.END + '!\n')
                    NovulLogger('Analysis', 'Tokens cannot be Forged by Bruteforcing/Guessing.')
                time.sleep(1)
            except KeyboardInterrupt:
                ctr += 1
                continue
        print(C + 'Post-Scan Analysis Completed!')
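
# ----------------------------------------------------------------------------
# Illustrative sketch (standalone, stdlib only; the helper name and the use of
# difflib are assumptions, not the tool's sameSequence/stringdist calls): split
# two tokens into a shared "static" part and per-token "dynamic" remainders,
# which is the intuition behind the pattern analysis above.
def split_static_dynamic_sketch(token1, token2):
    from difflib import SequenceMatcher
    # The longest contiguous block common to both tokens acts as the static part.
    match = SequenceMatcher(None, token1, token2).find_longest_match(
        0, len(token1), 0, len(token2))
    static = token1[match.a:match.a + match.size]
    return static, token1.replace(static, ''), token2.replace(static, '')

# Example:
#   split_static_dynamic_sketch('837456mzy29jkd911139', '837456mzy29jkd337221')
#   -> ('837456mzy29jkd', '911139', '337221')
# ----------------------------------------------------------------------------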