Example #1
def main():
    print("Siva Vulnerability Scanner v0.4")
    user_name = input("USER NAME: ")
    password = getpass.getpass()
    try:
        connection = pymysql.connect(host="localhost",
                                     user=user_name,
                                     password=password,
                                     db="siva")
    except pymysql.err.OperationalError:
        print("[-] WRONG PASSWORD. EXITING...")
        sys.exit()
    project_id = int(input("Project ID: "))
    url = input("URL: ")
    scheme = URL().get_scheme(url)
    host_name = URL().get_host_name(url)
    project_url = scheme + "://" + host_name
    # Create a "projects" directory for storing files from the remote host,
    # a working directory for this project, and an images directory for
    # proof-of-concept screenshots
    Directory.create_directory("projects")
    Directory.create_directory("projects/project-" + str(project_id))
    Directory.create_directory("projects/project-" + str(project_id) + "/images")
    if SivaDB().create_project(connection=connection,
                               project_id=project_id,
                               url=project_url):
        check_for_vulnerabilities(connection, project_id, url)
    else:
        print("Cannot be scanned!")
Example #2
 def __init__(self, project_id, connection, thread_semaphore,
              database_semaphore, url):
     """
     Paramters:
     ==========
     :param thread_semaphore: This semaphore is used to control the running threads
     :param database_semaphore: This semaphore is used to add control the threads which
     adds information to the database
     :param url: The url for which the information is to be gathered
     :param connection: MySQL database connection object
     :return: None
     """
     self.__project_id = project_id
     self.__connection = connection
     self.__thread_semaphore = thread_semaphore
     self.__database_semaphore = database_semaphore
     self.__url = url
     # get the ip address of the url
     with ThreadPoolExecutor(max_workers=1) as executor:
         ip = executor.submit(URL().get_ip, self.__url)
         self.__ip = ip.result()
     # we will get the headers of the request (one HEAD request, reused)
     head_response = URL().get_head_request(
         url=self.__url, user_agent=UserAgent.get_user_agent())
     self.__headers = head_response.headers if head_response is not None else ""
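Note: URL().get_head_request is a project helper whose implementation is not shown here; a minimal sketch of what it presumably wraps, using the requests library (hypothetical, for illustration only):

# Hypothetical sketch of a HEAD-request helper in the spirit of
# URL.get_head_request: returns the response, or None on network failure
import requests

def get_head_request(url, user_agent):
    try:
        return requests.head(url, headers={"User-Agent": user_agent}, timeout=10)
    except requests.RequestException:
        return None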
Example #3
 def test_query_present(self):
     self.assertEqual(URL().is_query_present("https://github.com"), False)
     self.assertEqual(
         URL().is_query_present("https://github.com/kdfhh/index?id=1"),
         True)
     self.assertEqual(URL().is_query_present("https://github.com/kdfhh/"),
                      False)
Example #4
 def __preprocess(self):
     print("[+] PRE-PROCESSING ROBOTS.TXT")
     self.__thread_semaphore.acquire(timeout=10)
     with open(self.__robots_path, "r") as robots_file:
         robots_file_contents = robots_file.readlines()
     for content in robots_file_contents:
         content = content.strip()
         try:
             # If it is a comment, store it as raw information
             if content[0] == "#":
                 SivaDB.update_raw_info(connection=self.__connection, project_id=self.__project_id,
                                        info_source="robots.txt", information=content,
                                        database_semaphore=self.__database_semaphore)
             elif content[0] == "U":
                 # "User-agent" lines are copied through unchanged
                 self.__robots_preprocessed_file.write(content + "\n")
             else:
                 full_content = ""  # Minor fix @v0.2
                 if content[0] == "D":
                     # Expand "Disallow" rules into absolute urls
                     content = content.replace("Disallow:", "").strip()
                     full_url = URL.join_urls(self.__url, content)
                     full_content = "Disallow: " + full_url
                 elif content[0] == "A":
                     # Expand "Allow" rules into absolute urls
                     content = content.replace("Allow:", "").strip()
                     full_url = URL.join_urls(self.__url, content)
                     full_content = "Allow: " + full_url
                 self.__robots_preprocessed_file.write(full_content + "\n")
         except IndexError:
             self.__robots_preprocessed_file.write("\n")
     self.__thread_semaphore.release()
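For example, a rule such as "Disallow: /admin/" in the downloaded robots.txt leaves the preprocessor as "Disallow: http://target.example/admin/" (the host shown is hypothetical), so later stages can request each listed path directly.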
Example #5
 def test_get_request(self):
     self.assertNotEqual(
         first=URL().get_request(url="https://www.kalasalingam.ac.in"),
         second=None)
     self.assertEqual(first=URL().get_request(
         url="https://www.kaladbfjsdwljgfjsalingam.ac.in"),
                      second=None)
     self.assertNotEqual(
         first=URL().get_request(url="https://www.google.co.in"),
         second=None)
Example #6
 def __check_programming_language(self, url):
     """
     Description:
     ============
     This method will try its level best to get the name of the programming
     language used to build the website.
     Notes:
     ======
     This method will heavily used URL class from url package
     :return:
     """
     self.__thread_semaphore.acquire()
     print("[+] ANALYSING PROGRAMMING LANGUAGE")
     # These are the popular programming languages used for designing websites
     language_names = {
         ".php": "PHP",
         ".jsp": "JSP",
         ".asp": "ASP",
         ".aspx": "ASPX",
         ".py": "PYTHON",
         ".pl": "PERL"
     }
     user_agent = UserAgent.get_user_agent()
     r = URL().get_request(url=url, user_agent=user_agent)
     if r is not None:
         soup = BeautifulSoup(r.content, "html.parser")
         for anchor in soup.find_all("a"):
             try:
                 partial_url = anchor.get("href")
                 if "http" not in partial_url:
                     new_url = URL.join_urls(url, partial_url)
                 else:
                     new_url = partial_url if URL.is_same_domain(
                         url, partial_url) else ""
                 file_name = URL.get_file_name(new_url)
                 if file_name is not None:
                     for extension in language_names:
                         if extension in file_name:
                             self.__programming_language_used = language_names[extension]
                             # Now we will update the programming language used into the database
                             InfoGatheringPhaseOneDatabase.update_programming_language(
                                 self.__database_semaphore, self.__connection,
                                 self.__project_id,
                                 self.__programming_language_used)
                             break
             except Exception:
                 pass
     self.__thread_semaphore.release()
Example #7
 def crawl(self, url):
     """
     Description:
     ------------
     This will crawl the urls completely
     :param url: The url to be crawled
     :return: None
     """
     start_time = time.time()
     r = URL().get_request(url=url, user_agent=UserAgent.get_user_agent())
     end_time = time.time()
     total_time = end_time - start_time
     self.__bob_object.predict(total_time)
     if r is not None:
         soup = BeautifulSoup(r.content, "html.parser")
             # At this stage we have got the beautiful soup object
             # First, find all the href links
         for i in soup.find_all("a"):
             try:
                 partial_url = i.get("href")
                 url_to_be_scanned = None  # the url that will be scanned
                 # Check if the partial url is actually a partial url
                 if "http" in partial_url:
                     if URL.is_same_domain(self.__base_url, partial_url):
                         if partial_url not in self.__crawled_urls:
                             self.__urls.put(partial_url)
                             self.__crawled_urls.append(partial_url)
                             url_to_be_scanned = partial_url
                 else:
                     full_url = URL.join_urls(self.__base_url, partial_url)
                     if full_url not in self.__crawled_urls:
                         self.__urls.put(full_url)
                         self.__crawled_urls.append(full_url)
                         url_to_be_scanned = full_url
                 # run a simple scan in the url
                 if url_to_be_scanned is not None:
                     print("[i] CURRENTLY SCANNING [GET]: ",
                           url_to_be_scanned)
                     # Hand the url off to a SimpleScan instance
                     SimpleScan(
                         project_id=self.__project_id,
                         thread_semaphore=self.__thread_semaphore,
                         database_semaphore=self.__database_semaphore,
                         url=url_to_be_scanned,
                         connection=self.__connection,
                         poc_object=self.__poc_object)
             except Exception as e:
                 print("[-] EXCEPTION OCCURRED ", e)
     while not self.__urls.empty():
         self.crawl(self.__urls.get())
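Design note: the tail of crawl() re-enters itself once for every queued url, so a large site can exhaust Python's default recursion limit. A minimal sketch of the same queue discipline written as a flat loop (__process_page is a hypothetical stand-in for the link-extraction body above):

 def crawl(self, url):
     self.__urls.put(url)
     while not self.__urls.empty():
         # Same queue, but iteration instead of recursion
         self.__process_page(self.__urls.get())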
Example #8
 def __check_escape_sequence_vulnerability(self):
     """
     Description:
     ------------
     We will append a single quote (') to check if the sql vulnerability is happended or not
     :return:
     """
     # We will append ' to all the individual parameters and store it to payloaded urls
     self.__thread_semaphore.acquire()
     payloaded_urls = Query().append_payload_to_all_queries(
         url=self.__url, payload="'")
     for payloaded_url in payloaded_urls:
         print(payloaded_url)
         r = URL().get_request(
             url=payloaded_url, user_agent=UserAgent.get_user_agent())
         if r is not None:
             new_soup_object = BeautifulSoup(r.content, "html.parser")
             # Now compare both soup objects
             SQLErrorIdentifier(
                 project_id=self.__project_id,
                 thread_semaphore=self.__thread_semaphore,
                 database_semaphore=self.__database_semaphore,
                 original_soup_object=self.__soup_object,
                 payloaded_soup_object=new_soup_object,
                 original_url=self.__url,
                 payloaded_url=payloaded_url,
                 connection=self.__connection,
                 poc_object=self.__poc_object)
     self.__thread_semaphore.release()
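Query().append_payload_to_all_queries is a project helper whose implementation is not shown here; one plausible sketch of the idea with urllib.parse, yielding one candidate url per parameter (hypothetical, for illustration only):

# Hypothetical sketch: append the payload to each query parameter in turn,
# producing one candidate url per parameter
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def append_payload_to_all_queries(url, payload):
    parts = urlparse(url)
    params = parse_qsl(parts.query)
    payloaded_urls = []
    for index, (key, value) in enumerate(params):
        mutated = list(params)
        mutated[index] = (key, value + payload)  # e.g. id=1 -> id=1'
        payloaded_urls.append(urlunparse(parts._replace(query=urlencode(mutated))))
    return payloaded_urls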
Example #9
 def __check_numerical_vulnerability(self):
     """
     Description:
     -----------
     This method is used to check the numerical SQL vulnerability in the give url.
     See:
     -----
     Numerical Vulnerability in references.txt
     :return: None
     """
     self.__thread_semaphore.acquire()
     payloaded_urls = Query.add_one(self.__url)
     for payloaded_url in payloaded_urls:
         r = URL().get_request(
             url=payloaded_url, user_agent=UserAgent.get_user_agent())
         if r is not None:
             new_soup_object = BeautifulSoup(r.content, "html.parser")
             if self.__soup_object == new_soup_object:
                 print("[+] NUMERICAL VULNERABILITY FOUND IN THE DATABASE")
                 print("[+] PAYLOAD: ", payloaded_url)
                 SivaDB.update_analysis(
                     connection=self.__connection,
                     database_semaphore=self.__database_semaphore,
                     project_id=self.__project_id,
                     method="GET",
                     source=self.__url,
                     payload=payloaded_url,
                     description="NUMERICAL VULNERABILITY")
     self.__thread_semaphore.release()
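Note: the equality test above carries the whole check. Judging by its name, Query.add_one presumably rewrites each numeric query value as an arithmetically equivalent expression (for example ?id=2 becoming ?id=1%2B1), so the payloaded response can only match the original page when the raw parameter is evaluated inside the SQL statement rather than treated as an opaque string.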
Example #10
 def __init__(self, project_id, url, thread_semaphore, database_semaphore,
              connection):
     """
     :param project_id: The id of the project
     :param url: The website for which the administrator page is to be found
     :param thread_semaphore:
     :param database_semaphore:
     """
     self.__project_id = project_id
     self.__url = url
     self.__thread_semaphore = thread_semaphore
     self.__database_semaphore = database_semaphore
     self.__connection = connection
     admin_contents = File.read_to_list("admin.txt")
     for admin_page in tqdm(admin_contents, ncols=100):
         self.__thread_semaphore.acquire()
         admin_url = URL.join_urls(self.__url, admin_page)
         t = Thread(target=self.add_if_page_found, args=(admin_url, ))
         t.start()
     print("[+] WAITING FOR THE THREADS TO COMPLETE THEIR TASKS")
     for thread in self.__threads:
         if thread.is_alive():
             thread.join()
     # Now display and add the admin pages in database table named "admin_table"
     for admin_page in self.__admin_pages:
         print("[+] ADMIN PAGE: ", admin_page)
         self.update_admin_page(
             project_id=project_id,
             url=admin_page,
             connection=self.__connection,
             database_semaphore=self.__database_semaphore)
Example #11
 def gather_information(self):
     """
     This method is used to gather all the webserver information
     :return: None
     """
     # By now we have obtained the url and the I.P address of the website
     # Now scan for firewalls
     if self.__ip is not None:
         firewall_check = Thread(target=self.__check_for_firewall)
         firewall_check.start()
         firewall_check.join()
         # self.__firewall now has the name of the firewall if present
     # At this stage we have acquired self.__url, self.__ip and self.__firewall
     if self.__firewall is None:
         server_name = Thread(target=self.__get_webserver_name)
         server_name.start()
         server_name.join()
         # Now we have the web server name
     # Now get the web server os
     if self.__webserver_name is not None:
         if "Win" in self.__webserver_name:
             self.__webserver_os = "Windows"
         if "Uni" in self.__webserver_name:
             self.__webserver_os = "Unix"
         if "Lin" in self.__webserver_name:
             self.__webserver_os = "Linux"
     # Now get the programming language
     programming_lang = Thread(target=self.__get_programming_language)
     programming_lang.start()
     programming_lang.join()
     # Now let us see what we have got
     print("IP:       ", self.__ip)
     print("DOMAIN:   ", URL().get_host_name(self.__url))
     print("SERVER:   ", self.__webserver_name)
     print("OS:       ", self.__webserver_os)
     print("Firewall: ", self.__firewall)
     print("Language: ", self.__programming_language_used)
     if self.__ip is None:
         self.__ip = "None"
     if self.__webserver_name is None:
         self.__webserver_name = "None"
     if self.__webserver_os is None:
         self.__webserver_os = "None"
     if self.__programming_language_used is None:
         self.__programming_language_used = "None"
     if self.__firewall is None:
         self.__firewall = "None"
     # Now add the information to database
     query = "insert into info_gathering values(%s,%s,%s,%s,%s,%s,%s)"
     args = (self.__project_id, "PRELIMINARY", self.__ip,
             self.__webserver_name, self.__webserver_os,
             self.__programming_language_used, self.__firewall)
     # A thread to add the information to the database
     database_adding_thread = Thread(
         target=self.add_info_gathering_phase_one,
         args=(self.__database_semaphore, self.__connection, query, args))
     database_adding_thread.start()
     database_adding_thread.join()
Example #12
 def __get_programming_language(self):
     """
     We will use to get the programming language from headers of the request
     :return: None
     """
     self.__thread_semaphore.acquire()
     try:
         self.__programming_language_used = self.__headers['X-Powered-By']
     except KeyError:
         self.__programming_language_used = None
     except Exception as e:
         print(e)
         self.__programming_language_used = None
     # If we didn't get the programming language we will try to get
     # it from the cookies
     if self.__programming_language_used is None:
         r = URL().get_request(url=self.__url,
                               user_agent=UserAgent.get_user_agent())
         session_id = requests.utils.dict_from_cookiejar(
             r.cookies) if r is not None else {}
         # session_id contains the session cookies of the targeted url
         if "PHPSESSID" in session_id:
             self.__programming_language_used = "PHP"
         elif "JSESSIONID" in session_id:
             self.__programming_language_used = "J2EE"
         elif "ASP.NET_SessionId" in session_id:
             self.__programming_language_used = "ASP.NET"
         elif "CFID" in session_id and "CFTOKEN" in session_id:
             self.__programming_language_used = "COLDFUSION"
         else:
             self.__programming_language_used = "None"
     self.__thread_semaphore.release()
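The session-cookie names used here are conventional framework defaults (PHPSESSID for PHP, JSESSIONID for Java servlet containers, ASP.NET_SessionId for ASP.NET, CFID/CFTOKEN for ColdFusion); a minimal standalone sketch of the same fingerprinting idea with requests (hypothetical helper, for illustration only):

# Minimal sketch of session-cookie fingerprinting; the cookie names are
# framework defaults, not project-specific values
import requests

COOKIE_LANGUAGE = {
    "PHPSESSID": "PHP",
    "JSESSIONID": "J2EE",
    "ASP.NET_SessionId": "ASP.NET",
    "CFID": "COLDFUSION",
}

def language_from_cookies(url, user_agent):
    r = requests.get(url, headers={"User-Agent": user_agent}, timeout=10)
    cookies = requests.utils.dict_from_cookiejar(r.cookies)
    for name, language in COOKIE_LANGUAGE.items():
        if name in cookies:
            return language
    return None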
Example #13
 def test_post_request(self):
     response = URL().post_request("http://edu.kalasalingam.ac.in/sis/",
                                   data={
                                       'log': '9915004240',
                                       'pwd': '07091998'
                                   })
     self.assertNotEqual(first=response, second=None)
     self.assertEqual(first=response.status_code,
                      second=200,
                      msg="Not properly logging in!")
Example #14
 def bruteforce(self):
     self.__browser = webdriver.PhantomJS(Static.phantomjs)
     for partial_url in self.__keywords:
         new_url = URL.join_urls(self.__website, partial_url)
         self.__browser.get(new_url)
         print(self.__browser.current_url)
         print(self.__browser.get_log("har"))
     self.__browser.quit()
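Note: webdriver.PhantomJS is deprecated and has been removed from recent Selenium releases; headless Chrome or Firefox is the usual replacement, although the "har" log type used above was a PhantomJS/GhostDriver feature.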
Example #15
 def __init__(self, project_id, url, thread_semaphore, database_semaphore,
              soup_object, connection, poc_object):
     self.__project_id = project_id
     self.__url = url
     self.__thread_semaphore = thread_semaphore
     self.__database_semaphore = database_semaphore
     self.__connection = connection
     self.__poc_object = poc_object
     # NOTE: self.__soup_object is the original unaltered BeautifulSoup object
     if soup_object is not None:
         self.__soup_object = soup_object
     else:
         r = URL().get_request(
             url=self.__url, user_agent=UserAgent.get_user_agent())
         # Guard against a failed request before building the soup object
         self.__soup_object = BeautifulSoup(
             r.content, "html.parser") if r is not None else None
     if URL.is_query_present(self.__url):
         self.__check_escape_sequence_vulnerability()
         self.__check_numerical_vulnerability()
Example #16
 def run(self):
     self.__requests_object = URL().get_request(
         url=self.__url, user_agent=UserAgent.get_user_agent())
     if self.__requests_object is None:
         # Nothing to scan if the page could not be fetched
         return
     self.__soup_object = BeautifulSoup(self.__requests_object.content,
                                        "html.parser")
     # By now we have got the requests object and soup object
     # ================= SQL Injection Test ====================
     sqli_thread = Thread(target=self.check_sql_injection)
     sqli_thread.start()
     # ================= HTML VULNERABILITIES ============
     self.check_html_vulnerabilities()
Example #17
 def __get_robots(self):
     """
     Description:
     ------------
     This method is used to get the robots.txt file from the remote server
     :return:
     """
     self.__thread_semaphore.acquire()
     robots_url = URL.join_urls(self.__url, "/robots.txt")
     print("[+] GETTING ROBOTS.TXT AT ", robots_url)
     r = URL().get_head_request(url=robots_url,
                                user_agent=UserAgent.get_user_agent())
     if r is not None:
         if r.status_code == 200:
             robots_file_location = "projects/project-" + str(
                 self.__project_id) + "/robots.txt"
             File.download_file(local_file_location=robots_file_location,
                                remote_file_location=robots_url)
         else:
             print("[-] NO robots.txt FOUND IN THE SERVER")
     self.__thread_semaphore.release()
Example #18
 def test_file_name(self):
     self.assertEqual(URL().get_file_name("https://github.com/scikit-learn/scikit-learn"), None, "Not working for urls without file names")
     self.assertEqual(URL().get_file_name("https://github.com/scikit-learn/scikit-learn/index.html"), "index.html")
     self.assertEqual(URL().get_file_name("https://github.com"), None)
     self.assertEqual(URL().get_file_name("h"), None)
     self.assertEqual(URL().get_file_name("/a."), "a.")
     self.assertEqual(URL().get_file_name(None), None)
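For reference, a minimal implementation consistent with these assertions (a sketch, not necessarily the project's actual URL.get_file_name):

# Hypothetical sketch: the file name is the last path segment, and only
# counts as a file when it contains a dot
from urllib.parse import urlparse

def get_file_name(url):
    if url is None:
        return None
    last_segment = urlparse(url).path.rsplit("/", 1)[-1]
    return last_segment if "." in last_segment else None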
Example #19
 def test_is_same_domain(self):
     self.assertEqual(URL().is_same_domain("https://github.com", "https://github.com"), True)
     self.assertEqual(URL().is_same_domain("https://github.com", ""), False)
     self.assertEqual(URL().is_same_domain(None, "https://github.com"), False)
     self.assertEqual(URL().is_same_domain("https://github.com", "https://github.com"), True)
     self.assertEqual(URL().is_same_domain("https://github.com", None), False)
     self.assertEqual(URL().is_same_domain(None, None), False)
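A minimal implementation consistent with these assertions (again a sketch, not the project's actual code):

# Hypothetical sketch: two urls share a domain when both parse to the
# same non-empty network location
from urllib.parse import urlparse

def is_same_domain(first_url, second_url):
    if not first_url or not second_url:
        return False
    return urlparse(first_url).netloc == urlparse(second_url).netloc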
Example #20
 def __set_response_time_of_fastest_website(self):
     """
     Description:
     ------------
     This method will calculate the response time of the fastest website
     :return:
     """
     start_time = time.time()
     r = URL().get_request(
         url=self.__fastest_website, user_agent=UserAgent.get_user_agent())
     end_time = time.time()
     if r is not None:
         self.__response_time_of_fastest_website = end_time - start_time
Example #21
 def add_if_page_found(self, url):
     """
     Description:
     ------------
     This will add the information to the database if admin page is found
     :param url: The url to be added to the database
     :return: None
     """
     r = URL().get_head_request(url=url,
                                user_agent=UserAgent.get_user_agent())
     try:
         if r.status_code == 200:
             if url not in self.__admin_pages:
                 self.__admin_pages.append(url)
     except AttributeError:
         pass
     # The matching acquire() happens in the caller that spawned this thread
     self.__thread_semaphore.release()
Example #22
 def create_project(connection, project_id, url):
     """
     Description:
     =============
     This method is used to create the project, and the project id
     :param connection: The database connection object
     :param project_id: The id of the project
     :param url: The url of the project
     :return: True if the process is competed else false
     """
     try:
         cursor = connection.cursor()
         cursor.execute("insert into project values(%s, %s)",
                        (project_id, URL().get_host_name(url)))
         connection.commit()
         return True
     except Exception as e:
         print(e)
         return False
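The two-value insert implies a minimal project table; a plausible setup sketch with pymysql (the schema is an assumption, not shown in this listing):

# Hypothetical schema inferred from the two-value insert above
def create_project_table(connection):
    with connection.cursor() as cursor:
        cursor.execute("create table if not exists project"
                       "(project_id int primary key, host_name varchar(255))")
    connection.commit()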