Example #1
# Pages fetched from the database that have been verified to contain exploits; these pages are marked as exploitable
PAGES_FOUND_WITH_EXPLOITS = []

# List of pages fetched from the database
DB_FETECHED_PAGES_CHECK_FOR_EXPLOITS = []

# START SCRIPT
##############################################################################################
# U.cls()
# U.show_crawling_screen()
JOB = Db.job_get(JOB_URL)
if JOB:
    JOB_ID = str(JOB['_id'])
    U.cprint_show_job(JOB)
    if JOB['scan_finished'] == 1:
        U.cprint('JOB HAS BEEN ANALYSED', 'WARNING', False, False)
        U.hr(False)
    else:
        # Good to go and start the script
        DB_FETECHED_PAGES_CHECK_FOR_EXPLOITS = Db.page_get_unscanned(JOB_ID)
        found = len(DB_FETECHED_PAGES_CHECK_FOR_EXPLOITS)

        if found == 0:
            U.cprint('JOB: ANALYSING: NO URLS FOR THIS JOB', 'FAIL', True)
        else:
            U.cprint('JOB ANALYSIS START [' + str(found) + "] PAGES",
                     'OKGREEN', False, False)
            U.hr(False)

        # START VERIFYING PAGES AGAINST ALL EXPLOITS
        #######################################################################################
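        # Sketch only: EXPLOITS_LIST and the process() signature are assumed from the
        # attack example further down; the real verification loop is not shown here.
        for page in DB_FETECHED_PAGES_CHECK_FOR_EXPLOITS:
            for exploits in EXPLOITS_LIST:
                if exploits.process(page['page_html']):
                    PAGES_FOUND_WITH_EXPLOITS.append(page)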
Example #2
            if found:
                U.cprint("Exploit Worked: [ " + exploits.__class__.__name__ + ']', 'BOLD', False)
                U.cprint("Page : [ " + domain + "/" + path + ']', 'OKGREEN', False)
                U.cprint("Fruits: [" + str(found) + "]", 'BOLD', True)
                SUCCESS_COUNT += 1
        
        
# START SCRIPT
##############################################################################################

JOB = Db.job_get(JOB_URL)
if JOB:
    JOB_ID = str(JOB['_id'])
    U.cprint_show_job(JOB)
    if JOB['attack_finished'] == 1:
        U.cprint('JOB HAS BEEN ATTACKED', 'WARNING', True)
    else:
        # Good to go and start the script
        DB_FETECHED_PAGES_ATTACK_FOR_EXPLOITS = Db.page_get_attackable(JOB_ID)
        found = len(DB_FETECHED_PAGES_ATTACK_FOR_EXPLOITS)

        if found == 0:
            U.cprint('JOB: ATTACK: NO URLS FOR THIS JOB', 'FAIL', True)
        else:
            U.cprint('JOB ATTACKING [' + str(found) +
                     "] PAGES WITH [" + str(len(EXPLOITS_LIST)) + "] EXPLOITS EACH", 'OKGREEN', False, False)
            U.hr(False)
            for page in DB_FETECHED_PAGES_ATTACK_FOR_EXPLOITS:
                # We are only interested in pages with status code 200
                if page['status_code'] == 200:
                    # For each exploit in our List, we call its process method on the html
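                    # Hedged sketch of the truncated per-exploit loop: each exploit object in
                    # EXPLOITS_LIST is assumed to expose a process() method that takes the
                    # stored page html and returns the number of hits, which is what the
                    # "Exploit Worked" block above reads from `exploits` and `found`.
                    for exploits in EXPLOITS_LIST:
                        found = exploits.process(page['page_html'])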
                "page_html": response.body,
                "page_path": U.path_from_url(response.request.url),
                "page_full_url": response.request.url,
                "external_links_list": GLOBAL_UNIQUE_EXTERNAL_LINKS,
                "internal_links_list": GLOBAL_UNIQUE_INTERNAL_LINKS
            }
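            # This record is presumably what gets persisted for each crawled page and
            # what Db.page_get_unscanned() / Db.page_get_attackable() return to the
            # analysis and attack scripts in the earlier examples.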


# Crawling Start
##############################################################################################
JOB = Db.job_get(JOB_URL)
if JOB:
    JOB_ID = str(JOB['_id'])
    U.cprint_show_job(JOB)
    if JOB['crawl_finished'] == 1:
        U.cprint('JOB HAS BEEN CRAWLED', 'WARNING', False, False)
        U.hr(False)
    else:
        # Good to go and start the script
        U.cprint('JOB CRAWLING START', 'OKGREEN', False, False)
        U.hr(False)
        process = CrawlerProcess()

        if "" == JOB_LOGIN_URL:
            process.crawl(NonAuthCrawler)
        else:
            process.crawl(AuthCrawler)

        process.start()  # the script will block here until all crawling jobs are finished
else: