def xss_check(n, i, url):
    # Inject payload number i into input field number n of the first form on
    # the given url and check whether the payload is reflected in the response.

    vulnerable = False

    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.addheaders = [('User-agent', 'Firefox')]

    br.open(url)
    br.select_form(nr=0)

    try:
        # id[n] is the name of the n-th input field found by the crawler
        br.form[id[n]] = payloads[i]

        # Submit the form
        sub = br.submit()

        # Print url after submitting
        print "URL to page after injecting payload: " + sub.geturl() + '\n'
        FileSetter.append_to_file(
            os.path.join(Path_To_Project_Dir, 'XSS_Results.txt'),
            "URL to page after injecting payload: " + sub.geturl() + '\n')
        # Re-open the result page once and reuse the body instead of
        # fetching it again for every print/write
        response = br.open(sub.geturl()).read()

        if payloads[i] in response:
            vulnerable = True
            FileSetter.append_to_file(
                os.path.join(Path_To_Project_Dir, 'XSS_Results.txt'),
                "Page is vulnerable, here is the HTTP response: \n" +
                response + '\n')
            print "Page is vulnerable, here is the HTTP response:"
            print response

        else:
            print response

        br.close()
    except ValueError:
        # mechanize raises ControlNotFoundError (a ValueError subclass) when
        # the form has no control with this name, so just skip this field
        pass

    # Try the next payload on the same input field; once every payload has
    # been used, move on to the next input field. The scan ends when id[n]
    # runs past the last field and the IndexError is caught below.
    try:
        if i < len(payloads) - 1:
            xss_check(n, i + 1, url)
        else:
            xss_check(n + 1, 0, url)
    except IndexError:
        if not vulnerable:
            FileSetter.append_to_file(
                os.path.join(Path_To_Project_Dir, 'XSS_Results.txt'),
                'All text input fields of url: ' + url +
                ' were checked, no vulnerabilities found' + '\n\n')
            print 'All text input fields of url: ' + url + ' were checked, no vulnerabilities found'
        elif vulnerable:
            FileSetter.append_to_file(
                os.path.join(Path_To_Project_Dir, 'XSS_Results.txt'),
                'Vulnerabilities in: ' + url + ' were found!' + '\n\n')
            print 'Vulnerabilities in: ' + url + ' were found!'
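The excerpt does not show how xss_check is kicked off. A minimal sketch of one possible driver, assuming the script's os import, Path_To_Project_Dir, and pre-loaded id and payloads lists, and that every crawled link has been written to allLinks.txt (the loop below is an illustration, not the project's actual entry point):

# Hypothetical driver, not the project's actual entry point: read every
# crawled link from allLinks.txt and start the recursive scan with the
# first input field (n=0) and the first payload (i=0).
with open(os.path.join(Path_To_Project_Dir, 'allLinks.txt'), 'rt') as f:
    for target in f.read().split('\n'):
        if target:
            xss_check(0, 0, target)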
Example #2
def store_input_field_ids(i):
    # Read the i-th link from allLinks.txt, collect the names of its input
    # fields and append them to ids.txt, then continue with the next link.
    try:
        with open(os.path.join(Path_To_Project_Dir, 'allLinks.txt'),
                  'rt') as f:
            line = f.read().split('\n')
            if len(line) >= i + 1:
                FileSetter.put_ids_in_file(
                    id_crawler(line[i]),
                    os.path.join(Path_To_Project_Dir, 'ids.txt'))
                if len(line[i]) > 1:
                    print str(i + 1) + '. link scanned'
                store_input_field_ids(i + 1)
    except:
        # Any error (e.g. a missing allLinks.txt) silently ends the scan
        pass
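store_input_field_ids delegates the actual writing to FileSetter.put_ids_in_file, which is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming it simply appends each id on its own line and skips ids that are already stored:

# Hypothetical stand-in for FileSetter.put_ids_in_file; the real helper
# lives in the FileSetter module and may behave differently.
def put_ids_in_file(ids, path):
    try:
        with open(path, 'rt') as f:
            existing = set(f.read().split('\n'))
    except IOError:
        existing = set()
    with open(path, 'a') as f:
        for field_id in ids:
            if field_id and field_id not in existing:
                f.write(field_id + '\n')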
Example #3
def store_links(i):
    # Worker function for one crawler thread: keep crawling the i-th entry
    # of queued.txt until the scan time runs out.
    if time.time() - old_time >= time_to_scan:
        print('Crawling Done. Closing Thread ' + str(i+1) + '\n')
    else:
        try:
            with open(os.path.join(Path_To_Project_Dir, 'queued.txt'), 'rt') as f:
                line = f.read().split('\n')
                if len(line) >= i+1:
                    # Crawl the i-th queued link, append any newly found links
                    # to the queue and look at the same position again
                    FileSetter.remove_empty_lines(os.path.join(Path_To_Project_Dir, 'queued.txt'))
                    FileSetter.put_links_in_file(link_crawler(line[i]), os.path.join(Path_To_Project_Dir, 'queued.txt'))
                    FileSetter.remove_empty_lines(os.path.join(Path_To_Project_Dir, 'queued.txt'))
                    store_links(i)
                else:
                    # Nothing queued at this position yet, wait and retry
                    sleep(10)
                    store_links(i)
        except:
            # Any error in this worker is swallowed so the other threads keep running
            pass
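store_links also leans on FileSetter.put_links_in_file and FileSetter.remove_empty_lines, neither of which appears in this excerpt. A sketch of the second helper, under the assumption that it simply rewrites the file without blank lines:

# Hypothetical stand-in for FileSetter.remove_empty_lines; rewrites the
# file keeping only non-empty lines.
def remove_empty_lines(path):
    with open(path, 'rt') as f:
        lines = [l for l in f.read().split('\n') if l.strip()]
    with open(path, 'wt') as f:
        f.write('\n'.join(lines) + '\n')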
Example #4
def link_crawler(url):
    # Fetch the page and collect candidate links from anchor tags, script
    # sources and simple JavaScript redirects.
    links = []
    try:
        x = urllib.urlopen(url)
        y = x.read()

        hrefs = re.findall(r'<a[^>]* href="([^"]*)"', y)
        scriptsrcs = re.findall(r'<script[^>]* src="([^"]*)"', y)
        jsreplace = re.findall(r'window\.location\.replace\s*\("([^"]*)"\)', y)
        jshref = re.findall(r'window\.location\.href\s*=\s*"([^"]*)"', y)

        FileSetter.transfer_links(url, Path_To_Project_Dir)

        for found in hrefs + scriptsrcs + jsreplace + jshref:
            found = urlparse.urljoin(url, found)
            if FileSetter.check_if_written(found, Path_To_Project_Dir):
                print("Link already crawled, skipping...")
            elif FileSetter.check_if_base_url(found,
                                              url) and not check_external_urls:
                print("Link not part of base URL, skipping...")
            else:
                print('Found Link: ' + found)
                links.append(found)

        # Never re-queue the starting URL itself
        if start_link in links:
            links.remove(start_link)

    except (KeyError, ValueError, IndexError, IOError):
        pass

    links = FileSetter.filter_duplicates(links)

    return links
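link_crawler finishes by passing the collected links through FileSetter.filter_duplicates. That helper is not shown here; an order-preserving de-duplication along these lines would be enough (a sketch, not the module's actual implementation):

# Hypothetical stand-in for FileSetter.filter_duplicates: keep the first
# occurrence of every link and preserve the original order.
def filter_duplicates(links):
    seen = set()
    unique = []
    for link in links:
        if link not in seen:
            seen.add(link)
            unique.append(link)
    return unique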
Example #5
                ' were checked, no vulnerabilities found' + '\n\n')
            print 'All text input fields of url: ' + url[
                u] + ' were checked, no vulnerabilities found'
        elif vulnerable:
            FileSetter.append_to_file(
                os.path.join(Path_To_Project_Dir, 'XSS_Results.txt'),
                'Vulnerabilities in: ' + url[u] + ' were found!' + '\n\n')
            print 'Vulnerabilities in: ' + url[u] + ' were found!'


# Scanning starts here
print "\nScanning started. Scanning for " + str(
    time_to_scan) + " seconds" + "\n"

# These two functions create the main directory and create the crawled.txt and queued.txt file
FileSetter.create_main_directory(Path_To_Project_Dir)
FileSetter.create_result_files(Path_To_Project_Dir, start_link)

print "\nResults will be stored in " + os.path.join(
    Path_To_Project_Dir +
    '/allLinks.txt') + " and " + os.path.join(Path_To_Project_Dir +
                                              '/XSS_Results.txt') + "\n"

# Start one crawler thread per queue position, each running store_links
t = {}
for n in range(0, threads):
    t["t{0}".format(n)] = threading.Thread(target=store_links, args=(n, ))
    t["t{0}".format(n)].start()

# Give the crawler threads the full scan time plus a 10 second grace period to finish
sleep(time_to_scan + 10)
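The script above simply sleeps for time_to_scan + 10 seconds and assumes the workers are done by then. If a stricter shutdown is wanted, the same thread dictionary could be joined with a timeout instead; a sketch under the assumption that t, threads and time_to_scan are defined as above:

# Possible alternative to the fixed sleep: wait for each worker with a
# timeout and report any thread that is still running afterwards.
for n in range(0, threads):
    t["t{0}".format(n)].join(time_to_scan + 10)
    if t["t{0}".format(n)].is_alive():
        print "Thread " + str(n + 1) + " is still running"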
Example #6


# Scanning starts here
print "\nScanning started. Scanning for " + str(time_to_scan) + " seconds" + "\n"


# These two functions create the main directory and create the crawled.txt and queued.txt file
FileSetter.create_main_directory(Path_To_Project_Dir)
FileSetter.create_result_files(Path_To_Project_Dir, start_link)

print "\nResults will be stored in " + os.path.join(Path_To_Project_Dir + '/allLinks.txt') + "\n"

# Start one crawler thread per queue position, each running store_links
t = {}
for n in range(0, threads):
    t["t{0}".format(n)] = threading.Thread(target=store_links, args=(n,))
    t["t{0}".format(n)].start()

# Give the crawler threads the full scan time plus a 10 second grace period to finish
sleep(time_to_scan + 10)
# Setting the scan time to zero tells any crawler thread that is still running to stop
time_to_scan = 0

# Store every found link from queued.txt and crawled.txt into one file called allLinks.txt
import re
import urllib
import os

# Set target
start_link = raw_input(
    "Enter Target Address (Please use a full domain name address): ")

# Set path to folder where you want to save your project
Path_To_Project_Dir = raw_input(
    "Enter path to folder where you want to save your project: ")

# Scanning starts here
print "\nScanning started.\n"

FileSetter.create_main_directory(Path_To_Project_Dir)
FileSetter.create_all_results_txt_file(Path_To_Project_Dir)

print "\nResults will be stored in " + os.path.join(Path_To_Project_Dir +
                                                    '/XSS_Results.txt') + "\n"


# Function that returns the ids (name attributes) of the input fields of a given url
def id_crawler(url):

    ids = []
    try:
        x = urllib.urlopen(url)
        y = x.read()

        inputs = re.findall(r'<input[^>]* name="([^"]*)"', y)