Example #1
0
def manualCrawl(website):
    """Fetch *website* and print every link/asset URL belonging to it.

    Harvests href/src attributes from <a>, <link>, <img>, <iframe> and
    <embed> tags, de-duplicates them, and writes out those containing
    the bare (scheme-less) host name.
    """
    website = addHTTP(website)
    webs = removeHTTP(website)  # bare host, used to filter results below
    request = Request(website, _timeout=5, _encode=True)
    soup = BeautifulSoup(request, 'lxml')
    ### Links are in ['a', 'link', 'img', 'svg', 'iframe', 'embed', 'audio']

    _links = []

    # BUG FIX: the original looped over `a` again when collecting <link>
    # tags, so <link> hrefs were never gathered and <a> hrefs were
    # appended twice. Harvest each (tag, url-attribute) pair uniformly.
    for tag, attr in (("a", "href"), ("link", "href"),
                      ("img", "src"), ("iframe", "src"), ("embed", "src")):
        for node in soup.find_all(tag):
            url = node.get(attr)  # .get(): a tag may lack the attribute
            if url is not None:
                _links.append(url)

    _links = set(_links)  # de-duplicate before printing
    for __links in _links:
        if str(webs) in __links:
            write(var="~", color=c, data=__links)
Example #2
0
def reverseViaYGS(website):
	"""Reverse-IP lookup via yougetsignal.com.

	POSTs the bare host name to the YGS API, then prints the resolved
	IP, the queried domain, the domain count, and every co-hosted
	domain returned in `domainArray`.
	"""
	website = addHTTP(website)
	webs = removeHTTP(website)
	url = "https://domains.yougetsignal.com/domains.php"
	post = {
		'remoteAddress': webs,
		'key': ''
	}
	request = requests.post(url, headers=_headers, data=post)

	grab = json.loads(request.text)

	status = grab['status']
	ip = grab['remoteIpAddress']
	domain = grab['remoteAddress']
	total_domains = grab['domainCount']
	array = grab['domainArray']

	if status == 'Fail':
		write(var="#", color=r, data="Sorry! Reverse Ip Limit Reached.")
	else:
		write(var="$", color=c, data="IP: " + ip + "")
		write(var="$", color=c, data="Domain: " + domain + "")
		# BUG FIX: domainCount may be numeric — coerce before concatenating.
		write(var="$", color=c, data="Total Domains: " + str(total_domains) + "\n")

		# BUG FIX: the tail of the original function was space-indented
		# inside a tab-indented body, which raises TabError in Python 3.
		# domainArray entries are [domain, marker] pairs; keep the domain.
		for name, _marker in array:
			write(var="#", color=b, data=name)
Example #3
0
def grabBanner(website):
    """Print every HTTP response header of *website* as "Name: value"."""
    website = addHTTP(website)
    header_items = get(website, timeout=5, headers=_headers).headers.items()

    for name, value in header_items:
        write(var="#", color=c, data=f"{name}: {value}")
Example #4
0
def whoIS(website):
	"""Query whoapi.com for the raw WHOIS record of *website* and print it."""
	website = f"http://api.whoapi.com/?apikey=66ca3039356c0287ff63ed472f528478&r=whois&domain={website}&ip="
	req = Request(website, _timeout=8, _encode=True)
	js = json.loads(req)
	# Emit the raw WHOIS block line by line, skipping blank lines.
	for line in js['whois_raw'].split("\n"):
		if line:
			write(var="~", color=c, data=line)
Example #5
0
def browserspyRep(website):
	"""Scrape browserspy.dk's web-server report for *website*.

	POSTs the bare host name to the report form and prints each
	property/value row found in the resulting HTML table.
	"""
	url = "http://browserspy.dk/webserver.php"
	_data = {
		'server': removeHTTP(website)
	}
	# BUG FIX: re.findall with a str pattern raises TypeError on bytes;
	# the old `.text.encode('UTF-8')` made the response bytes. Keep str.
	html = requests.post(url, headers=_headers, data=_data).text
	rows = re.findall(r'<tr class="(.*)">\n<td class="property">(.*)</td>\n<td class="value">(.*)</td>\n</tr>', html)
	for res in rows:
		result = res[1].capitalize() + ": " + res[2]
		write(var="#", color=c, data=result)
Example #6
0
def whoIS(website):
	"""Print the raw WHOIS record for *website* scraped from whois.com."""
	website 	= removeHTTP(website)
	url 		= f"https://www.whois.com/whois/{website}"

	try:
		request 	= Request(url, _timeout=5, _encode=None)
		bs 			= BeautifulSoup(request, 'html.parser')
		# whois.com renders the raw record inside <pre class="df-raw">.
		result 		= bs.find_all('pre', {'class': 'df-raw'})[0].text.encode('UTF-8')
		print(f"\r{c}{result.decode()}")

	# BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
	# SystemExit; `Exception` keeps the best-effort behaviour without
	# trapping interpreter-control exceptions.
	except Exception:
		write(var="!", color=r, data="Sorry, whois cannot be performed right now...!!! :[")
Example #7
0
def reverseViaHT(website):
	"""Reverse-IP lookup via hackertarget.com: print co-hosted domains."""
	website = addHTTP(website)
	webs = removeHTTP(website)
	url = "http://api.hackertarget.com/reverseiplookup/?q="
	combo = "{url}{website}".format(url=url, website=webs)
	request = Request(combo, _timeout=5, _encode=True)
	# len == 5 presumably matches the API's short error reply — TODO confirm.
	if len(request) != 5:
		# FIX: `.strip("")` was a no-op, and `list` shadowed the builtin.
		domains = request.split("\n")
		for domain in domains:
			if len(domain) != 0:
				write(var="#", color=c, data=domain)
	else:
		write(var="@", color=r, data="Sorry, The webserver of the website you entered have no domains other then the one you gave :')")
Example #8
0
def googleCrawl(website):
	"""Scan the first ten Google result pages of site:<host> and print
	every result link that contains the bare host name."""
	search = ("site:" + str(removeHTTP(website)))
	webs = removeHTTP(website)
	for page in range(0, 10):
		url = "https://google.com/search?q=" + str(search) + "&ie=utf-8&oe=utf-8&aq=t&start=" + str(page) + "0"
		response = requests.get(url, headers=_headers)
		soup = BeautifulSoup(response.text.encode('UTF-8'), 'lxml')
		# Google wraps each organic result in <div class="r">.
		for hit in soup.find_all('div', class_='r'):
			href = hit.a['href']
			if str(webs) in href:
				write(var="~", color=c, data=href)
Example #9
0
def bingCrawl(website):
    """Page through Bing results for site:<host> and print matching links.

    Grabs the 6th anchor of each of 50 result pages (the first organic
    hit), then prints the de-duplicated hrefs containing the bare host.
    """
    search = ("site:" + str(removeHTTP(website)))
    webs = removeHTTP(website)
    link = []
    for loop in range(0, 50):
        url = "http://www.bing.com/search?q=" + str(search) + "&first=" + str(
            loop) + "0"
        request = requests.get(url, headers=_headers, timeout=5)
        # BUG FIX: re.findall with a str pattern raised TypeError on the
        # UTF-8-encoded bytes; match against the decoded text instead.
        matches = re.findall(r'<a\shref="(.*?)"\sh="(.*?)">', request.text)
        if len(matches) > 5:  # guard the hard-coded [5] index
            link.append(matches[5][0])

    for links in set(link):
        if str(webs) in links:
            write(var="~", color=g, data=links)
Example #10
0
def bingCrawl(website):
	"""Page through Bing results for site:<host> and print matching links.

	Collects the 6th anchor of each of 50 result pages, skipping pages
	that fail to connect, then prints the de-duplicated hrefs that
	contain the bare host name.
	"""
	search = ("site:" + str(removeHTTP(website)))
	webs = removeHTTP(website)
	link = []
	for loop in range(0, 50):
		url = "http://www.bing.com/search?q=" + str(search) + "&first=" + str(loop) + "0"
		try:
			request = requests.get(url, headers=_headers)
			# BUG FIX: re.findall with a str pattern raised TypeError on
			# the UTF-8-encoded bytes; use the decoded text instead.
			matches = re.findall(r'<a\shref="(.*?)"\sh="(.*?)">', request.text)
			if len(matches) > 5:  # guard the hard-coded [5] index
				link.append(matches[5][0])
		except requests.exceptions.ConnectionError:
			pass  # best-effort: skip unreachable result pages

	_link = set(link)
	for links in _link:
		if str(webs) in links:
			write(var="~", color=g, data=links)
Example #11
0
def bingCrawl(website):
	"""Harvest anchor hrefs from ten Bing result pages of site:<host>
	and print every unique one containing the bare host name."""
	search 	= f"site:{str(removeHTTP(website))}"
	webs 	= removeHTTP(website)
	harvested = []

	for page in range(0, 10):
		url = f"http://www.bing.com/search?q={str(search)}&first={str(page)}0"

		try:
			response = requests.get(url, headers=_headers)
			pairs 	 = re.findall(r'<a\shref="(.*?)"\sh="(.*?)">', response.text)
			# Keep only the href half of each (href, h-marker) pair.
			harvested.extend(href for href, _marker in pairs)

		except requests.exceptions.ConnectionError:
			pass  # best-effort: skip unreachable result pages

	for candidate in set(harvested):
		if str(webs) in candidate:
			write(var="~", color=g, data=candidate)
def cloudflare(website, _verbose=None):
    """Report whether *website*'s response headers mention Cloudflare.

    With `_verbose` set, also announces the check and a negative result.
    """
    if _verbose is not None:
        write(var="#", color=c, data=f"Checking for Cloudflare in {website}")

    headers_url = f"http://api.hackertarget.com/httpheaders/?q={str(website)}"
    raw_headers = Request(headers_url, _timeout=3, _encode=True).decode()

    if "cloudflare" in raw_headers:
        write(var="~", color=g, data="Cloudflare Found!\n")
    elif _verbose is not None:
        write(var="^", color=g, data=f"{website} is not using Cloudflare!")
Example #13
0
def websiteSpeed(website):
    """Time DNS resolution and full page load for *website*.

    Prints three rows: DNS lookup time, total load time, and load time
    with the DNS portion subtracted.
    """
    # BUG FIX: `urlparse.urlparse` and `urllib.urlopen` are Python-2 APIs
    # and crash under the Python 3 this file's f-strings require; use the
    # Python-3 module layout via local imports.
    from urllib.parse import urlparse as _urlparse
    from urllib.request import urlopen as _urlopen

    website = addHTTP(website)
    urlinfo = _urlparse(website)

    start = time.time()
    ip = socket.gethostbyname(urlinfo.netloc)
    dns_tm = time.time() - start
    _dns = "{:<10}:{:>40} seconds".format(" DNS", dns_tm)
    write(var="~", color=g, data=_dns)

    start = time.time()
    _data = _urlopen(website).read()  # fetch the body to time a full load
    load_tm = time.time() - start
    _load = "{:<10}:{:>40} seconds".format(" Load", load_tm)
    _wo = "{:<10}:{:>40} seconds".format(" W/O DNS", load_tm - dns_tm)

    write(var="#", color=c, data=_load)
    write(var="~", color=g, data=_wo)
Example #14
0
def cloudflare(website, _verbose=None):
    """Detect Cloudflare on *website* and try to uncover the origin IP.

    Reads response headers via hackertarget.com; when Cloudflare is
    detected, queries crimeflare for candidate origin IPs and verifies
    the chosen IP answers without Cloudflare headers.

    Returns the real IP as a string when the bypass succeeds, else None.
    """
    if _verbose != None:
        write(var="#", color=c, data="Checking For Cloudflare In " + website)
    combo = ("http://api.hackertarget.com/httpheaders/?q=" + str(website))
    request = Request(combo, _timeout=3, _encode=True)
    if "cloudflare" in request:
        if _verbose != None:
            write(var="~", color=g, data="Cloudflare Found!\n")
            write(var="#", color=y, data="Trying To Bypass Cloudflare!\n")
        req = "http://www.crimeflare.biz/cgi-bin/cfsearch.cgi"
        pos = {'cfS': website}
        # BUG FIX: keep the response as str — re.findall with a str
        # pattern raises TypeError on the old UTF-8-encoded bytes.
        res = requests.post(req, headers=_headers, data=pos).text
        real_ip = None
        reg = re.findall(r'\d+\.\d+\.\d+\.\d+', res)
        if reg:
            # BUG FIX: reg[1] raised IndexError when only one IP matched.
            real_ip = reg[1] if len(reg) > 1 else reg[0]
        else:
            write(var="!",
                  color=r,
                  data="Sorry! Cloudflare Wasn't Bypassed :')")
        # BUG FIX: the old code requested "http://None" when no IP was found.
        if real_ip is None:
            return None
        request = Request("http://" + str(real_ip), _timeout=3, _encode=True)
        if not "cloudflare" in request.lower():
            if _verbose != None:
                write(var="@", color=c, data="Cloudflare Bypassed!")
                write(var="~",
                      color=g,
                      data="Real IP --> " + fc + str(real_ip))
            return (str(real_ip))
        else:
            if _verbose != None:
                write(var="!",
                      color=r,
                      data="Sorry! Cloudflare Wasn't Bypassed :')")
    else:
        if _verbose != None:
            write(var="$", color=b, data=website + " Is not using Cloudflare!")


# cloudflare("http://mukarramkhalid.com")
Example #15
0
def nameServers(website):
    """Look up and print each NS record of *website*."""
    host = removeHTTP(website)
    records = Nameservers(host, 'NS')

    for record in records:
        write(var="#", color=c, data=record)