Exemple #1
0
    def RunWhoIS(self):
        '''
        Run the Whois CLI Command.

        Performs a whois lookup for the configured target domain and its IP
        (skipped when the IP is private), writes results to XML reports and a
        log file, optionally emails the summary, and moves all artifacts into
        the per-run result directory.
        '''
        self.wass.CurrentTask = "Whois"
        # Setup the logging, we are about to use it in wasslib.WassCommon.Common !!!!!
        self.wass.WhoisWassLog = self.wass.CurrentTask + "_" + self.wass.TargetDomain + "_" + self.wass.ReportDate + ".log"
        self.wass.WhoisTargetXMLReport = self.wass.CurrentTask + "_" + self.wass.TargetDomain + "_" + self.wass.ReportDate + ".xml"
        self.wass.WassGetVersion._GetWhoisVersion()
        self.wass.WassLogging.CreateLogfile(self.wass.WhoisWassLog)
        self.wass.WassCommon.printInfo()

        # Get the Dynamic parameters
        self.wass.WassCommon.getDynamic()
        # NOTE(review): TargetIP is presumably populated by getDynamic() above,
        # which is why this report name is built only now - confirm.
        self.wass.WhoisTargetIPXMLReport = self.wass.CurrentTask + "_" + self.wass.TargetIP + "_" + self.wass.ReportDate + ".xml"
        self.wass.WassLogging.info("############### Whois WASS Run Starting ###############")
        # Dont do a Whois lookup if the IP Address of the target is Private
        if (self.wass.TargetIPType == True):
            self.wass.WassLogging.info("The IP Address is a private one so we don't do the Whois Lookup")
        else:
            self.wass.WassLogging.info("The IP Address is a public one so we do the Whois Lookup")
            self.wass.WassLogging.info("The Whois query for the Domain is: %s " % self.wass.TldDomainName)
            self.wass.WassLogging.info("The Whois query for the IP Address is: %s " % self.wass.TargetIP)

            emailBody = "This is the result email for the " + self.wass.CurrentTask + " run against: " + self.wass.TargetDomain
            emailBody += "\n\nFollowing is the Whois Lookup for the Top Level Domain Name:\n\n"
            # Raw whois text for the top-level domain name.
            tlDomain = pythonwhois.net.get_whois_raw(self.wass.TldDomainName)

            self.wass.WassXML.createWhoisXML(pythonwhois.get_whois(self.wass.TldDomainName), self.wass.TldDomainName, self.wass.WhoisTargetXMLReport)

            for line in tlDomain:
                emailBody += line

            emailBody += "\n\nFollowing is the Whois lookup on the IP Address:\n\n"

            # The variable is re-used for the IP lookup's raw whois text.
            tlDomain = pythonwhois.net.get_whois_raw(self.wass.TargetIP)
            self.wass.WassXML.createWhoisXML(pythonwhois.get_whois(self.wass.TargetIP), self.wass.TargetIP, self.wass.WhoisTargetIPXMLReport)
            for line in tlDomain:
                emailBody += line

            self.wass.WassLogging.infoNoFormatting(emailBody)

            if (self.wass.SendEmail):
                self.wass.WassCommon.SendEMail(emailBody)

        self.wass.WassLogging.info("############### Whois WASS Run Done ###############")
        finalResultDir = self.wass.WassCommon.createResultDir()
        self.wass.WassCommon.moveResultFile(self.wass.WhoisTargetXMLReport, finalResultDir)
        self.wass.WassCommon.moveResultFile(self.wass.WhoisTargetIPXMLReport, finalResultDir)
        self.wass.WassCommon.printInfo()
        #Now we need to stop the current logging so we can copy the log file into the result directory for the current run
        self.wass.WassLogging.stopLogging()
        self.wass.WassCommon.moveLogFile(self.wass.WhoisWassLog, finalResultDir)
def query_pinyin_domains(limit=10000):
    """Check whois availability for the top-`limit` pinyin words by frequency.

    Reads unchecked rows (``expiry_date_com IS NULL``) from the
    ``pinyin_domains`` table, queries whois for the ``.com`` and ``.cn``
    variants of each word, and writes availability / expiry info back.

    :param limit: maximum number of rows to process (previously ignored: the
        SQL hard-coded 10000 regardless of this argument).
    """
    c = conn.cursor()

    # Honor the `limit` parameter and bind it instead of interpolating.
    c.execute(
        'SELECT rowid, pinyin, frequency, last_updated FROM pinyin_domains '
        'WHERE expiry_date_com IS NULL ORDER BY frequency DESC LIMIT ?',
        (limit,))

    for row in c.fetchall():

        available_com = ''
        available_cn = ''
        # BUG FIX: these were previously left unbound when the corresponding
        # domain was available, raising NameError in the UPDATE below.
        expiry_date_com = ''
        expiry_date_cn = ''

        # Row id, for progress visibility.
        print(row[0])

        if not row[1]:
            break

        # Build the .com and .cn domain names from the pinyin word.
        pinyin = row[1]
        domain_com = pinyin + '.com'
        domain_cn = pinyin + '.cn'

        # Whois lookup for the .com variant.
        print(domain_com)
        try:
            w_com = pythonwhois.get_whois(domain_com, True)
        except Exception:
            w_com = None

        if is_available(w_com):
            available_com = domain_com
        else:
            expiry_date_com = get_expiry_date(w_com)

        # Whois lookup for the .cn variant.
        print(domain_cn)
        try:
            w_cn = pythonwhois.get_whois(domain_cn, True)
        except Exception:
            w_cn = None

        if is_available(w_cn):
            available_cn = domain_cn
        else:
            expiry_date_cn = get_expiry_date(w_cn)

        # Persist status and expiry dates; bound parameters replace the old
        # string-formatted SQL (quoting/injection hazard).
        c.execute(
            'UPDATE "main"."pinyin_domains" SET "available_com"=?, "expiry_date_com"=?, '
            '"available_cn"=?, "expiry_date_cn"=?, "last_updated"=? WHERE rowid=?',
            (available_com, expiry_date_com, available_cn, expiry_date_cn,
             time.time(), row[0]))
        conn.commit()
Exemple #3
0
 def _whois_search(self, domain):
     """Look up whois contacts for *domain* and persist any e-mail patterns.

     Returns an empty DataFrame when the whois lookup fails; otherwise the
     result is handed to CompanyEmailPatternCrawl for persistence.
     """
     try:
         # BUG FIX: the original issued the same network query twice and
         # discarded the second result.
         results = pythonwhois.get_whois(domain)
     except Exception:
         return pd.DataFrame()
     # Drop contact sections that are missing entirely (None entries).
     emails = [contact for contact in results['contacts'].values() if contact]
     emails = pd.DataFrame(emails)
     emails['domain'] = domain
     for index, row in emails.iterrows():
         name = FullContact()._normalize_name(row['name'])
         pattern = EmailGuessHelper()._find_email_pattern(name, row.email)
         # .loc replaces the long-deprecated/removed .ix indexer.
         emails.loc[index, 'pattern'] = pattern
     CompanyEmailPatternCrawl()._persist(emails, "whois_search")
Exemple #4
0
def contact(domain):
        """Return the first whois contact entry for *domain*, or None if there
        are no contacts.

        BUG FIX: the original mixed tabs and spaces in the loop body, which is
        a TabError under Python 3; indentation is now uniform.
        """
        domain = remove_www(domain)
        whois = pythonwhois.get_whois(domain)
        result = whois['contacts']
        # Return the first contact section found (dict iteration order),
        # mirroring the original early-return loop.
        for admin in result.values():
                return admin
Exemple #5
0
    def enter_callback(self, widget, entry, lblResult):
        """GTK callback: whois the entered domain and show a short summary.

        Reads the domain from *entry*, strips a leading "www"/"http://www"
        component, and writes either the whois summary or an error message
        into *lblResult*.
        """
        try:
            domainName = entry.get_text()
            lblResult.set_text("Checking...")
            domainArr = domainName.split('.')
            # Strip a leading "www" / "http://www" component if present.
            if domainArr[0] == "www" or domainArr[0] == "http://www":
                domainName = ".".join(domainArr[1:])

            w = pythonwhois.get_whois(domainName)
            result = "Domain : %s \n" % domainName
            if w["contacts"]["registrant"] is None:
                result += "\nDomain Is Available"
            else:
                result += "Registrant : %s \n" % w["contacts"]["registrant"]["name"]
                result += "Created at : %s \n" % w["creation_date"][0].strftime("%d/%m/%Y")
                result += "Expired at : %s \n" % w["expiration_date"][0].strftime("%d/%m/%Y")
        except Exception:
            # The original had two handlers (WhoisException + bare except)
            # showing the same message; one handler covers both.
            lblResult.set_text("An Error Occurred!: Please check the domain!")
        else:
            lblResult.set_text(result)
def _who_is(domain, queue):
    """Whois *domain* and push [domain, flattened_raw_text] onto *queue*."""
    try:
        record = pythonwhois.get_whois(domain)
        flattened = record['raw'][0].replace('\n', ' ').replace('\r', ' ')
        queue.put([domain, flattened])
    except (SocketError, WhoisException):
        # we don't know - write a question mark to the queue for this domain
        queue.put([domain, '?'])
Exemple #7
0
 def get_whois(self, domain):
     """Fetch whois data for the given domain.

     Returns the parsed whois dict, or a sentinel string when the lookup
     fails for any reason.
     """
     try:
         whois = pythonwhois.get_whois(domain)
     except Exception:
         # The bare except also swallowed KeyboardInterrupt/SystemExit;
         # Exception keeps the same fallback without masking shutdown.
         whois = "Timed Out/Not Found Blah!!!!"
     return whois # check for normalizations
Exemple #8
0
    def whois(self):
        """Whois-summarize self.values[0].

        Returns '<url>: Registered by <contact fields>. Expires <date>',
        a default string when no argument was given, or an error string
        on lookup/parsing failure.
        """
        if not self.values:
            # Easter-egg default when no URL argument is supplied.
            return "The Doctor"
            
        url = self.values[0] 
        results = pythonwhois.get_whois(url)

        print results

        try:
            r = results['contacts']['registrant']
            expires = results['expiration_date'].pop(0).strftime('%m/%d/%Y') 
            # Fixed field order for the one-line registrant summary.
            order = [
                'name',
                'street',
                'city',
                'state',
                'postalcode',
                'country',
                'phone',
                'email',
            ]
            output = []
            for entry in order:
                output.append(r[entry])

            reformat = ', '.join(output)
            return '%s: Registered by %s. Expires %s' % (url, reformat, expires)
        except:
            # Any missing contact field or absent expiration date lands here.
            return 'No results, or parsing failure.'
def get_whois_data(domain,return_type):
	"""Collect whois registration details for *domain*.

	:param domain: domain name (surrounding whitespace is stripped)
	:param return_type: 1 -> tuple of values, anything else -> OrderedDict
	:returns: the details, or 0 when the lookup or field extraction fails
	"""
	try:
		whois = pythonwhois.get_whois(domain.strip())
	except Exception:
		# Lookup failure (network, unsupported TLD, ...): signal with 0.
		return 0
	try:
		creation_date = whois['creation_date']
		updated_date = whois['updated_date']
		expiry_date = whois['expiration_date']
		organisation = str(whois['contacts']['registrant']['organization'])
		name = str(whois['contacts']['registrant']['name'])
		email = str(whois['contacts']['registrant']['email'])
		phone = str(whois['contacts']['registrant']['phone'])
		street = str(whois['contacts']['registrant']['street'])
		city = str(whois['contacts']['registrant']['city'])
		postcode = str(whois['contacts']['registrant']['postalcode'])
		country = str(whois['contacts']['registrant']['country'])
	except Exception:
		# Any missing field means the record is unusable for our purposes.
		return 0
	if return_type == 1:
		return (domain.strip(),creation_date[0].strftime('%m/%d/%Y %H:%M'),updated_date[0].strftime('%m/%d/%Y %H:%M'),expiry_date[0].strftime('%m/%d/%Y %H:%M'),organisation,name,email,phone,street,city,postcode,country)
	else:
		data_list = OrderedDict([('Creation Date',creation_date[0].strftime('%m/%d/%Y %H:%M')), 
					 ('Updated Date',updated_date[0].strftime('%m/%d/%Y %H:%M')),
					 ('Expiration Date',expiry_date[0].strftime('%m/%d/%Y %H:%M')),
					 ('Organisation', organisation),
					 ('Name', name),
					 ('Email', email),
					 ('Phone', phone),
					 ('Street', street),
					 ('City', city),
					 ('Postcode', postcode),
					 ('Country',country)
					])
		return data_list
Exemple #10
0
def domain_info(domain):
    """Get as much information as possible for a given domain name."""
    domain = get_registered_domain(domain)
    result = pythonwhois.get_whois(domain)

    # First registrar entry, or an empty list when none are present.
    registrar = result['registrar'][0] if result.get('registrar') else []
    nameservers = result.get('nameservers', [])

    expires = None
    days_until_expires = None
    expiry_list = result.get('expiration_date')
    if isinstance(expiry_list, list) and expiry_list:
        expires = expiry_list[0]
        if isinstance(expires, datetime.datetime):
            days_until_expires = (expires - datetime.datetime.now()).days
            expires = utils.get_time_string(time_obj=expires)
        else:
            # Non-datetime value: parse it before computing the delta.
            days_until_expires = (utils.parse_time_string(expires) -
                                  datetime.datetime.now()).days

    return {
        'name': domain,
        'whois': result['raw'],
        'registrar': registrar,
        'nameservers': nameservers,
        'days_until_expires': days_until_expires,
        'expiration_date': expires,
    }
Exemple #11
0
def whois_host(phenny, input):
	"""IRC command handler: whois the domain given as the second argument.

	Reports non-existence for near-empty results; otherwise extracts
	registrar, creation/expiration dates and the holder, each falling back
	to an "unknown" placeholder when missing.

	NOTE(review): Python 2 `except X, e` syntax throughout; the body appears
	truncated here (`holder` is computed but never reported) - confirm
	against the full source.
	"""
	if whois_available:
		domain = input.group(2)
		result = pythonwhois.get_whois(domain, normalized=True)
		
		if result is not None:
			# A result with at most two keys is treated as "no such domain".
			if len(result) <= 2:
				phenny.say("The domain \x0304%s\x03 does not seem to exist." % domain)
			else:
				try:
					registrar = result["registrar"][0]
				except KeyError, e:
					registrar = "unknown registrar"
				
				try:
					creation_date = result['creation_date'][0].isoformat()
				except KeyError, e:
					creation_date = "unknown"
				
				try:
					expiration_date = result['expiration_date'][0].isoformat()
				except KeyError, e:
					expiration_date = "unknown"
				
				# Prefer "name (organization)", then name, then organization.
				try:
					holder = "%s (%s)" % (result["contacts"]["registrant"]["name"], result["contacts"]["registrant"]["organization"])
				except Exception, e:
					try:
						holder = result["contacts"]["registrant"]["name"]
					except Exception, e:
						try:
							holder = result["contacts"]["registrant"]["organization"]
						except Exception, e:
							holder = "unknown"
Exemple #12
0
def whoisweb():
    """Interactively prompt for a website and print its whois registrant.

    Python 2 code (print statement, raw_input); relies on module-level
    color constants R and T.
    """
    print(''+R+'Example - example.com')
    h = raw_input(''+T+'' + color.UNDERLINE + 'Website>' + color.END)
    domains = [h]
    # Single-element loop kept from the original structure.
    for dom in domains:
        details = pythonwhois.get_whois(dom)
        print details['contacts']['registrant'] 
def main():
  """Check every configured domain/extension for upcoming expiry and email warnings.

  Relies on module-level `domainnames` (dict of name -> extensions),
  `periods` (day thresholds), `emails` and `send_mail`.
  """
  messages = ''
  problems = ''
  for domain, exts in domainnames.items():
    for ext in exts:
      d = domain+"."+ext
      try:
        w = pythonwhois.get_whois(d)
        if w:
          # NOTE(review): exact-type check; datetime.datetime values (which
          # are not type datetime.date) take the "compute days" branch while
          # plain date objects are treated as missing - confirm intent.
          if type(w['expiration_date'][0]) is not datetime.date:
            days = (w['expiration_date'][0] - datetime.datetime.utcnow()).days
            for p in periods:
              if days < p:
                # One warning line per threshold the remaining days fall under.
                messages = messages + "Domain "+d+" will expire in "+str(days)+" days!\n"
          else:
            problems = problems+"No expiration date found for: "+d+"\n"

        else:
          problems = problems+"Domain not found: "+d+"\n"
      except Exception as e:
        problems = problems+d+": "+str(e)+"\n"

  if messages != '' and problems == '':
    for email in emails:
      send_mail(email, messages)

  # NOTE(review): when there are problems but no expiry messages, no email is
  # sent at all - possibly unintended.
  if problems != '' and messages != '':
    for email in emails:
        send_mail(email, messages+"\n I encountered some problems: \n"+problems)
Exemple #14
0
def whois(text):
    """Return a one-line whois summary for a domain.

    :param text: domain name (whitespace-trimmed, lowercased before lookup)
    :returns: '<domain> - <comma-separated available fields>'
    """
    domain = text.strip().lower()

    whois = pythonwhois.get_whois(domain, normalized=True)

    info = []

    # Every field is optional in whois output; skip whatever is missing or
    # unparsable instead of failing the whole lookup. The original used bare
    # excepts, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        info.append("\x02Registrar\x02: {}".format(whois["registrar"][0]))
    except (KeyError, IndexError):
        pass

    try:
        info.append("\x02Registered\x02: {}".format(whois["creation_date"][0].strftime("%d-%m-%Y")))
    except (KeyError, IndexError, AttributeError):
        pass

    try:
        info.append("\x02Expires\x02: {}".format(whois["expiration_date"][0].strftime("%d-%m-%Y")))
    except (KeyError, IndexError, AttributeError):
        pass

    # Debug dump of the full record (kept from the original).
    pprint(whois)

    info_text = ", ".join(info)
    return "{} - {}".format(domain, info_text)
Exemple #15
0
    def do_live_query(self, obj, config):
        """Run a live WHOIS query for obj.domain and record contacts,
        nameservers, registrar entries and dates via self._add_result.

        Reports an error (and returns) when the whois lookup itself fails.
        """
        try:
            results = pythonwhois.get_whois(obj.domain)
        except pythonwhois.shared.WhoisException as e:
            self._error("Unable to find WHOIS information. %s" % str(e))
            return

        contacts = results.get('contacts', {})
        for contact_type in contacts.keys():
            # If not provided it defaults to None.
            if not contacts[contact_type]:
                continue
            # .items() works on Python 2 and 3; iteritems() was 2-only.
            for k, v in contacts[contact_type].items():
                self._add_result("Live: " + contact_type + " Contact", v, {'Key': k})

        for ns in results.get('nameservers', []):
            self._add_result('Live: Nameservers', ns, {'Key': 'Nameserver'})

        for registrar in results.get('registrar', []):
            self._add_result('Live: Registrar', registrar, {'Key': 'Registrar'})

        for key in ['creation_date', 'expiration_date', 'updated_date']:
            for date in results.get(key, []):
                if date:
                    self._add_result('Live: Dates', date, {'Key': key})
Exemple #16
0
def dns_data():
	"""Iterate parsed DNS log rows and whois the client address of each.

	NOTE(review): depends on module-level `reader` and `d`, neither visible
	here; every local except `client_adr` is extracted but unused, and the
	whois result is discarded - looks like work in progress, confirm intent.
	"""
	for row in reader:
		host_addr = d[row][5]
		host_port = d[row][6]
		client_adr = d[row][7]
		client_port = d[row][8]
		proto = d[row][9]
		domain = pythonwhois.get_whois(client_adr)
Exemple #17
0
def whois_domain(name):
    """Return the trimmed raw whois text for *name*, or None on DNS failure."""
    try:
        record = whois.get_whois(name)
        if 'raw' in record:
            # Keep only the text before the '<<<' marker, without edge whitespace.
            return record['raw'][0].split('<<<')[0].strip()
    except socket.gaierror:
        logging.warning('Whois lookup failed for ' + name)
Exemple #18
0
def whois_domain(name):
    """Return the trimmed raw whois text for *name* as UTF-8 bytes, or None
    on DNS failure."""
    try:
        record = whois.get_whois(name)
        if "raw" in record:
            # Text before the "<<<" marker, trimmed, then encoded.
            text = record["raw"][0].split("<<<")[0]
            return text.strip().encode("utf-8")
    except socket.gaierror:
        logging.warning("Whois lookup failed for " + name)
Exemple #19
0
def _whois_domain(domain):
    """Whois-query *domain* with a 10-second socket timeout.

    :returns: the parsed whois dict, or {'error': <message>} on failure
    """
    result = {}
    try:
        pythonwhois.net.socket.setdefaulttimeout(10)
        result = pythonwhois.get_whois(domain)
    except Exception as e:
        # Lazy %-formatting and no manual .encode(): the original
        # `domain.encode("utf-8") + str(e)` breaks on Python 3 (bytes + str)
        # and pre-encoded the message on Python 2.
        logger.debug("domain whois error: %s - %s", domain, e)
        result["error"] = str(e)
    return result
Exemple #20
0
 def rate_by_date_created(self):
     """Rate self.domain by its age in whole years, capped at 10."""
     created = pythonwhois.get_whois(self.domain)['creation_date'][0]
     age_years = date.today().year - int(created.strftime("%Y"))
     # Domains ten or more years old all receive the maximum rating.
     return min(age_years, 10)
Exemple #21
0
def check(name):
    """Log whether *name* looks registered (red marker) or free (green marker)."""
    taken = u'\u00F8'
    free = u'\u0298'
    try:
        data = pythonwhois.get_whois(name)
        marker = make_red(taken) if 'expiration_date' in data else make_green(free)
        logger.info('%-20s %s', name, marker)
    except Exception as e:
        # Any failure is reported as "taken" so we never falsely claim free.
        logger.info('%-20s %s', name, make_red(taken))
def gather_info(name, url):
    """Collect recon data for *url* (nmap scan, robots.txt, whois) and
    hand everything to create_report."""
    domain_name = get_domain_name(url)
    ip_address = get_ip_address(domain_name)
    scan_results = nmap1.PortScanner().scan(ip_address)
    try:
        robots_txt = get_robots_txt(url)
    except urllib.error.HTTPError:
        # No accessible robots.txt; treat as empty.
        robots_txt = ''
    whois = get_whois(domain_name)
    create_report(name, url, domain_name, scan_results, robots_txt, whois)
Exemple #23
0
def whodat(host):
    """Geolocate the whois registrant (or admin) postal address of *host*.

    :returns: get_lat_lng(...) for the contact's address, or None when the
        whois data is missing or incomplete
    """
    try:
        contacts = pythonwhois.get_whois(host)["contacts"]
        dat = contacts["registrant"]
        if not dat:
            # BUG FIX: the original re-queried whois and read a top-level
            # "admin" key; the admin contact lives in the contacts dict.
            dat = contacts["admin"]

        state = dat["state"] if "state" in dat else ""
        country = dat["country"] if "country" in dat else ""

        addr = urlify(dat["street"], dat["city"], state, country)
    except Exception:
        return None
    return get_lat_lng(addr)
Exemple #24
0
def ip_info(ip_address):
    """Get as much information as possible for a given ip address."""
    if not utils.is_valid_ip_address(ip_address):
        # Reject malformed addresses before hitting the network.
        raise errors.SatoriInvalidIP("`%s` is an invalid IP address." % ip_address)

    whois_record = pythonwhois.get_whois(ip_address)
    return {'whois': whois_record['raw']}
Exemple #25
0
def run_whois_on_domains(words, tld='sh'):
    """Whois-query each word as a <stem>.<tld> domain hack.

    Each word is assumed to end with the tld's letters (e.g. 'crush' ->
    'cru.sh' for tld='sh'): the suffix is cut off and re-attached after a dot.

    :returns: dict mapping domain -> parsed whois result (failed lookups
        are skipped)
    """
    whois_results = {}
    for word in words:
        domain = word[:-len(tld)] + '.' + tld
        print("checking " + domain)
        try:
            whois_result = pythonwhois.get_whois(domain)
        except Exception:
            # The bare except also trapped KeyboardInterrupt; Exception keeps
            # the skip-and-continue behaviour without masking shutdown.
            print("failed on " + domain)
            continue
        whois_results[domain] = whois_result
    return whois_results
Exemple #26
0
def action_whois(domain):
    """Derive a LinkedIn search term from the whois registrant of *domain*.

    Falls back to prompting the user (or to the bare domain name) when the
    whois lookup fails or has no registrant name.
    """
    try:
        whois_things = pythonwhois.get_whois(domain)
        company = whois_things['contacts']['registrant']['name']
    except (KeyError, TypeError, pythonwhois.net.socket.error):
        # BUG FIX: the original `except KeyError, pythonwhois.net.socket.errno.ETIMEDOUT:`
        # was Python 2 syntax that *assigned* the caught KeyError to that
        # attribute instead of also catching socket timeouts. TypeError covers
        # a None registrant section.
        print(colored('\nWhoisError: You may be behind a proxy or firewall preventing whois lookups. Please supply the registered company name, if left blank the domain name ' + '"' + domain + '"' +' will be used for the Linkedin search. The results may not be as accurate.','red'))
        temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
        if temp_company == '':
            company = domain
        else:
            company = temp_company
Exemple #27
0
def domain_info(domain):
    """Get as much information as possible for a given domain name."""
    domain = get_registered_domain(domain)
    whois_data = pythonwhois.get_whois(domain)
    # First expiration entry drives the days-remaining computation.
    expiry = whois_data['expiration_date'][0]
    remaining_days = (expiry - datetime.datetime.now()).days
    return {
        'name': domain,
        'whois': whois_data['raw'],
        'registrar': whois_data['registrar'][0],
        'nameservers': whois_data['nameservers'],
        'days_until_expires': remaining_days,
    }
Exemple #28
0
def chk_whois(domain):
    """Collect whois details for *domain*'s second-level domain.

    :returns: dict with email / registrar / registrant / tel / ns /
        createdate / expirationdate / updateddate keys; values default to ''
        when unknown.
    NOTE: .ca lookups are known not to work - needs investigation.
    """
    email = ns = createdate = expirationdate = updateddate = registrar = registrant = tel = ''
    w = dict(email=email, registrar=registrar, registrant=registrant, tel=tel, ns=ns,
             createdate=createdate, expirationdate=expirationdate, updateddate=updateddate)

    if chk_domain(domain):
        hostname, secondLD = chk_hostname(domain)
        try:
            #   whois data is registered at the second-level domain
            msg = 'parsing whois data of: %s ... ' % (secondLD)
            #logging.info(msg)
            ans = pythonwhois.get_whois(secondLD, True)
        except Exception:
            return w

        # has_key() was removed in Python 3; `in` / .get() chains replace the
        # original deeply nested checks with identical outcomes.
        contacts = ans.get('contacts', {})
        admin = contacts.get('admin') or {}
        if admin.get('email') is not None:
            email = admin['email']
        registrant_info = contacts.get('registrant') or {}
        if registrant_info.get('name') is not None:
            registrant = registrant_info['name']
        if registrant_info.get('phone') is not None:
            tel = registrant_info['phone']

        if 'registrar' in ans:
            if ans['registrar'] is not None:
                registrar = ans['registrar']
            # A list holds several registrar entries; keep the first.
            if type(ans['registrar']) is list:
                registrar = ans['registrar'][0]
        if ans.get('nameservers') is not None:
            ns = ans['nameservers'][0]
        if ans.get('creation_date') is not None:
            createdate = ans['creation_date']
        if ans.get('expiration_date') is not None:
            expirationdate = ans['expiration_date']
        if ans.get('updated_date') is not None:
            updateddate = ans['updated_date']
        if createdate == '' and updateddate != '':
            # Fall back to the last-updated date when creation is missing.
            createdate = updateddate
        w = dict(email=email, registrar=registrar.encode('utf-8'),
                 registrant=registrant.encode('utf-8'), tel=tel, ns=ns,
                 createdate=createdate, expirationdate=expirationdate, updateddate=updateddate)
    else:
        msg = '[*] no whois record: %s ...' % (domain)
        logging.info(msg)
    return w
Exemple #29
0
def IndividualWhoisLookups(domains):
    """Whois each entry in *domains* and pretty-print the filtered results.

    Each entry is a dict with 'domain', 'created_date' and 'registrar' keys.
    False positives (no whois data, search domain absent from output, or an
    explicit no-match response) are reported and skipped. Relies on
    module-level `args`, `outfile` and `pp`; Python 2 syntax throughout.
    """
    print '[ ] Starting individual domain lookups in 10 seconds'
    print '[*]    If the output "hangs" on a lookup, press CTRL-C to go to next entry'
    time.sleep(10)

    for line in domains:
        # Make a whois lookup of the domain
        try:
            w = pythonwhois.get_whois(line['domain'], normalized=True)

            print '------------------------------------------------------------'
            print '"%s","%s","%s"\n' % (line['domain'],line['created_date'],line['registrar'])

            # Look for false positives in web output by doing search of whois results
            if re.match('NOT FOUND', w['raw'][0]):
                # Some 'found' content fails specific whois. This is a false positive.
                print '[!]   ERROR: No valid Whois data for %s' % line['domain']
                outfile.write('[!]   ERROR: No valid Whois data for %s' % line['domain'])
                continue

            elif not re.findall(args.domain, w['raw'][0], flags=re.IGNORECASE) and not re.findall(args.domain, w['raw'][1], flags=re.IGNORECASE):
                # Is the search domain actually in any of the output?
                print '[!]   ERROR: %s not found in %s' % (args.domain, line['domain'])
                outfile.write('[!]   ERROR: %s not found in %s' % (args.domain, line['domain']))
                continue

            elif re.search('No match for ', w['raw'][0], flags=re.IGNORECASE):
                # The Whois failed
                print '[!]   ERROR: %s no match in Whois' % args.domain
                outfile.write('[!]   ERROR: %s no match in Whois' % args.domain)
                continue

            else:
                # Print all the things except the "raw" element
                del w['raw']
                pp.pprint(w)

                # Output to outfile
                if args.outfile:
                    outfile.write('------------------------------------------------------------\n')
                    outfile.write('Domain: %s, Registered on: %s, Registrar: %s\n' % (line['domain'],line['created_date'],line['registrar']))
                    pp.pprint(w, stream=outfile, indent=4)

        except KeyboardInterrupt:
            # Sense and continue if user presses ctrl-c (used for times the script gets...er...stuck)
            continue

        except Exception, e:
            print '[!]   ERROR: Exception caught: %s' % str(e)
Exemple #30
0
    def get_whois_domain(self,num=0):
        '''
        Whois lookup on self.domain, stored in self.whois_domain as a raw
        string: each top-level domain formats whois data its own way, so no
        parsing/indexing is attempted here (almost a project on its own).
        '''
        try:
            if not self.domain:
                return
            record = whois.get_whois(self.domain)

            if 'raw' in record:
                self.whois_domain = record['raw'][0].strip()

        except Exception as e:
            Scan.error(e, sys._getframe().f_code.co_name)
Exemple #31
0
    def get_whois(self, domain, tld, retries=3, queried_servers=None, remaining_servers=None):
        ''' given a domain, find whois information in a semi-intelligent way
            using the TLD to server IP list in aux, rotate between whois server IPs authoritative for the TLD
            if all the IPs are throttling us, sleep and try again (sleeping decrements retries)

            BUG FIX: queried_servers/remaining_servers previously defaulted to
            shared set() instances (mutable default arguments), so rotation
            state leaked between independent top-level calls; None sentinels
            restore per-call state while keeping the interface compatible.
        '''
        if queried_servers is None:
            queried_servers = set()
        if remaining_servers is None:
            remaining_servers = set()

        self._log_this(domain, 'received whois call')
        # base case
        if retries < 1:
            self._log_this(domain, 'whois failed all time, bailing')
            return {}

        tld = '.' + tld.strip('.')

        # we know a set of IPs responsible for whois info for this tld, so we try to rotate between them
        if tld in self.whois_server_ips:
            self._log_this(domain, 'tld found in whois_server_ips')
            # this is the first iteration
            if len(queried_servers) == 0 and len(remaining_servers) == 0:
                remaining_servers.update([ip for hostname in self.whois_server_ips[tld] for ip in self.whois_server_ips[tld][hostname]])
                self._log_this(domain, 'iterating over the following whois servers: %s' % (remaining_servers))

            # we've queried all the servers we can and now need to try sleeping
            if len(remaining_servers) == 0 and len(queried_servers) > 0:
                self._log_this(domain, 'querying whois with no specified server')
                try:
                    w = pythonwhois.get_whois(domain)
                except Exception:
                    sys.stderr.write('domain: %s whois returned no results retries remaining: %d\n' % (domain, retries))
                    time.sleep(self.whois_sleep_seconds)
                    return self.get_whois(domain, tld, retries=retries-1)
            # remaining servers exist, let's try querying them before trying sleep
            else:
                server = random.sample(remaining_servers, 1)[0]
                queried_servers.add(server)
                remaining_servers.remove(server)
                self._log_this(domain, 'querying whois with specific server: %s' % (server))
                try:
                    w = pythonwhois.parse.parse_raw_whois(pythonwhois.net.get_whois_raw(domain, server=server))
                except Exception:
                    sys.stderr.write('domain: %s whois returned no results from server: %s, retries remaining: %d\n' % (domain, server, retries))
                    # NO SLEEP
                    return self.get_whois(domain, tld, retries=retries, remaining_servers=remaining_servers, queried_servers=queried_servers)
        # the tld is not in our whois server list and we must use sleep to avoid being throttled
        else:
            self._log_this(domain, 'querying whois with no specified server')
            try:
                w = pythonwhois.get_whois(domain)
            except Exception:
                sys.stderr.write('domain: %s whois returned no results retries remaining: %d\n' % (domain, retries))
                time.sleep(self.whois_sleep_seconds)
                return self.get_whois(domain, tld, retries=retries-1)

        # once we have a response...
        # messagepack (used by zerorpc) can't serialize datetime objects, so we make them strings :\
        for date in ('expiration_date', 'creation_date', 'updated_date', 'changedate'):
            if date in w:
                w[date] = self._datetime_list_to_str(w[date])
            for category in ('registrant', 'tech', 'billing', 'admin'):
                if ('contacts' in w) and (category in w['contacts']) and (w['contacts'][category] is not None) and (date in w['contacts'][category]):
                    w['contacts'][category][date] = self._datetime_to_str(w['contacts'][category][date])
        return w
Exemple #32
0
def is_domain_available(domain):
    """True when the whois record for *domain* carries no "id" field."""
    record = pythonwhois.get_whois(domain)
    return "id" not in record
Exemple #33
0
def is_registered(site):
    """Check if a domain has an WHOIS record."""
    raw_text = pythonwhois.get_whois(site)['raw'][0]
    return not raw_text.startswith('No match for')
Exemple #34
0
def whois():
    """Resolve the module-level `url` to an IP address and print its whois record."""
    ip_address = socket.gethostbyname(url)

    print("Whois ========>> ", pythonwhois.get_whois(ip_address))
Exemple #35
0
# Generate candidate single-character kill-switch domains (WannaCry-style).
def gen_switches():
    """Return 26 candidate kill-switch domains: the fixed prefix plus one
    random lowercase letter and '.com'. Each candidate is printed as it is
    generated. Letters are drawn independently, so duplicates can occur."""
    prefix = 'iuqerfsodp9ifjaposdfjhgosurijfaewrwergwe'
    switches = []
    for _ in range(26):
        candidate = prefix + random.choice(string.ascii_lowercase) + '.com'
        print(candidate)
        switches.append(candidate)
    return switches


# Whois every generated kill-switch domain and collect those that are
# actually registered (i.e. expose registrant contact data).
registerd_switches = []
killswitch_domains = gen_switches()
for dom in killswitch_domains:
    details = pythonwhois.get_whois(dom)
    if details:
        try:
            print(details['contacts']['registrant'])
            local_info = {
                'domain': dom,
                'whois_info': details['contacts']['registrant']
            }
            registerd_switches.append(local_info)

        except:
            # Missing 'contacts'/'registrant' keys: domain not registered.
            pass

# Report every registered kill-switch found.
for info in registerd_switches:
    print(info)
Exemple #36
0
def get_whois(url):
    """Whois *url* and track success in a status dict.

    NOTE(review): Python 2 `except X, e` syntax; the body appears truncated -
    whois_raw is fetched but never merged into whois_dict and nothing is
    returned. Confirm against the full source.
    """
    whois_dict = { 'status':'OK' }
    try:
        whois_raw = pythonwhois.get_whois(url)
    except pythonwhois.shared.WhoisException, e:
        whois_dict['status'] = 'Error: %s' % str(e)
Exemple #37
0
def action_whois(domain):

    try:
        whois_things = pythonwhois.get_whois(domain)
        try:
            company = whois_things['contacts']['registrant']['name']
        except Exception:
            print '\nThere seems to be no Registrar for this domain.'
            company = domain
            pass
        splitup = company.lower().split()
        patern = re.compile('|'.join(splitup))
        while True:
            if patern.search(domain):
                info('Whois Results Are Good ' + company)
                print '\nThe Whois Results Look Promising: ' + colored('{}','green').format(company)
                accept = raw_input(colored('\nIs The Search Term sufficient?: ','green')).lower()
                if accept in ('y', 'yes'):
                    company = company
                    break
                elif accept in ('n', 'no'):
                    temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                    if temp_company == '':
                        info('User Supplied Blank Company')
                        company = domain
                        break
                    else:
                        info('User Supplied Company ' + company)
                        company = temp_company
                        break
                else:
                    print '\nThe Options Are yes|no Or y|no Not {}'.format(accept)

            else:
                info('Whois Results Not Good ' + company)
                print colored("\n\tThe Whois Results Don't Look Very Promissing: '{}'","red") .format(company)
                print'\nPlease Supply The Company Name\n\n\tThis Will Be Used To Query LinkedIn'
                temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
                if temp_company == '':
                    info('User Supplied Blank Company')
                    company = domain
                    break
                else:
                    info('User Supplied Company ' + company)
                    company = temp_company
                    break


    except pythonwhois.shared.WhoisException:
        pass
    except socket.error:
        pass
    except KeyError:
        pass
    except pythonwhois.net.socket.errno.ETIMEDOUT:
        print colored('\nWhoisError: You may be behind a proxy or firewall preventing whois lookups. Please supply the registered company name, if left blank the domain name ' + '"' + domain + '"' +' will be used for the Linkedin search. The results may not be as accurate.','red')
        temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
        if temp_company == '':
            company = domain
        else:
            company = temp_company
    except Exception:
        info('An Unhandled Exception Has Occured, Please Check The Log For Details' + INFO_LOG_FILE)
    if 'company' not in locals():
        print 'There is no Whois data for this domain.\n\nPlease supply a company name.'
        while True:
            temp_company = raw_input(colored('\nRegistered Company Name: ','green'))
            if temp_company == '':
                info('User Supplied Blank Company')
                company = domain
                break
            else:
                company = temp_company
                info('User Supplied Company ' + company)
                break

    return company
Exemple #38
0
    def on_load(self):
        __version__ = '1.8'
        __author__ = "Josh"
        __date__ = datetime.datetime.now()
        __tools__ = "16"

        gloom_tools = [
            'admin_panel_finder', 'web_whois', 'ip_geolocation',
            'discover_lan_devices', 'website_ip_resolver', 'wifi_jammer',
            'listener', 'waf_scanning', 'port_scanner', 'payload_generator'
            'alot_more_tools'
        ]

        try:
            gloom_prompt = raw_input("\n[" + colored('gloom', 'red') + "] ")

            if gloom_prompt == 'exit':
                sys.exit(0)

            if gloom_prompt == 'clear':
                os.system('clear')
                return gloom_main.on_load()

            if gloom_prompt == 'info':
                print("Developer: " + colored(__author__, 'green'))
                print("Version: " + colored(__version__, 'green'))
                print("Date: " + colored(__date__, 'green'))
                print("Tools: " + colored(__tools__, 'blue'))
                return gloom_main.on_load()

            if gloom_prompt == 'load':
                load_mod = gloom_prompt + ''
                if load_mod not in gloom_tools:
                    print("[gloom_error] " +
                          colored("Failed To Load Module", 'red'))
                    return gloom_main.on_load()

                elif load_mod in gloom_tools:
                    print(gloom_prompt + '(' + colored(load_mod, 'red') + ')')

            if gloom_prompt == 'help':
                print "\n"
                print "\t\t|Core Commands|"
                print "=" * 40
                print "help :: " + colored("Show This List Again", 'blue')
                print "clear :: " + colored("Clear Screen", 'blue')
                print "exit :: " + colored("Leaves Gloom Framework", 'blue')
                print "info :: " + colored("Shows Tool Information", 'blue')
                print "\n"
                print "\t\t|Tools|"
                print "=" * 40
                print "android_attack :: " + colored(
                    "Sends an android payload via. Email and Spawns a meterpreter shell.",
                    'blue')
                print "admin_panel_finder :: " + colored(
                    "Find Website Admin Panels", 'blue')
                print "ip_geolocation :: " + colored("Geolocate an IP Address",
                                                     'blue')
                print "web_whois :: " + colored(
                    "Gather WHOIS Information on a Target", 'blue')
                print "\n"
                print "discover_lan_devices :: " + colored(
                    "Detect Users/Devices on LAN", 'blue')
                print "website_ip_resolver :: " + colored(
                    "Get IP Address of Website", 'blue')
                print "payload_generator :: " + colored(
                    "Generate Windows Payload", 'blue')
                print "\n"
                print "wifi_jammer :: " + colored(
                    "Wireless Deauthentication Attack", 'blue')
                print "listener :: " + colored(
                    "Listen For Connections on Network", 'blue')
                print "check_email_breach :: " + colored(
                    "Check if an Email has Been Comprimised", 'blue')
                print "\n"
                print "waf_scanning :: " + colored("Scan for Website Firewall",
                                                   'blue')
                print "port_scanner :: " + colored("Scan for Open Ports",
                                                   'blue')
                print "anon_email :: " + colored("Send An Anonymous Email",
                                                 'blue')
                print "\n"
                print "\t\t|Fuzzing|"
                print "=" * 40
                print "tcp_fuzzer :: " + colored(
                    "Fuzz Applications via. TCP(Port 443)", 'blue')
                print "ftp_fuzzer :: " + colored(
                    "Fuzz Applications via. FTP(Port 21)", 'blue')
                print "http_fuzzer :: " + colored(
                    "Fuzz Applications via. HTTP(Port 80)", 'blue')

                return gloom_main.on_load()

            elif gloom_prompt == 'admin_panel_finder':
                findAdmin()
                return gloom_main.on_load()

            elif gloom_prompt == 'ip_geolocation':
                geolocate()
                return gloom_main.on_load()

            elif gloom_prompt == 'discover_lan_devices':
                DiscoverLiveHosts()
                return gloom_main.on_load()

            elif gloom_prompt == 'website_ip_resolver':
                ResolveIP()
                return gloom_main.on_load()

            elif gloom_prompt == 'web_whois':
                os.system('clear')
                CORE_STRING = colored("[web_whois]", 'blue')
                TARGET = raw_input(CORE_STRING + " Website> ")
                domains = [TARGET]
                for dom in domains:
                    details = pythonwhois.get_whois(dom)
                    print details['contacts']['registrant']

                return gloom_main.on_load()

            elif gloom_prompt == 'wifi_jammer':
                RunScapyBasicDeauthentication()
                return gloom_main.on_load()

            elif gloom_prompt == 'listener':
                sockListen()
                sockAccept()
                return gloom_main.on_load()

            elif gloom_prompt == 'waf_scanning':
                RunDetection()
                return gloom_main.on_load()

            elif gloom_prompt == 'port_scanner':
                PortScanner()
                return gloom_main.on_load()

            elif gloom_prompt == 'payload_generator':
                payload = EXEPayloads()
                payload.GetInputVariables()
                payload.Meterpreter()
                return gloom_main.on_load()

            elif gloom_prompt == 'tcp_fuzzer':
                fuzztcp = Fuzzer()
                fuzztcp.TCPFuzzer()
                return gloom_main.on_load()

            elif gloom_prompt == 'ftp_fuzzer':
                fuzzftp = Fuzzer()
                fuzzftp.FTPFuzzer()
                return gloom_main.on_load()

            elif gloom_prompt == 'http_fuzzer':
                fuzzhttp = Fuzzer()
                fuzzhttp.HTTPFuzzer()
                return gloom_main.on_load()

            elif gloom_prompt == 'check_email_breach':
                breacher = Breach()
                CORE_STRING = colored("[check_email_breach]", 'blue')
                breacher.RunMainCheck(raw_input(CORE_STRING + " Email> "))
                return gloom_main.on_load()

            elif gloom_prompt == 'anon_email':
                os.system('clear')
                anon = AnonEmail()
                anon.SendAnonEmail()
                return gloom_main.on_load()

            elif gloom_prompt == 'android_attack':
                attack = SMS()
                attack.do_login()
                attack.do_payload()
                attack.do_sms_mail()
                attack.do_metasploit()
                return gloom_main.on_load()

            else:
                print("[gloom_error] " +
                      colored("Unknown Command/Argument!", 'red'))
                return gloom_main.on_load()

        except KeyboardInterrupt:
            cprint(
                "\n[!] You Pressed Ctrl + C! Next time type 'exit' To Quit.",
                'red')
            sys.exit(1)

        except:
            pass
Exemple #39
0
#!/usr/bin/python
# -*- coding: utf-8 -*-

import pythonwhois
import sys

# Simple CLI driver: expects one or two targets on argv.
# NOTE(review): although up to two targets are accepted, only
# sys.argv[1] is ever queried -- a second target is silently ignored;
# confirm whether looping over sys.argv[1:] was intended.
# (User-facing messages are in Spanish and are left untouched.)
if len(sys.argv) == 1:
    print "Introduzca un objetivo: "
elif len(sys.argv) > 3:
    print "El máximo de objetivos son 2"
elif len(sys.argv) == 2 or len(sys.argv) == 3:
    whois = pythonwhois.get_whois(
        sys.argv[1]
    )  # whois lookup from the whois module; takes the domain name as argument
    for key in whois.keys():
        print "[+] %s : %s \n" % (key, whois[key])  # walk the result dictionary
Exemple #40
0
 def get_whois(self):
     """Return WHOIS data for this object's domain, or None on any failure."""
     try:
         return pythonwhois.get_whois(domain=self.__domain)
     except Exception:
         # Best-effort lookup: any network/parse error yields None.
         return None
Exemple #41
0
import pythonwhois, json, sys

# Path to an input file listing one domain per line.
url = sys.argv[1]

# Use a context manager so the input handle is closed (the original
# leaked the open file object).
with open(url, 'r') as infile:
    a = infile.readlines()

print(a)

result = []
for line in a:
    # Strip the trailing newline before querying WHOIS.
    domain = line.replace("\n", "")
    result.append(pythonwhois.get_whois(domain))

# BUG FIX: file.write() requires a string -- the original passed the
# `result` list directly, which raises TypeError.  Serialize the records
# as JSON instead (default=str deliberately stringifies the datetime
# objects that WHOIS results contain).
with open('result.txt', 'w') as outfile:
    outfile.write(json.dumps(result, default=str))
Exemple #42
0
def get_expiration_date(hostname):
    """Return the first WHOIS expiration date for *hostname*, or '' if absent."""
    record = pythonwhois.get_whois(hostname)
    dates = record.get('expiration_date')
    if dates is None:
        return ''
    return dates[0]
Exemple #43
0
def getwhois(site):
    """Fetch WHOIS data for *site*, or a message when its TLD is unsupported."""
    try:
        # Return the lookup result directly instead of staging it in a local.
        return get_whois(site)
    except WhoisException:
        return "No WHOIS root server found for \'{0}\' domain".format(site)
    ]
    if ((containhttps == 1) and (issued_by in issuers) and (duration >= 1)):
        print(1)
    elif ((containhttps == 1) and (issued_by in issuers)):
        print(0)
    else:
        print(-1)
except:
    print(-1)

#domain registration length
print('drl')
domain = host

try:
    w = pythonwhois.get_whois(domain)
    if 'id' not in w:
        print(-1)
    else:

        ud = w['updated_date']

        ed = w['expiration_date']

        diff = ed[0] - ud[0]

        comp = datetime.timedelta(365, 0, 0, 0)

        if (diff > comp):
            print(1)
        else:
Exemple #45
0
def main():
    """Generate bitsquat variants of a target URL and optionally WHOIS-check them.

    Command line: -u/--url <target>, -c/--check (enable WHOIS availability
    checking), -h/--help.  For each character of the URL's first label,
    flips single bits to find other valid URL characters, prints every
    candidate domain, and -- when checking is enabled -- reports whether
    each is registered, sleeping WHOIS_SLEEP seconds between lookups to
    dodge rate limiting.
    """
    if len(sys.argv) == 1:
        usage()
        sys.exit(1)

    whois_check = False
    whois_rate_limit = False
    target_url = ''
    available_found = 0
    not_available_found = 0

    try:
        opts,args = getopt.getopt(sys.argv[1:],"u:ch",["url","check","help"])
    except getopt.GetoptError as e:
        print(str(e))
        usage()
        sys.exit(1)

    for o,a in opts:
        if o in ("-u", "--url"):
            target_url = a
        elif o in ("-c", "--check"):
            # BUG FIX: this used to be `whois_check = target_url`, which
            # silently disabled checking whenever -c preceded -u (target_url
            # was still '' and therefore falsy).  The flag is a boolean.
            whois_check = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
        else:
            print("[!] Invalid option {} with value {}".format(o,a))
            sys.exit(1)

    # Only want the url name first, so split the tld out
    split_url = target_url.split('.', 1)

    if len(split_url) == 1:
        usage()
        sys.exit(1)

    # Converting the url to a list of binary numbers representing the letters
    binary_list = string_to_binary_list(split_url[0])

    print("[+] Generating bitsquat domains")

    binary_results = []

    # Loop over each letter checking to see if by changing one bit, another
    # valid url character is generated
    for i,byte in enumerate(binary_list):

        # First/last characters of a label have stricter validity rules.
        if i == 0 or i == (len(binary_list) - 1):
            start_end = True
        else:
            start_end = False

        replacements = check_byte(byte, start_end)
        for replacement in replacements:
            test_list = binary_list.copy()
            test_list[i] = replacement
            binary_results.append(test_list)

    print("[+] Finished generating bitsquat domains")

    for binary_result in binary_results:
        binary_result_string = binary_list_to_string(binary_result)

        url = "{}.{}".format(binary_result_string,split_url[1])

        print("[+] Found {}".format(url))

        if whois_check and not whois_rate_limit:
            print("\t[+] Checking if registered")

            whois = pythonwhois.get_whois(url)

            # Hoist the status lookup; the original called .get() twice.
            status = whois.get('status')
            if status and RATE_LIMIT_STRING in status:
                print("[!] Hit whois rate limit, continuing without whois")
                whois_rate_limit = True

            # A present status field means the domain is registered.
            if status and not whois_rate_limit:
                print("\t[-] Not available")
                not_available_found += 1
            else:
                print("\t[+] Available")
                available_found += 1

            print("\t[+] Waiting {} seconds until next check".format(WHOIS_SLEEP))

            # Delay to prevent rate limiting
            time.sleep(WHOIS_SLEEP)

    print("[+] Completed. Total domains found: {}".format(len(binary_results)))
    if whois_check:
        print("[+] Total available domains: {}".format(available_found))
        print("[+] Total not available domains: {}".format(not_available_found))
Exemple #46
0
    def Scrape(this, URL, days_update=0):
        # check if number of days_update days have passed since last update, and if so, update
        if Crawler.crawler_api.storage.RowExists('scores', URL.UniqueName()):
            last_update = Crawler.crawler_api.storage.GetSingleValue(
                'scores', URL.UniqueName(), 'lastUpdate')
            last_update = datetime.datetime.strptime(last_update,
                                                     '%Y-%m-%d %H:%M:%S')
            current_date = datetime.datetime.now()
            difference = (current_date - last_update).days
            if difference < days_update:
                this.PrintDivider()
                this.PrintDebug('Skip scrape: ' + URL.UniqueName() +
                                '; last scraped: ' + str(last_update))
                return
            # else, start scraping
        try:
            this.PrintDivider()
            this.PrintDebug('STARTING SCRAPE: ' + URL.Full_URL)
            ### 1. structure characteristics
            this.PrintDivider()
            this.PrintUpdate('obtaining structure-based characteristics')
            this.PrintDivider()
            # - 1.1 number of pages
            number_of_pages = -1.0
            number_of_pages_raw = -1.0  # also store a raw score for data analysis
            # crawl through the URL and each subsequent inbound URL
            crawled = []
            crawl_count = 0
            max_urls = 50
            # - get landing page
            landing_page = CrawlPage(this.JSCrawl(URL.Full_URL))
            inbounds = landing_page.GetInboundURLs()
            crash_pages = []

            this.PrintNote('scraping: ' + URL.Full_URL)
            page_url = (landing_page.URL.Hostname +
                        landing_page.URL.Path).replace('//', '/')
            if page_url[len(page_url) - 1] == '/':
                page_url = page_url[:-1]
            depth_levels = {page_url: 0}
            inbounds.append(page_url)

            # 1.1.1 store depth levels of landing page found inbound urls
            for inbound in inbounds:
                if inbound not in URL.Full_URL and inbound not in page_url:
                    depth_levels[inbound] = 1

            crawled.append(landing_page)

            # 1.1.2. then from each found (inbound) URL, keep crawling until maximum crawl limit is reached
            fail_loop_attempts = 0
            while crawl_count < len(inbounds) and crawl_count < max_urls - 1:
                inbound = inbounds[crawl_count]
                if inbound not in crawled and inbound not in crash_pages:
                    try:
                        with timeout(seconds=20):
                            # crawl next page and obtain new inbound/outbound urls
                            this.PrintNote('scraping: ' + 'http://' + inbound)

                            response = this.JSCrawl('http://' + inbound)
                            crawl_page = CrawlPage(response)
                            new_inbounds = crawl_page.GetInboundURLs()
                            # for each of the new_inbounds, set their depth level if less than currently stored (or not yet stored)
                            for new_inbound in new_inbounds:
                                if new_inbound not in depth_levels:
                                    depth_levels[new_inbound] = depth_levels[
                                        inbound] + 1
                            # merge results
                            inbounds = inbounds + list(
                                set(new_inbounds) - set(inbounds))
                            # then continue
                            crawled.append(crawl_page)
                            crawl_count = crawl_count + 1
                    except Exception as ex:
                        this.PrintError('EXCEPTION: ' + str(ex))
                        crash_pages.append(inbound)
                        crawl_count = crawl_count + 1
                else:
                    fail_loop_attempts += 1  # aborts loop after 10000 tries; indicating infinite loop
                    if fail_loop_attempts > 10000:
                        this.PrintError(
                            'INFINITE LOOP detected; aborting scrape')
                        break

            # 1.1.3 calculate scores
            # use quadratic equation (to give numbers with low pages higher scores)
            # equation: y = -2x^2 + 1 ... (y=0) = 0.707
            # with 25 pages, score is 0.75, so first half pages give 1/4 drop-down in score
            number_of_pages_raw = len(inbounds)
            if number_of_pages_raw > 0:
                number_of_pages = -2 * (number_of_pages_raw /
                                        (50 / 0.707106781))**2 + 1
                number_of_pages = max(number_of_pages, 0.0)
            this.PrintUpdate('number of pages: ' + str(number_of_pages_raw))

            # - 1.2. URL type
            # how to determine its url type? difficult/impossible to determine programmaticaly
            url_type_raw = landing_page.URLType()
            if url_type_raw == 2:
                url_type = 0.0
            else:
                url_type = 1.0
            this.PrintUpdate('url type: ' + str(url_type_raw))

            # - 1.3. Average depth level
            # take previously retrieved depth levels and take average
            average_depth_level = 0.0
            average_depth_level_raw = 0.0
            for depth_url in depth_levels:
                average_depth_level_raw = average_depth_level_raw + depth_levels[
                    depth_url]
            # calculate score: take linear value between 1.0 and 3.0
            average_depth_level_raw = average_depth_level_raw / len(
                depth_levels)
            if average_depth_level_raw <= 1.0:
                average_depth_level = 1.0
            else:
                average_depth_level = max(
                    1.0 - ((average_depth_level_raw - 1.0) / 2.0), 0.0)
            this.PrintUpdate('average depth level: ' +
                             str(average_depth_level_raw))

            # - 1.4. Average URL length
            average_url_length = -1.0
            average_url_length_raw = -1.0
            for page in inbounds:  # use inbounds, not pages crawled as they give much more results
                average_url_length_raw = average_url_length_raw + len(page)
            # calculate score: interpolate linearly from lowest occurence to highest Booter occurence
            average_url_length_raw = average_url_length_raw / len(inbounds)
            if average_url_length_raw <= 15:
                average_url_length = 1.0
            else:
                average_url_length = max(
                    1.0 - ((average_url_length_raw - 15) / 15), 0.0)
            this.PrintUpdate('average url length: ' +
                             str(average_url_length_raw))

            ### 2. content-based characteristics
            this.PrintDivider()
            this.PrintUpdate('obtaining content-based characteristics')
            this.PrintDivider()

            # get whois information
            # "Each part represents the response from a specific WHOIS server. Because the WHOIS doesn't force WHOIS
            # servers to follow a unique response layout, each server needs its own dedicated parser."
            domain_age = -1.0
            domain_age_raw = -1.0
            domain_reservation_duration = -1.0
            domain_reservation_duration_raw = -1.0
            try:
                with timeout(seconds=10):
                    whois = pythonwhois.get_whois(
                        landing_page.GetTopDomain(),
                        False)  # http://cryto.net/pythonwhois/usage.html
            except Exception as ex:
                this.PrintError('EXCEPTION: get WHOIS data: ' + str(ex))
            try:
                # - 2.1. Domain age
                current_date = datetime.datetime.today()
                date_registered = whois['creation_date'][0]
                domain_age_raw = (current_date - date_registered).days
                # calculate score: linear interpolation between current_date and first occurence of
                # booter in data: 2011
                days_since_first = (current_date -
                                    datetime.datetime(2011, 10, 28)).days
                domain_age = max(1.0 - (domain_age_raw / days_since_first),
                                 0.0)
                this.PrintUpdate('domain age: ' + str(domain_age_raw))
            except Exception as ex:
                this.PrintError(
                    'EXCEPTION: whois keywords, likely registrar: ' + str(ex))

            try:
                # - 2.2 Domain reservation duration
                current_date = datetime.datetime.today()
                expire_date = whois['expiration_date'][0]
                domain_reservation_duration_raw = (expire_date -
                                                   current_date).days
                # calculate score: between 1 - 2 years; < 1 year = 1.0
                if domain_reservation_duration_raw < 183:
                    domain_reservation_duration = 1.0
                else:
                    # domain_reservation_duration = max(1.0 - ((domain_reservation_duration_raw - 365) / 365), 0.0)
                    domain_reservation_duration = max(
                        1.0 - (domain_reservation_duration_raw - 183) / 182,
                        0.0)
                this.PrintUpdate('domain reservation duration: ' +
                                 str(domain_reservation_duration_raw))
            except Exception as ex:
                this.PrintError(
                    'EXCEPTION: whois keywords, likely registrar: ' + str(ex))

            # - 2.3. WHOIS private
            # there doesn't exist a private WHOIS field, but private information can be obtained through
            # heuristics using common phrases found by privacy-replacing registry information.
            try:
                private_phrases = [
                    'whoisguard',
                    'whoisprotect',
                    'domainsbyproxy',
                    # 'whoisprivacyprotect', # are caught by privacy term anyways
                    'protecteddomainservices',
                    # 'myprivacy',
                    # 'whoisprivacycorp',
                    # 'privacyprotect',
                    'namecheap',
                    'privacy',
                    'private',
                ]
                whois_private = 0.0
                reg_name = whois['contacts']['registrant']['name'].lower()
                reg_email = whois['contacts']['registrant']['email'].lower()
                for phrase in private_phrases:
                    if phrase in reg_name or phrase in reg_email:  # or phrase in reg_org :
                        whois_private = 1.0
                        break
            except Exception as ex:
                this.PrintError(
                    'EXCEPTION: whois keyfields, private set to -1.0: ' +
                    str(ex))
                whois_private = -1.0
            this.PrintUpdate('WHOIS private: ' + str(whois_private))

            # - 2.4. DPS
            # similar to whois private, use heuristics to determine whether website uses DPS,
            # first we try to determine whether it uses DNS based DPS by checking nameservers
            try:
                with timeout(seconds=10):
                    dps_names = [
                        'cloudflare',
                        'incapsula',
                        'prolexic',
                        'akamai',
                        'verisign',
                        'blazingfast',
                    ]
                    dps = 0.0
                    if 'nameservers' in whois:
                        for nameserver in whois['nameservers']:
                            if dps == 0.0:
                                for dps_name in dps_names:
                                    if dps_name in nameserver.lower():
                                        dps = 1.0
                                        break
                    # if nothing found from nameservers, also check redirection history if dps redirect page was used
                    if dps < 0.5:
                        response_text = this.Session.post(
                            URL.Full_URL,
                            headers=this.Header,
                            allow_redirects=False).text
                        this.PrintNote(
                            'No DPS detected from NS; checking re-direction history'
                        )
                        for dps_name in dps_names:
                            if dps_name in response_text:
                                dps = 1.0
            except Exception as ex:
                this.PrintError('EXCEPTION: dps set to -1.0: ' + str(ex))
                dps = -1.0
            this.PrintUpdate('DPS: ' + str(dps))

            # - 2.5. Page rank
            try:
                url = 'http://data.alexa.com/data?cli=10&dat=s&url=' + URL.Hostname
                response = this.Session.get(url)
                tree = etree.XML(response.text.encode('utf-8'))
                page_rank_raw = tree.xpath('(//REACH)/@RANK')[0]
                page_rank = 0.0
                if int(
                        page_rank_raw
                ) > 200000:  # determined from highest booter (ipstresser.com - vdos-s.com) minus offset
                    page_rank = 1.0
            except Exception as ex:
                page_rank_raw = 25426978.0  # set to highest occuring page rank (lower than that if non-existent)
                page_rank = 1.0

            this.PrintUpdate('Page rank: ' + str(page_rank_raw))

            ### 3. host-based characteristics
            this.PrintDivider()
            this.PrintUpdate('obtaining host-based characteristics')
            this.PrintDivider()

            # - 3.1. Average content size
            average_content_size = 0.0
            average_content_size_raw = 0.0
            crawl_contents = []
            for crawl_page in crawled:
                crawl_content = crawl_page.GetContent()
                crawl_contents.append(crawl_content)
                average_content_size_raw = average_content_size_raw + len(
                    crawl_content)
            average_content_size_raw = average_content_size_raw / len(crawled)
            # calculte score: linear interpolation between 50 - (avg_max_booter = 250)
            if average_content_size_raw < 50:
                average_content_size = 1.0
            else:
                average_content_size = max(
                    1.0 - (average_content_size_raw - 50) / 200, 0.0)
            this.PrintUpdate('Average content size: ' +
                             str(average_content_size_raw))

            # - 3.2. Outbound hyperlinks
            outbound_hyperlinks = 0.0
            outbound_hyperlinks_raw = 0.0
            for crawl_page in crawled:
                outbound_hyperlinks_raw = outbound_hyperlinks_raw + len(
                    crawl_page.GetOutboundURLs())
            outbound_hyperlinks_raw = outbound_hyperlinks_raw / len(crawled)
            # calculate score: linear interpolation between 0 and 2
            outbound_hyperlinks = max(1.0 - outbound_hyperlinks_raw / 2.0, 0.0)
            this.PrintUpdate('Average outbound hyperlinks: ' +
                             str(outbound_hyperlinks_raw))

            # - 3.3. Category-specific dictionary
            dictionary = [
                'stress', 'booter', 'ddos', 'powerful', 'resolver', 'price'
            ]  # or pric, so we can also get items like pricing
            category_specific_dictionary = 0.0
            category_specific_dictionary_raw = 0.0
            words = landing_page.GetContent()
            for item in dictionary:
                for word in words:
                    if item in word.lower():
                        category_specific_dictionary_raw = category_specific_dictionary_raw + 1
            # - now calculate percentage of these words occuring relative to total page content
            if len(words) > 0:
                category_specific_dictionary_raw = category_specific_dictionary_raw / len(
                    words)
            else:
                category_specific_dictionary_raw = 0.0
            # calculate score: interpolate between 0.01 and 0.05
            category_specific_dictionary = max(
                1.0 - (category_specific_dictionary_raw - 0.01) / 0.04), 0.0
            this.PrintUpdate('Category specific dictionary: ' +
                             str(category_specific_dictionary_raw))

            # - 3.4. Resolver indication (only the landing page); perhaps extend to all pages in future version?
            resolver_indication = 0.0
            dictionary = ['skype', 'xbox', 'resolve', 'cloudflare']
            for item in dictionary:
                for word in words:
                    if item in word.lower():
                        resolver_indication = 1.0
            this.PrintUpdate('Resolver indication: ' +
                             str(resolver_indication))

            # - 3.5. Terms of Services page
            terms_of_services_page = 0.0
            # - check if one of the urls contains tos or terms and service
            for url in inbounds:
                url = url.lower()
                if '/tos' in url or 'terms' in url and 'service' in url:
                    terms_of_services_page = 1.0
            # - if not yet found, also check for content hints in all the pages
            tos_phrases = [
                'terms and conditions',
                'purposes intended',
                'you are responsible',
                'we have the right',
                'terms of service',
                'understand and agree',
            ]
            if terms_of_services_page < 0.5:
                for content in crawl_contents:
                    text = ' '.join(content).lower()
                    for phrase in tos_phrases:
                        if phrase in text:
                            terms_of_services_page = 1.0
                            break
                    if terms_of_services_page > 0.5:
                        break
            this.PrintUpdate('Terms of services page: ' +
                             str(terms_of_services_page))

            # - 3.6. Login-form depth level
            # this does also take into account register forms, but that's generally expected
            # to be on the same level as login forms so not an issue
            login_form_depth_level = -1.0
            login_form_depth_level_raw = 3.0  # set to max found in dataset if non-existent
            forms_urls = []
            for page in crawled:
                if page.HasLoginForm():
                    page_url = page.URL.Hostname + page.URL.Path + page.URL.Query
                    if page_url[len(page_url) - 1] == '/':
                        page_url = page_url[:-1]
                    forms_urls.append(page_url)
            min_depth = 100
            for url in forms_urls:
                for depth_url in depth_levels:
                    if depth_url == url:
                        if depth_levels[url] < min_depth:
                            min_depth = depth_levels[url]
                        break
            if min_depth != 100:
                login_form_depth_level_raw = min_depth
            # transform to score (if depth level exceeds 2, score becomes 0)
            login_form_depth_level = min(max(1.0 - min_depth * 0.5, 0.0), 1.0)
            this.PrintUpdate('Login-form depth level: ' +
                             str(login_form_depth_level_raw))

            ### 4. Now save the results into the database
            Crawler.crawler_api.storage.SaveScore(
                'scores', URL,
                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                number_of_pages, url_type, average_depth_level,
                average_url_length, domain_age, domain_reservation_duration,
                whois_private, dps, page_rank, average_content_size,
                outbound_hyperlinks, category_specific_dictionary,
                resolver_indication, terms_of_services_page,
                login_form_depth_level)
            # also store raw feature data for analysis
            Crawler.crawler_api.storage.SaveScore(
                'characteristics', URL,
                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                number_of_pages_raw, url_type_raw, average_depth_level_raw,
                average_url_length_raw, domain_age_raw,
                domain_reservation_duration_raw, whois_private, dps,
                page_rank_raw, average_content_size_raw,
                outbound_hyperlinks_raw, category_specific_dictionary_raw,
                resolver_indication, terms_of_services_page,
                login_form_depth_level_raw)
            this.Sleep()

        except Exception as ex:
            this.PrintError('EXCEPTION: Scrape failed: connection host ' +
                            str(ex))
Exemple #47
0
def chk_whois(domain):
    """Return selected whois fields for *domain* as a dict.

    Keys: email, registrar, registrant, tel, ns, createdate,
    expirationdate, updateddate.  Every value defaults to '' when the
    whois record is missing, unparsable or incomplete.
    """
    #   *** .ca is not working, check more ***
    email = ns = createdate = expirationdate = updateddate = registrar = registrant = tel = ''
    w = dict(email=email,
             registrar=registrar,
             registrant=registrant,
             tel=tel,
             ns=ns,
             createdate=createdate,
             expirationdate=expirationdate,
             updateddate=updateddate)

    if chk_domain(domain):
        hostname, secondLD = chk_hostname(domain)
        try:
            #   whois data lives at the second-level domain, not the full hostname
            msg = 'parsing whois data of: %s ... ' % (secondLD)
            #logging.info(msg)
            ans = pythonwhois.get_whois(secondLD, True)
        except Exception:
            # lookup/parse failure: return the all-empty record
            return w
        # BUG FIX: dict.has_key() was removed in Python 3; use .get() instead.
        admin = ans['contacts'].get('admin')
        if admin is not None and admin.get('email') is not None:
            email = admin['email']
        reg_contact = ans['contacts'].get('registrant')
        if reg_contact is not None:
            if reg_contact.get('name') is not None:
                registrant = reg_contact['name']
            if reg_contact.get('phone') is not None:
                tel = reg_contact['phone']
        registrar_val = ans.get('registrar')
        if registrar_val is not None:
            registrar = registrar_val
        if type(registrar_val) is list:
            registrar = registrar_val[0]
        if ans.get('nameservers') is not None:
            ns = ans['nameservers'][0]
        if ans.get('creation_date') is not None:
            createdate = ans['creation_date']
        if ans.get('expiration_date') is not None:
            expirationdate = ans['expiration_date']
        if ans.get('updated_date') is not None:
            updateddate = ans['updated_date']
        # fall back to the update date when no creation date was published
        if createdate == '' and updateddate != '':
            createdate = updateddate
        # NOTE(review): .encode keeps the original (Py2-era) behavior; on Py3
        # these two fields become bytes -- confirm consumers expect that.
        w = dict(email=email,
                 registrar=registrar.encode('utf-8'),
                 registrant=registrant.encode('utf-8'),
                 tel=tel,
                 ns=ns,
                 createdate=createdate,
                 expirationdate=expirationdate,
                 updateddate=updateddate)
    else:
        msg = '[*] no whois record: %s ...' % (domain)
        logging.info(msg)
    return w
Exemple #48
0
 def do_websiteinfo(self, line):
     # Cmd-style handler: look up whois for the domain given in *line* and
     # print the parsed 'registrant' contact block.
     # NOTE(review): Python 2 print statement; *line* is treated as a single
     # domain even though it is wrapped in a one-element list.
     url = line
     domains = [url]
     for dom in domains:
         details = pythonwhois.get_whois(dom)
         print details['contacts']['registrant']
import csv
import pythonwhois

# Read the list of sites once, then close the handle
# (the original leaked the input file handle).
with open("data.csv") as sites_to_check:
    exampleReader = list(csv.reader(sites_to_check))

new_list = []

with open('output.csv', 'w', newline='') as outputFile:
    outputWriter = csv.writer(outputFile)
    # Look up the registrant country for (at most) the first 3000 domains
    # and write "domain,country" rows to output.csv.
    for domain in exampleReader[0:3000]:
        # BUG FIX: do not shadow the builtin `dict` (the original did).
        record = pythonwhois.get_whois(domain[1])
        try:
            country = record['contacts']['registrant']['country']
            temp_list = [domain[1], country]
            new_list = new_list + [temp_list]
            outputWriter.writerow(temp_list)
        except (KeyError, TypeError):
            # whois record has no registrant country; skip this domain
            pass
Exemple #50
0
     print(colored('MX Record: ', 'yellow'))
     print(data)
 print('\n')
 for data in ns_record:
     print(colored('NS Record: ', 'yellow'))
     print(data)
 print('\n')
 for data in txt_record:
     print(colored('TXT Record: ', 'yellow'))
     print(data)
 print('\n')
 print(colored('[*] Getting Whois Information:', 'yellow', attrs=['bold']))
 print('\n')
 print(colored('Whois Information: ', 'yellow'))
 print('\n')
 info = pythonwhois.get_whois(target)
 pprint(info['raw'])
 print('\n')
 print(colored('IP Whois Information: ', 'yellow'))
 obj = IPWhois(host)
 results = obj.lookup_rdap(depth=1)
 print('\n')
 print('Name: ' + results['network']['name'])
 print('CIDR Range: ' + results['network']['cidr'])
 print('\n')
 print(colored('[*] Reverse DNS Search: ', 'yellow', attrs=['bold']))
 print('\n')
 request = 'https://api.hackertarget.com/reversedns/?q=' + target
 r = requests.get(request)
 print(r.text)
 print('\n')
#from flaskapp import pythonwhois as pywhois
import datetime


#*******************************************************************************
# FUNCTIONS
#*******************************************************************************
def in_unix(input):
    """Return the number of seconds between *input* and the Unix epoch (1970-01-01)."""
    epoch = datetime.datetime(1970, 1, 1)
    return (input - epoch).total_seconds()


#*******************************************************************************
# MAIN
#*******************************************************************************
if __name__ == '__main__':

    if len(sys.argv) != 2:
        print "\nERROR: enter a domain name!\n"
    else:
        domain = sys.argv[1]
        whois_domain = pythonwhois.get_whois(domain)  #dictionary

        if 'creation_date' and 'expiration_date' in whois_domain.keys():
            print str(in_unix(whois_domain['creation_date'][0])) + ";" + str(
                in_unix(whois_domain['expiration_date'][0])) + ";" + str(
                    sys.argv[1])
        else:
            print "none;none;", sys.argv[1]
sys.exit(0)
Exemple #53
0
import pythonwhois
import sys

def banner():
    """Print the open-whois version and author header."""
    lines = (
        "* open-whois: 0.1.0",
        "* Christopher Blake ([email protected]), 2016",
        "* https://github.com/cj13579/open-whois",
        " ",
    )
    for text in lines:
        print(text)

if len(sys.argv) < 2:
    banner()
    # BUG FIX: the original used `print"..."` (a Python 2 print statement,
    # a syntax error on Python 3 and inconsistent with the print() calls
    # used everywhere else in this example).
    print("Nothing specified. Please specify a site name!")
    quit()

# Look up and dump the raw whois record for the domain given on argv.
i = str(sys.argv[1])
domain = pythonwhois.get_whois(i)
banner()
print(domain['raw'][0])
Exemple #54
0
def get_whois_data(url):
    """Return the parsed whois record for the registered domain of *url*.

    BUG FIX: the original assigned the function object itself
    (`domain = extract_registered_domain`) instead of calling it with
    *url*, so the whois lookup was performed on a function object.
    """
    domain = extract_registered_domain(url)
    return pythonwhois.get_whois(domain)
Exemple #55
0
import sys
import pythonwhois

if len(sys.argv) != 2:
    print("[-] usage python PythonWhoisExample.py <domain_name>")
    sys.exit()
print(sys.argv[1])

# Full parsed whois record for the requested domain
whois = pythonwhois.get_whois(sys.argv[1])
for key, value in whois.items():
    print("[+]%s : %s \n" % (key, value))

# Root whois server for the domain
whois = pythonwhois.net.get_root_server(sys.argv[1])
print(whois)

# Raw whois response for the domain
whois = pythonwhois.net.get_whois_raw(sys.argv[1])
print(whois)
Exemple #56
0
def resolverDNS(dominio):
    """Query A/MX/NS DNS records and the whois record for *dominio* and print them.

    All output goes to stdout via colored(); DNS errors are reported per
    exception type, and any unexpected error prints a message and exits.
    """
    try:
        # IPv4 address records
        ansA = dns.resolver.query(dominio, 'A')
        # Mail server records
        ansMX = dns.resolver.query(dominio, 'MX')
        # Name server records
        ansNS = dns.resolver.query(dominio, 'NS')

        print(colored("Respuesta de DNS en IPV4: ", 'blue', attrs=['bold', 'blink']))
        print(colored("==========================", 'blue', attrs=['bold', 'blink']))
        for i in ansA:
            print("Nombres  de la IPs:  %s" % colored(i, 'green', attrs=['bold']))

        print(colored("\nRespuesta de DNS en MailServers: ", 'blue', attrs=['bold', 'blink']))
        print(colored("=================================", 'blue', attrs=['bold', 'blink']))
        for i in ansMX:
            print("Nombres  de los Mailservers:  %s" % colored(i, 'green', attrs=['bold']))

        print(colored("\nRespuesta de DNS en NameServers: ", 'blue', attrs=['bold', 'blink']))
        print(colored("================================", 'blue', attrs=['bold', 'blink']))
        for i in ansNS:
            print("Nombres  de los Servers:  %s" % colored(i, 'green', attrs=['bold']))

        datos = pythonwhois.get_whois(dominio)

        print(colored("\nDatos obtenidos de  whois: ", 'blue', attrs=['bold', 'blink']))
        print(colored("================================", 'blue', attrs=['bold', 'blink']))
        if "contacts" in datos:
            print("Contactos:")
            print("   Admin:         %s" % colored(
                str(datos['contacts']['admin']), 'green', attrs=['bold']))
            print("   Tech:          %s" % colored(
                str(datos['contacts']['tech']), 'green', attrs=['bold']))
            print("   Registrant:    %s" % colored(
                str(datos['contacts']['registrant']), 'green', attrs=['bold']))
            print("   Billing:       %s" % colored(
                str(datos['contacts']['billing']), 'green', attrs=['bold']))
        if "id" in datos:
            print("Id:               %s" %
                  colored(str(datos['id'][0]), 'green', attrs=['bold']))
        if "emails" in datos:
            print("Emails:           %s" %
                  colored(str(datos['emails']), 'green', attrs=['bold']))
        # BUG FIX: the original tested for the key "whois server" (with a
        # space) but then read datos['whois_server'], so the branch could
        # never print the server it looked up.
        if "whois_server" in datos:
            print("Whois server:     %s" % colored(
                str(datos['whois_server'][0]), 'green', attrs=['bold']))
        if 'creation_date' in datos:
            print("Fecha creacion:   %s" % colored(
                str(datos['creation_date'][0]), 'green', attrs=['bold']))
        if 'expiration_date' in datos:
            print("Fecha expiracion: %s" % colored(
                str(datos['expiration_date'][0]), 'green', attrs=['bold']))
        if 'registrar' in datos:
            print("Registrar:        %s" %
                  colored(str(datos['registrar'][0]), 'green', attrs=['bold']))
        if 'status' in datos:
            print("Status:           %s" %
                  colored(str(datos['status'][0]), 'green', attrs=['bold']))

    except dns.resolver.NoAnswer as e:
        print(colored("[-]Se ha producido un error Answer %s" % e,
                      'red', attrs=['bold']))
    except dns.resolver.NXDOMAIN as e:
        print(colored("[-]Se ha producido un error NXDomain  %s" % e,
                      'red', attrs=['bold']))
    except dns.resolver.YXDOMAIN as e:
        print(colored("[-]Se ha producido un error YXDomain  %s" % e,
                      'red', attrs=['bold']))
    except dns.resolver.NoNameservers as e:
        print(colored("[-]Se ha producido un error Domain  %s" % e,
                      'red', attrs=['bold']))
    except dns.resolver.Timeout as e:
        print(colored("[-]Se ha producido un error de Timeout  %s" % e,
                      'red', attrs=['bold']))
    except Exception:
        # BUG FIX: was a bare `except:` — still report and exit on any
        # unexpected error, but no longer swallow SystemExit/KeyboardInterrupt.
        print(colored("[-]Se ha producido un error inexperado",
                      'red', attrs=['bold']))
        exit()
    for word in words:
        for length in tld_dict:
            tlds = tld_dict[length]
            ext = word[-length:]
            if ext in tlds and length < len(word):
                domain = word[:-length] + '.' + ext
                candidates.append(domain)
    return candidates

# List the candidate domains found, then optionally query whois for each to
# report which are available (no 'id' field in the record) vs taken.
candidates = find_candidates()
if not candidates:
    sys.exit('No candidates found.')
print('Found ' + str(len(candidates)) + ' candidate(s):')
for candidate in candidates:
    print(candidate)

if input('query whois? (y/n)') == 'y':
    taken = []
    free = []
    for candidate in candidates:
        print('querying whois for ' + candidate)
        record = pythonwhois.get_whois(candidate)
        # a registered domain carries an 'id' entry in its whois record
        (taken if 'id' in record else free).append(candidate)
    print('Available domains: ' + ''.join(name + ' ' for name in free))
    print('Unavailable domains: ' + ''.join(name + ' ' for name in taken))
Exemple #58
0
def urlfeatureextractor(wholeurl):
    """Download *wholeurl* and extract a 30-element phishing feature vector.

    Features follow the usual convention of -1 (legitimate-looking),
    0 (suspicious) and 1 (phishing-looking).  Returns [] when the page
    cannot be fetched at all.
    """
    if wholeurl.startswith("http://") or wholeurl.startswith(
            "https://"):  # deal with urls given without an http prefix
        surl = shorturl(wholeurl)
    else:
        surl = wholeurl
        wholeurl = "http://" + wholeurl
    headers = {
        'User-Agent':
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
    }
    try:
        page = requests.get(wholeurl,
                            headers=headers,
                            timeout=10,
                            verify=False)
        html = page.text
    except Exception as e:
        print(e)
        page = None
        html = ''
        return []

    length = len(surl)
    domain, portnum = domain_port_num(wholeurl)

    features = asarray([0] * 30)  # allocate 30 features
    try:
        # --- URL section ---
        features[0] = ipisnum(wholeurl)  # feature 1: domain is a raw IP
        features[
            1] = 1 if length > 75 else -1 if length < 54 else 0  # feature 2: URL length
        length = None

        features[3] = 1 if "@" in surl else -1  # feature 4
        features[4] = 1 if "//" in surl else -1  # feature 5
        features[5] = 1 if "-" in domain else -1  # feature 6
        dotcount = domain.count('.')
        features[
            6] = 1 if dotcount > 3 else 0 if dotcount == 3 else -1  # feature 7
        dotcount = None
        features[7] = -1 if wholeurl[:8] == "https://" else 1  # feature 8

        features[9] = -1  # feature 10: favicon domain check (disabled)
        features[10] = -1 if portnum == 80 else 1  # feature 11
        features[11] = 1 if wholeurl.find('https') > 6 else -1  # feature 12
        features[12] = 0
        # feature 13: whitelisted domains score -1.  Use `with` so the file
        # handle is always closed (the original leaked it on early break).
        with open(os.path.join(curdir, './data/white.txt'), 'r') as fw:
            for line in fw:
                if line.strip('\n') == domain:
                    features[12] = -1  # domain in whitelist
                    break

        if html:
            # --- HTML section ---
            if features[12] == 0:
                features[12] = 1 if "<form" in html else 0
            features[13] = anchorURL(html)  # feature 14: anchor URLs
            features[14] = MSLtags(
                html, domain)  # links in meta, script and link tags
            formhandle = findall('<form.*(?!>)action="([^ >"]*)".*>', html)
            features[15] = SFH(formhandle)
            features[16] = Formmail(formhandle)
            formhandle = None
            features[2] = urlredirect(wholeurl,
                                      page)  # feature 3: URL redirection
            features[18] = features[
                12]  # website forwarding; effectively duplicates feature 3
            features[19] = onmouseover(html)  # feature 20: onmouseover
            features[20] = disableightclick(
                html)  # feature 21: "event.button==2" in the source
            features[
                21] = 1 if "window.open(" in html else -1  # feature 22: popup window
            features[
                22] = 1 if "<iframe" in html else -1  # feature 23: iframe use
            html = None
            page = None
        else:
            # no page content: mark every HTML-derived feature legitimate
            for idx in (2, 13, 14, 15, 16, 18, 19, 20, 21, 22):
                features[idx] = -1

        # --- DNS / whois section ---
        splitURLSection = domain.split('.')
        try:
            # BUG FIX: the original compared the *list* itself against 2
            # (`splitURLSection > 2`) — a TypeError on Python 3 — and left
            # `url` unbound for two-label domains, so the whois lookup
            # always failed for them.  Compare the label count and fall
            # back to the domain itself.
            if len(splitURLSection) > 2:
                url = splitURLSection[-2] + '.' + splitURLSection[-1]
            else:
                url = domain
            w = pythonwhois.get_whois(url)
        except Exception as e:
            print(e)
            w = None
        if w and 'status' in w:
            today = datetime.today()
            features[8] = f9whois(
                w, today)  # feature 9: registration length from whois
            features[17] = identity(
                w, splitURLSection[-2])  # feature 18: url appears in the name
            features[23] = f24whois(
                w, today)  # feature 24: creation date more than 6 months ago
            features[24] = features[12]  # has a DNS record, so reuse score
        else:
            features[8] = 0  # unknown time
            features[23] = 0  # unknown time
            features[17] = 1  # url not in name
            features[24] = 1  # no DNS record
        today = None
        w = None

        # these features are too expensive to compute; reuse feature 13
        features[25] = features[12]  # website traffic
        features[26] = features[12]  # PageRank
        features[27] = features[12]  # Google index
        features[28] = features[12]  # number of links pointing to page
        features[29] = 0  # statistical-reports based feature (phishtank)
        with open(os.path.join(curdir, './data/black.txt'), 'r') as fb:
            for line in fb:
                if line.strip('\n') == wholeurl:
                    features[29] = 1  # url in blacklist
                    break
        print(features)
    except Exception as e:
        print(e)

    return features
Exemple #59
0
#!usr/bin/python
# -*- coding: utf-8 -*-

import dns
import dns.resolver
import dns.exception
import dns.query
import pythonwhois
import sys

obj = raw_input("Introduzca el nombre del objetivo: ")

dom = 0
while dom <= 2: 
	domain = pythonwhois.get_whois(obj[dom])
	for key in domain.keys():
        	print "* %s : %s \n" %(key, domain[key])

ansA = dns.resolver.query(obj, 'A')
ansMX = dns.resolver.query(obj, 'MX')
ansNS = dns.resolver.query(obj, 'NS')
ansSOA = dns.resolver.query(obj, 'SOA')
ansTXT = dns.resolver.query(obj, 'TXT')

print "**********************"
 
print "Información sobre IPv4:"
for ans in ansA:
	print "[-]", ans

print "**********************"
Exemple #60
0
def return_whois(query):
    """
    Whois lookup endpoint; accepts an IP or FQDN.

    :param query: IP or FQDN to look up.
    :returns: JSON response ``{"status": "ok", "whois": {...}}`` on success.
        The whois payload is the parsed record from ``get_whois`` with keys
        such as ``contacts`` (admin / billing / registrant / tech),
        ``creation_date``, ``expiration_date``, ``nameservers``,
        ``registrar``, ``status``, ``updated_date``, ``whois_server`` and
        ``raw``.

    **Example:** ``GET /api/whois/1-up.xyz`` returns the parsed whois record
    for the domain (registrar, WhoisGuard-protected contacts, dates, raw
    text, ...).
    """

    try:
        result = get_whois(query)
    except WhoisException as e:
        # Lookup failure: surface the error message as an HTTP 400 response.
        return error_response(e, 400)

    if result:
        return jsonify({'whois': result,
                        'status': "ok"})
    else:
        # Empty result: no whois data available for this query.
        abort(400)