Example No. 1
    def run(self, query, cmd, *args):

        # rip out newlines and gibberish
        query = query.strip()
        # replace the misp-style de-fanging
        query = query.replace("[", "")
        query = query.replace("]", "")

        parsed_uri = urlparse(query)
        # if it's an IP address
        try:
            if parsed_uri.netloc == '' and re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', query):
                self.logger.debug("whois on ip '{}'".format(query))
                w = whois.whois(query)
            elif parsed_uri.netloc == '' and parsed_uri.path != '':
                self.logger.debug("whois on domain '{}'".format(parsed_uri.path))    
                w = whois.whois(parsed_uri.path)
            else:
                self.logger.debug("whois on domain '{}'".format(parsed_uri.netloc))
                w = whois.whois(parsed_uri.netloc)
        except whois.parser.PywhoisError as e:
            return (False, {'error' : e})

        if w.get('status', False) == False:
            return (False, {'error' : "No result returned"})

        result = clean_dict(w)
        result['text'] = w.text.replace(">>> ", "--- ").replace(" <<<", " ---")
        return (True, result)
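The query clean-up above undoes MISP-style de-fanging (e.g. example[.]com) before the lookup. A minimal standalone sketch of the same idea, standard library only (the helper name refang is illustrative):

def refang(indicator):
    """Undo MISP-style de-fanging, e.g. ' example[.]com ' -> 'example.com'."""
    return indicator.strip().replace("[", "").replace("]", "")

# refang(" www[.]example[.]com ")  ->  "www.example.com"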
Example No. 2
    def is_available(self, domain):
        ''' Blindly grabbing PywhoisError isn't ideal, but it works '''
        try:
            whois(domain)
            return False
        except PywhoisError:
            return True
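As the docstring admits, availability here is inferred purely from whether the lookup raises. A self-contained sketch of that pattern, assuming the python-whois package (the function name is illustrative, and a missing record is only a hint, not a guarantee, that the domain is unregistered):

import whois
from whois.parser import PywhoisError

def domain_is_available(domain):
    """Return True when the WHOIS lookup raises PywhoisError (no record found)."""
    try:
        whois.whois(domain)
        return False   # a record came back, so the domain is registered
    except PywhoisError:
        return True    # no record found

# domain_is_available("example.com")  ->  False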
Example No. 3
def whois_lookup(url):
    try:
        whois.whois(url)
        sys.stderr.write('unavailable domain: %s\n' % (url))
        sys.stderr.flush()
    except Exception:
        print('available domain: %s' % (url))
Example No. 4
    def whois_handler(bot, update, args):
        chat_id = update.message.chat_id
        domain = args[0] if len(args) > 0 else None

        whois_response = whois.whois(domain) or None
        if whois_response is None:
            bot.sendMessage(chat_id, text="Sorry, I can't retrieve whois information about: {}.".format(domain))
            return
        bot.sendMessage(chat_id, text='Whois: {}'.format(whois_response.text))
Example No. 5
def whois_validate_domain(value):
    """
    Check that this is a domain according to whois
    """
    try:
        whois.whois(value)
    except PywhoisError:
        msg = _("%(domain)s does not seem to be a valid domain name")
        raise ValidationError(msg % {"domain": value})
Example No. 6
def whois_validate_domain(value):
    '''
    Check that this is a domain according to whois
    '''
    try:
        whois.whois(str(value))
    except PywhoisError:
        msg = _('%(domain)s does not seem to be a valid domain name')
        raise ValidationError(msg % {'domain': value})
Example No. 7
def whois_lookup(url):
    url_parse = urlparse(url)
    new_url = str(url_parse.scheme) + "://" + str(url_parse.netloc)

    try:
        whois.whois(new_url)
        return 1
    except:
        return -1
Example No. 8
File: webapp.py Project: stef/kopo
def getISP(ip):
    tmp=gi.record_by_addr(ip)
    if ip in torexits:
        return tmp['city'], tmp['country_name'], "TOR"
    if tmp:
        return tmp['city'], tmp['country_name'], whois(ip)[-1]
    tmp=whois(ip)
    if tmp:
        return 'unknown', 'unknown', tmp[-1]
    return 'unknown', 'unknown', ip
Example No. 9
    def check_domain_available(self,domain_name):
        

        # print( dir(whois))
        # domain = "esencia.in"
        w = whois(domain_name)
        t = w.query(False)
        p = parser.Parser(domain_name, t[1], t[0], True)

        try:

            # print( p.text)
            result=p.parse()

            print("######################")
            print(domain_name)

            if "NotFound" not in result:
            
                print ("Registrar:",list(result['Registrar']))
                print ("RegistrantID:",list(result['RegistrantID']))
                print ("CreationDate:",list(result['CreationDate']))
                print ("ExpirationDate",list(result['ExpirationDate']))
                print ("UpdateDate",list(result['UpdatedDate']))
                print ("RegistrantEmail:",list(result['RegistrantEmail']))

        except Exception as e:
            print(str(e))
Example No. 10
def main():
    opts, args = options.parse_args()
    if len(args) < 1:
        options.print_help()
        return
  
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    site=args[0]
    s.connect((args[0], opts.port))
    
    s.send(hello)
    
    while True:
        typ, ver, pay = recvmsg(s)
        if typ == None:
            
            return
        # Look for server hello done message.
        if typ == 22 and ord(pay[0]) == 0x0E:
            break
  
    
    s.send(hb)
    b = hit_hb(s)
    if b == False:
        print(site + " is not vulnerable")
    else:
        print(site + " is VULNERABLE!")
        w = whois.whois(site)
        for x in range(0, len(w.emails)):
            sendmail(w.emails[x])
Example No. 11
def checkargs():
  
  fqdn = sys.argv[1]
  image = sys.argv[2]
  flavor = sys.argv[3]

  try:
    img = cs.images.get(image)
    print("Good news, we found a valid image, continuing..")
  except:
    print("Sorry, your image was not found in the image list. Please try again.")
    quit()

  try:
    int(flavor)
  except:
    print("Your flavor input was not numeric. Please try again.")
    quit()

  try:
    cs.flavors.get(flavor)
    print("Valid flavor, still continuing")
  except:
    print("Your flavor is out of range. Please try again.")
    quit()

  try:
    w = whois.whois(fqdn)
    print("Seems like a valid FQDN, let's keep rolling..")
  except:
    print()
    print("This domain isn't a FQDN, please choose one that is. Quitting now.")
    quit()

  create(fqdn, image, img, flavor)
Example No. 12
    def retrieveWHOIShostInfo(self):
        try:
            self.whoisData = whois.whois(self._uri)
        except Exception as error:
            logger = logging.getLogger(self.__class__.__name__)
            logger.error(error)
Example No. 13
def domainify():
    """Returns a random domain where the last characters form a TLD"""
    results = open('results', 'rb')
    domains = []
    try:
        while True:
            domains.append(pickle.load(results))

    except EOFError:
        while True:
            pick = domains[random.randint(0, (len(domains))-1)]
            print(pick[0])
            definition =  find(pick[0][0])
            if definition:
                results = []
                for (word,tld) in pick:
                    try:
                        domain = word[:len(word)-len(tld)] + '.' + tld
                        if whois.whois(domain)["expiration_date"]:
                            results.append({'domain':domain, 'definition':definition})
                    except (UnboundLocalError, KeyError):
                        pass
                    except whois.parser.PywhoisError:           # this isn't 100% accurate
                        results.append({'domain':domain, 'definition':definition})
                if len(results)>0:
                    return results[random.randint(0, (len(results))-1)]
Example No. 14
 def process(load,h):
     if load:
         if load.count('4vPI')>0 or load.count('IPv4')>0:
             ls=load.split('\r\n')
             ffrom=''
             ip=''
             for l in ls:
                 if l.startswith('From: '):
                     ffrom=l.split(':')[2].split(';')[0].rstrip('>').lstrip(' ')
                 else:
                     ips=''
                     if l.count('4vPI')>0 and l.count('Addrs'[::-1])>0:
                         ips=l.split(':',1)[1][::-1].split(':')[0].split(' ')
                     if l.count('IPv4')>0 and l.count('Addrs')>0:
                         ips=l.split(':',1)[1].split(':')[0].split(' ')
                     if ffrom and ips:
                         for ip in ips:
                             ip=ip.strip(' ')
                             if ip and not (ip.startswith('192.168.') or
                                            ip.startswith('127.') or
                                            ip.startswith('10.') ):
                                 k=ip+'\t'+ffrom.strip(' ')
                                 t=time.localtime(h.ts[0]+h.ts[1]*0.000001)
                                 ts=time.strftime('%Y-%m-%d %X %Z',t)
                                 owner=''
                                 try:
                                     owner=whois.whois(ip)
                                 except:
                                     pass
                                 result.append(k+'\t'+ts+'\t'+str(owner))
Example No. 15
    def run_whois(self,domain):
        """Perform a WHOIS lookup for the provided target domain. The WHOIS results are returned
        as a dictionary.

        This can fail, usually if the domain is protected by a WHOIS privacy service or the
        registrar has their own WHOIS service.

        Parameters:
        domain      The domain to use for the WHOIS query
        """
        try:
            who = whois.whois(domain)
            results = {}
            # Check if info was returned before proceeding because sometimes records are protected
            if who.registrar:
                results['domain_name'] = who.domain_name
                results['registrar'] = who.registrar
                results['expiration_date'] = who.expiration_date
                results['registrant'] = who.name
                results['org'] = who.org
                results['admin_email'] = who.emails[0]
                results['tech_email'] = who.emails[1]
                results['address'] = "{}, {}{}, {}, {}".format(who.address,who.city,who.zipcode,who.state,who.country)
                results['dnssec'] = who.dnssec
            else:
                click.secho("[*] WHOIS record for {} came back empty. You might try looking at dnsstuff.com.".format(domain),fg="yellow")
            return results
        except Exception as error:
            click.secho("[!] The WHOIS lookup for {} failed!".format(domain),fg="red")
            click.secho("L.. Details: {}".format(error),fg="red")
Example No. 16
    def __get_whois_record(self, domain):
        try:
            record = whois.whois(domain)
        except Exception as e:
            sys.stderr.write('The whois lookup for the domain: ' + domain + ' failed for the following reason:\n\n')
            sys.stderr.write(str(e) + "\n")
            return None
Example No. 17
def FWhoisInfo(domaine):
    print("Information whois")
    # create the whois object
    obj_whois = whois.whois(domaine)
    # open the output file
    Fichier_result_auto = open("result_whois.txt", "a")

    Fichier_result_auto.write("--- Nom domaine ---\n")
    Fichier_result_auto.write(str(obj_whois.domain_name))

    Fichier_result_auto.write("--- Date création ---\n")
    Fichier_result_auto.write(str(obj_whois.creation_date))

    Fichier_result_auto.write("--- Register ---\n")
    Fichier_result_auto.write(str(obj_whois.registrar))

    Fichier_result_auto.write("--- Serveurs ---\n")
    Fichier_result_auto.write(str(obj_whois.name_servers))

    Fichier_result_auto.write("--- Emails ---\n")
    Fichier_result_auto.write(str(obj_whois.emails))

    Fichier_result_auto.write("--- Texte---\n")
    Fichier_result_auto.write(obj_whois.text)

    # close the output file
    Fichier_result_auto.close()
Example No. 18
def get_whois(domain):
    global COUNT
    host = random.choice([USE_WHOIS_CMD, random.choice(HOST)])
    logging.info("Retrieving whois of domain '%s' by using '%s'." % (domain, host))
    if COUNT == 10:
        logging.warn("Whoiser thread is sleeping for 2 seconds.")
        time.sleep(2)
        host = USE_WHOIS_CMD
        COUNT = 0
    w = None
    try:
        w = whois.whois(domain, host)
    except Exception as e:
        logging.error("PyWhois Exception: %s" % e)

    if w is None:
        COUNT += 1
        logging.error("FAIL Retrieve whois of domain '%s' fail." % domain)
        return None
    #: if the record has no email addresses, treat the lookup as failed
    elif w.emails is not None:
        logging.debug("SUCCESS Retrieve whois of domain '%s'." % domain)
        return w
    else:
        COUNT += 1
        logging.error("FAIL Retrieve whois of domain '%s' fail." % domain)
        return None
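The host rotation and fixed sleep above are one way to dodge WHOIS rate limits. A minimal retry-with-backoff sketch around the same kind of call (purely illustrative; it assumes python-whois and the single-argument form of whois.whois):

import time
import whois

def whois_with_retry(domain, attempts=3, delay=2):
    """Retry a WHOIS lookup a few times, sleeping a little longer between attempts."""
    for attempt in range(attempts):
        try:
            return whois.whois(domain)
        except Exception:              # rate limits usually surface as exceptions
            if attempt == attempts - 1:
                raise
            time.sleep(delay * (attempt + 1))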
Example No. 19
def retWhois(data):
    try:
        w = whois.whois(data)
        return w.text
    except:
        w = ''
        return w
Example No. 20
    def get_whois(self, request, *args, **kwargs):
        self.method_check(request, ['post'])

        data = json.loads(request.body)
        host = data.get('host')
        try:
            domain = whois.whois(host)

            uuid_hex = uuid.uuid3(uuid.NAMESPACE_URL, str(host))
            domain_uuid = base64.b64encode(uuid_hex.get_hex())
            cname_domain = base64.urlsafe_b64encode(host.lower())

            domain.update(uuid=domain_uuid,
                          cname=cname_domain.lower().replace('=', ''),
                          host=host,
                          base=data.get('base'))

            print(domain)
            domain_obj = Domain.objects.filter(domain_name=host).last()

            if domain_obj:
                resp = {
                    "success": False,
                    "status": 418,
                    "message": "We have already generated an SSL certificate for your domain. Login to manage your account.",
                    "data": domain
                }
            else:
                if not data.get('forced'):
                    expiration_date = (lambda d: d.expiration_date if isinstance(
                        d.expiration_date, str) else min(d.expiration_date))(domain)

                    if datetime.now() > expiration_date:
                        resp = {
                            "success": False,
                            "status": 418,
                            "message": "Your Domain name has expired.",
                            "data": domain
                        }
                    else:
                        resp = {
                            "success": True,
                            "message": "Whois record exist.",
                            "data": domain
                        }
                else:
                    resp = {
                        "success": True,
                        "message": "Whois record exist.",
                        "data": domain
                    }
                    
            print(resp, 'here')
            return self.create_response(request, resp, HttpAccepted)

        except Exception as e:
            print(e)
            raise CustomBadRequest(code='whois_error',
                                    message='Whois record verification error. Our developers have been notified.')
Example No. 21
def check(url):
    w = whois.whois(url)
    if (w.status.find('DELEGATED') != -1):
        print('Domain delegated')
        return {'status': 'Delegated', 'code': 1}
    else:
        print('Domain not delegated')
        return {'status': 'Not delegated', 'code': 0}
Example No. 22
def domainwhois(entity):
    domain = json.loads(str(whois.whois(entity)))
    for k, v in domain.items():
        if type(v) == list:
            domain[k] = ', '.join(v)
    if 'city' not in domain.keys():
        domain['city'] = 'N/A'
    return domain
Example No. 23
    def whois_data(self, domain):
        try:
            reg_data = whois.whois(domain)
            if reg_data:
                print('\t[+] already owned.')
        except whois.parser.PywhoisError as error:
            print('\t[+] not yet owned.')
            self.buy_it(domain)
Example No. 24
def whoisnew(domain, taskId):
    print("\t\t\t[+] Gathering WhoIs Information...\n")
    try:
        whoisdata = dict(whois.whois(domain))
    except:
        whoisdata = {}
    save_record(domain, taskId, "WHOIS Information", whoisdata)
    return whoisdata
Example No. 25
def whois_check(word, tld):
    try:
        domain = word[:len(word)-len(tld)] + '.' + tld
        if whois.whois(domain)["expiration_date"]:
            return True
    except (UnboundLocalError, KeyError):
        pass
    except whois.parser.PywhoisError:           # this isn't 100% accurate
            return True
Example No. 26
def dns_list():
    fdesc = open(domains_perm, 'r')
    lines = [line.strip() for line in open(domains_perm)]
    for line in lines:
        print(line)
        try:
            w = whois.whois(line)
        except:
            print("Error with url: " + str(line))
            continue
        url = w.domain_name
        if url == []: continue
        else:
            url = line
            cdate = str(w.creation_date).split(' ')[0]
            if "datetime" in cdate:
                cdate = str(w.creation_date[0]).split(' ')[0]
            else: pass
            if cdate == "[]": cdate = "Not Available"
            else: pass
            try: udate = str(w.updated_date).split(' ')[0]
            except:
                try:
                    udate = str(w.updated_date).split(' ')[0]
                except:
                    udate = "Not Available"
            if "datetime" in udate:
                udate = str(w.updated_date[0]).split(' ')[0]
            else: pass
            if "['" in udate:
                udate = str(w.updated_date[0]).split(' ')[0]
            else: pass
            if udate == "[]": udate = "Not Available"
            else: pass
            try:
                email = str(w.registrant_name)
                if "[" in email:
                    email = w.registrant_name[0]
                else: pass
            except:
                try:
                    email_list = [w.emails]
                    email = email_list[0][0]
                    if "@" in email:
                        pass
                    else:
                        email = email_list[0]
                except:
                    try:
                        email = w.registrant
                        print("Trying registrant")
                    except:
                        email = "Not Available"
            if email == "[]": email = "Not Available"
            else: pass
            writer.writerow([url, cdate, udate, email])
    myfile.close()
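Most of the branching above is normalizing fields that may be empty, a single datetime, or a list of datetimes. A compact hedged helper expressing the same intent (the name first_date is illustrative):

def first_date(value):
    """Collapse a python-whois date field (None, datetime, or list of datetimes)
    into a 'YYYY-MM-DD' string, falling back to 'Not Available'."""
    if isinstance(value, (list, tuple)):
        value = value[0] if value else None
    if value is None:
        return "Not Available"
    return str(value).split(' ')[0]

# cdate = first_date(w.creation_date)
# udate = first_date(w.updated_date)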
Example No. 27
def get_administrative_emails_from_whois(domain_name):
    """ For a given domain, get an administrative email list based on emails
        obtained by a Whois Lookup of the domain
    """
    administrative_emails = []
    whois_data = whois.whois(str(domain_name))
    if whois_data and hasattr(whois_data, 'emails') and whois_data.emails is not None:
        administrative_emails = whois_data.emails
        administrative_emails = list(set(administrative_emails))
    return administrative_emails
Example No. 28
    def do_whois():
        """perf whois"""
        deferred = whois.whois(address)
        def print_result(result):
            """ Print result of whois """
            runner.results["whois"].setdefault(address, {})
            runner.results["whois"][address] = result
            runner.decrease_counter()

        deferred.addCallback(print_result)
Example No. 29
def howold(strDomain):

    strWhois = whois(strDomain).split('\n')

    if strWhois[0].startswith('Empty response from') or strWhois[0].startswith('Rate limited by '):
        return None

    try:
        for strLine in strWhois:
            if strLine.startswith('Creation Date: '):
                strDate = strLine[15:]
                dateThen = dateutil.parser.parse(strDate)
                dateThen = dateThen.replace(tzinfo=None)
                dateNow  = datetime.now()
                dateDiff = dateNow - dateThen
                return dateDiff.days

        # FR
        for strLine in strWhois:
            strLine = strLine.strip(' ')
            if strLine.startswith('created:     '):
                strDate = strLine[15:]
                dateThen = dateutil.parser.parse(strDate)
                dateThen = dateThen.replace(tzinfo=None)
                dateNow  = datetime.now()
                dateDiff = dateNow - dateThen
                return dateDiff.days
            
        # UK
        for strLine in strWhois:
            strLine = strLine.strip(' ')
            if strLine.startswith('Registered on: '):
                strDate = strLine[15:]
                dateThen = dateutil.parser.parse(strDate)
                dateThen = dateThen.replace(tzinfo=None)
                dateNow  = datetime.now()
                dateDiff = dateNow - dateThen
                return dateDiff.days
                     

        # CN
        for strLine in strWhois:
            strLine = strLine.strip(' ')
            if strLine.startswith('Registration Time: '):
                strDate = strLine[19:]
                dateThen = dateutil.parser.parse(strDate)
                dateThen = dateThen.replace(tzinfo=None)
                dateNow  = datetime.now()
                dateDiff = dateNow - dateThen
                return dateDiff.days
    except ValueError:
        return None

    return None
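For comparison, the parsed creation_date from the python-whois package can often give the same age without scraping registry-specific lines out of the raw text. A hedged sketch (it assumes python-whois rather than the command-line wrapper used above, and that the creation date parses to a datetime):

from datetime import datetime
import whois

def domain_age_days(domain):
    """Age of a domain in days, or None when no usable creation date is parsed."""
    created = whois.whois(domain).creation_date
    if isinstance(created, list):          # some registries return several dates
        created = min(created)
    if not isinstance(created, datetime):  # None, or an unparsed string
        return None
    return (datetime.now() - created.replace(tzinfo=None)).days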
Example No. 30
    def generate_domains(self):
        i = 0
        for domain in self.domains:
            i = i + 1
            print("checking domain %s %i/%i" % (domain, i, len(self.domains)))
            try:
                w = whois.whois(domain)
            except whois.parser.PywhoisError:
                print('Domain %s is not registered' % domain)
                twitter_flag = self.check_twitter_handle(domain)
                self.outfile.write(domain + ' ' + twitter_flag + '\n')
Example No. 31
def result(request):
    #nm=request.GET['url']
    try:
        text = request.GET['url']
        if text.startswith('https://') or text.startswith('http://'):

            if len(text) <= 9:
                return render(request, 'errorpage.html')
            aburl = -1
            digits = "0123456789"
            if text[8] in digits:
                oneval = -1
            else:
                oneval = 1
            if len(text) > 170:
                secval = -1
            else:
                secval = 1
            if "@" in text:
                thirdval = -1
            else:
                thirdval = 1
            k = text.count("//")
            if k > 1:
                fourthval = -1
            else:
                fourthval = 1

            if "-" in text:
                fifthval = -1
            else:
                fifthval = 1
            if "https" in text:
                sixthval = 1
            else:
                sixthval = -1
            temp = text
            temp = temp[6:]
            k1 = temp.count("https")

            if k1 >= 1:
                seventhval = -1
            else:
                seventhval = 1
            if "about:blank" in text:
                eighthval = -1
            else:
                eighthval = 1
            if "mail()" or "mailto:" in text:
                ninthval = -1
            else:
                ninthval = 1
            re = text.count("//")
            if re > 3:
                tenthval = -1
            else:
                tenthval = 1

            import whois
            from datetime import datetime

            url = text

            try:
                res = whois.whois(url)
                try:
                    a = res['creation_date'][0]
                    b = datetime.now()
                    c = b - a
                    d = c.days
                except:
                    a = res['creation_date']
                    b = datetime.now()
                    c = b - a
                    d = c.days
                if d > 365:
                    eleventhval = 1
                else:
                    eleventhval = -1
            except:
                aburl = 1
                eleventhval = -1

            if aburl == 1:
                twelthval = -1
            else:
                twelthval = 1

            filename = 'phish_trainedv0.sav'

            loaded_model = joblib.load(filename)

            arg = loaded_model.predict(([[
                oneval, secval, thirdval, fourthval, fifthval, sixthval,
                seventhval, eighthval, ninthval, tenthval, eleventhval,
                twelthval
            ]]))
            print(arg[0])
            if arg[0] == 1:
                te = "Legitimate"
            else:
                te = "Malicious"
            if arg[0] == 1:
                mal = True
            else:
                mal = False
            from json.encoder import JSONEncoder
            final_entity = {"predicted_argument": [int(arg[0])]}
            # directly called encode method of JSON
            print(JSONEncoder().encode(final_entity))

            return render(
                request, 'result.html', {
                    'result': 'Real-time analysis successful',
                    'f2': te,
                    'mal': mal,
                    'text': text
                })
        else:
            return render(request, 'errorpage.html')
    except:
        return render(request, 'errorpage.html')
Example No. 32
try:
  import whois
  from dateutil import relativedelta
except ImportError:
  print("Error importing necessary modules, check if they are installed.")
  exit(1)

import datetime
import sys

if len(sys.argv) < 2:
  print("Please supply a domain name")
  exit(1)

domain = sys.argv[1]
whois_info = whois.whois(domain.lower())

if whois_info.creation_date:
  current_date = datetime.datetime.now()
  try:
    created_on = whois_info.creation_date[0]
    diff = relativedelta.relativedelta(current_date, created_on)
  except TypeError:
    print("Error calculating age difference - whois data may be missing or invalid")
    exit(1)

  print("%s was created %s years and %s months ago on %s" % (domain, diff.years, diff.months, created_on.strftime("%Y-%m-%d")))

else:
  print("No creation date returned :(")
  exit(1)
Example No. 33
def printWebWhois(url):
    return whois.whois(url)
Example No. 34
def checkDomainName(name, endString):

    sys.stdout.write(name + "|")

    name = name + endString
    w = whois.whois(name)
Example No. 35
import builtwith
import whois  # owner lookup; can also be checked via urllib.request.urlopen()

print(builtwith.parse("http://baidu.com"))
print(whois.whois("souhu.com"))
Example No. 36
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import whois

f = open('./dom.txt', 'r')
for lines in f.readlines():
    try:
        print(lines)
        data = whois.whois(lines.strip())
        # print(data)  # uncomment to output the full whois record
        print("注册公司:", data.registrar, "注册人:", data.name, "注册邮箱:", data.emails,
              "注册时间:", data.creation_date, "过期时间:", data.expiration_date)
        print("______________________________")
    except:
        pass
        continue
f.close()
Example No. 37
def privacyMetric():
    @after_this_request
    def add_header(response):
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response

    # variable initialise
    userInfo = {}

    # get domain name from url =======
    url = request.form.get("url")
    protocol = url.split(':')[0]
    urlInfo = tldextract.extract(url)
    domain = urlInfo.domain + '.' + urlInfo.suffix

    if protocol == "chrome-extension":
        return jsonify({
            'privacyScore': 0,
            'reasonForPrivacyScore': "This webpage is completely safe.",
            "websiteType": "privacyProtection"
        })

    print("protocol: ", protocol)

    # get data from request
    userInfo['domainVisitCount'] = int(request.form.get("domainVisitCount"))

    # get user profile
    userProfile = request.form.get("userProfile")
    userInfo['userProfile'] = json.loads(userProfile)
    print("UserProfile: ", request.form.get("userProfile"))

    # initialising privacyScore Variable
    privacyScore = 0

    # flags
    calledWhois = False

    if domain not in ['localhost.']:
        # get user location from userLocation ======
        userLocationLat = request.form.get("userLocationLat")
        userLocationLong = request.form.get("userLocationLong")
        print(userLocationLat)
        print(userLocationLong)

        g = geocoder.osm([userLocationLat, userLocationLong], method='reverse')
        geocoderTriedNum = 0
        while g is None and geocoderTriedNum < 5:
            time.sleep(2)
            g = geocoder.osm([userLocationLat, userLocationLong],
                             method='reverse')
            geocoderTriedNum += 1
        print(g.json['country'])
        userInfo['websitevisitedcountry'] = g.json['country']
        # check user's country is present in the dbpedia
        if dbpedia.IsInfoInDBPedia(userInfo['websitevisitedcountry']):
            userInfo['websitevisitedcountry'] = userInfo[
                'websitevisitedcountry']

        print(domain)

        # check domain is present in the our graph
        objectIsPresent = blazegraph.checkForSubject(domain)
        comp_info_score = []

        # if not present, add info to that graph
        isPresentInDBPedia = False
        # get domain information using alexa api
        if objectIsPresent == False:
            comp_info = alexa.alexa(domain)

            comp_info = list(comp_info)
            # check if the website creation date is present
            if comp_info[7] == 'NaN':
                # get expiration date using whois
                websiteInfoFromWhoIs = whois.whois(domain)
                print("websiteInfoFromWhoIs:@: ", websiteInfoFromWhoIs)
                calledWhois = True
                if isinstance(websiteInfoFromWhoIs.creation_date, list):
                    print("websiteDate1:")
                    comp_info[7] = datetime.strftime(
                        websiteInfoFromWhoIs.creation_date[1],
                        "%Y-%m-%d %H:%M:%S")
                else:
                    print("websiteDate2:")
                    comp_info[7] = datetime.strftime(
                        websiteInfoFromWhoIs.creation_date,
                        "%Y-%m-%d %H:%M:%S")

            # to add create info into rdf.
            blazegraph.add_companyInfo(comp_info)

            # delete if NaN is present
            blazegraph.deleteNaN()

            # get complete URL and connect with DBPedia
            # check info is present in DBPedia
            comp_info[1] = comp_info[1].replace('/', '')
            comp_info[1] = comp_info[1].replace(' ', '_')
            isPresentInDBPedia = dbpedia.IsInfoInDBPedia(comp_info[1])
            print("isPresentInDBPedia:", isPresentInDBPedia)

            if isPresentInDBPedia:
                print("same")
                # get company name
                #companyTitle = blazegraph.getCompanyName(domain)
                blazegraph.sameAs(domain, comp_info[1])

                # get company location information from dbpedia
                companyLoc = dbpedia.getCompanyLocation(comp_info[1])

                if companyLoc != None:
                    # convert company location into country
                    geoLocator = Nominatim(user_agent="privacyProtection")
                    companyLocForGeoCoder = companyLoc.split('/')[-1]
                    location = geoLocator.geocode(companyLocForGeoCoder)
                    geocoderTriedNum2 = 0
                    while location is None and geocoderTriedNum2 < 5:
                        time.sleep(2)
                        location = geoLocator.geocode(companyLocForGeoCoder)
                        geocoderTriedNum2 += 1
                    companyLoc = location.raw['display_name'].split(" ")[-1]
                    print("location country 222", companyLoc)

            if isPresentInDBPedia == False or companyLoc == None:
                # get website domain reg. location using whois
                if calledWhois == False:
                    # get expiration location using whois
                    websiteInfoFromWhoIs = whois.whois(domain)

                # websiteDomainCity = websiteInfoFromWhoIs.city
                # if websiteDomainCity != None:
                #     print("Company location in app @1@: ", websiteInfoFromWhoIs)
                #     companyLoc = websiteDomainCity.replace(" ", "_")
                # else:
                websiteDomainCountry = websiteInfoFromWhoIs.country
                companyLoc = pycountry.countries.get(
                    alpha_2=websiteDomainCountry)
                if companyLoc == None:
                    companyLoc = "NaN"
                else:
                    companyLoc = companyLoc.name
                    companyLoc = companyLoc.replace(" ", "_")

            # get company information from dbpedia
            print("Company location in app @@: ", companyLoc)
            comp_info.append(companyLoc)
            blazegraph.addCompanyLocation(domain, comp_info[8])
            print("companyLoc: ", comp_info[8])

            # add website protocol info to calculate privacy score
            comp_info.append(protocol)
            comp_info_score = comp_info
            # --------
        else:
            # get company information from our triple store
            comp_info = blazegraph.getCompanyInfoInFormat(subject_m=domain)
            print("Company's information: ", comp_info)
            comp_info.append(protocol)
            comp_info_score = comp_info

        # get privacy score based on company Info @@to-do send this data to the client
        privacyScore, reasonForPrivacyScore = privacyMetrics.calculatePrivacyScore(
            comp_info, userInfo)
        print("comp_info[4]", comp_info[4])
        if comp_info[4] is not None and comp_info[4] != "NaN":
            websiteType = comp_info[4].split('/')[0]
        else:
            websiteType = "others"

        print("privacyRiskScore :", privacyScore)
        print("reasonForPrivacyScore :", reasonForPrivacyScore)
        print("websiteType :", websiteType)

    return jsonify({
        'privacyRiskScore': privacyScore,
        'reasonForPrivacyScore': reasonForPrivacyScore,
        "websiteType": websiteType
    })
Example No. 38
def result(request):
    text=request.GET['url'].lower().strip()

    import requests
    from lxml import etree
    try:
        temporary=requests.get(text).status_code
    except:
        temporary=404    
    if temporary==200:
        online_stat="Website is currently ONLINE" 
        try:
            from StringIO import StringIO
        except:
            from io import StringIO   

        parser=etree.HTMLParser()
        html=requests.get(text).text
        tree=etree.parse(StringIO(html),parser)
        title=tree.xpath("//title/text()")
        tittle=title


    else:
        online_stat="Website is currently OFFLINE or temporarily overloaded"    
        tittle="Not determined as URL is OFFLINE or temporarily overloaded"

    #print (online_stat,tittle)    
    try:
    
        #nm=request.GET['url']
        import tldextract
        do=tldextract.extract(text).domain
        sdo=tldextract.extract(text).subdomain
        suf=tldextract.extract(text).suffix
        
        if not text.startswith('http://') and not text.startswith('https://'):
            return render(request,"404.html")
        
        if (text.startswith('https://www.google.com/search?q=')==False ):

            if text.startswith('https://') or text.startswith('http://'):
                var13="Not Applicable"
                varab="Not Applicable"
                var11="Not Applicable"
                var10="Not Applicable"
                var5="Not Applicable"
                var4="Not Applicable"
                var3="Not Applicable"

                if len(text)<=9:
                    return render(request,'errorpage.html')
                aburl=-1
                digits="0123456789"
                if text[8] in digits:
                    oneval=-1
                else:
                    oneval=1    
                if len(text)>170:
                    secval=-1
                else:
                    secval=1  
                if "@" in text:
                    thirdval=-1
                    var3="'@' detected"
                else:
                    thirdval=1       
                    var3="No '@' detected"
                k=text.count("//")          
                if k>1:
                    fourthval=-1
                    var4="More Redirects"
                else:
                    fourthval=1
                    
                if "-" in do or "-" in sdo:
                    fifthval=-1
                    var5="Prefix-Suffix detected"
                else:
                    fifthval=1 
                    var5="No Prefix-Suffix detected"     

                if "https" in text:
                    sixthval=1
                else:
                    sixthval=-1
                temp=text
                temp=temp[6:]
                k1=temp.count("https")

                if k1 >=1:
                    seventhval=-1
                else:
                    seventhval=1
                if "about:blank" in text:
                    eighthval=-1
                else:
                    eighthval=1
                if "mail()" or "mailto:" in text:
                    ninthval=-1
                else:
                    ninthval=1
                re=text.count("//")          
                if re>3:
                    tenthval=-1
                    var10="redirects more than 2"
                else:
                    tenthval=1    
                    var10=f"{re-1} redirects detected"

                import whois
                from datetime import datetime

                url=text
                #code replaced whois
                # 
                """try:"""
                d=0
                try:
                    res=whois.whois(url)
                    cpyres=res
                except:
                    print("getaddrerrror DNE")
                    d=-1
                    name="Not found in WHOIS database"
                    org="Not found in WHOIS database"
                    add="Not found in WHOIS database"
                    city="Not found in WHOIS database"
                    state="Not found in WHOIS database"
                    ziip="Not found in WHOIS database"
                    country="Not found in WHOIS database"
                    emails="Not found in WHOIS database"
                    dom="Not Found in WHOIS database"
                    registrar="Not Found in WHOIS database"
                if d!=-1:    
                    try:
                        if len(res.creation_date)>1:
                            a=res['creation_date'][0]
                            b=datetime.now()
                            c=b-a
                            d=c.days
                    except:
                        a=res['creation_date']
                        b=datetime.now()
                        c=b-a
                        d=c.days
                """except:
                    print("getaddrerrror DNE")
                    d=0"""


                

                if d>365:
                    eleventhval=1
                    aburl=1
                    var11=f"Domain age is {d} days"
                elif d<=365:
                    eleventhval=-1
                    aburl=-1
                    var11=f"Domain age working less than a year, {d} days"
        
        



                if aburl==-1:
                    twelthval=-1
                    varab="Abnormal URL detected"
                else:
                    twelthval=1 
                    varab="Website Registered on WHOIS Database"

                #print (twelthval,eleventhval,aburl,d)    
                import urllib.request, sys, re
                import xmltodict, json

                try:
                    xml = urllib.request.urlopen('http://data.alexa.com/data?cli=10&dat=s&url={}'.format(text)).read()

                    result= xmltodict.parse(xml)

                    data = json.dumps(result).replace("@","")
                    data_tojson = json.loads(data)
                    url = data_tojson["ALEXA"]["SD"][1]["POPULARITY"]["URL"]
                    rank= int(data_tojson["ALEXA"]["SD"][1]["POPULARITY"]["TEXT"])
                    #print ("rank",rank)
                    if rank<=150000:
                        thirt=1
                    else:
                        thirt=-1
                        var13=f"Ranked {rank} in Alexa Database, Larger index in alexa database detected!!"
                    #print (thirt)    
                except:
                    thirt=-1 
                    rank=-1
                    ##############var13="Larger index in alexa database"
                    var13="Not indexed in alexa database"
                    #print (rank)                  



                filename = 'phish_trainedv7mud0.001.sav'

                loaded_model = joblib.load(filename)

                arg=loaded_model.predict(([[oneval,secval,thirdval,fourthval,fifthval,seventhval,eighthval,ninthval,tenthval,eleventhval,twelthval,thirt]]))
                array_score=[oneval,secval,thirdval,fourthval,fifthval,seventhval,eighthval,ninthval,tenthval,eleventhval,twelthval,thirt]
            
                safety_scores=((array_score.count(1)/11)*100)
                safety_score=str(safety_scores) + " %"
                #print (arg[0])
                import whois
                url=text
                #print (res)
                #res=whois.whois(url)
                if (d!=-1):
                    name=res.domain_name
                    #print (res.domain_name)
                    org=res.org
                    #print (res.org)
                    add=res.address
                    #print (res.address)
                    city=res.city
                    #print (res.city)
                    state=res.state
                    #print (res.state)
                    ziip=res.zipcode
                    #print (res.zipcode)
                    country=res.country
                    #print (res.country)
                    emails=res.emails
                    #print (res.emails)
                    dom=res.domain_name
                    #print (res.domain_name)   
                    registrar=res.registrar             
                else:
                    name="Not found in database"
                    org="Not found in database"
                    add="Not found in database"
                    city="Not found in database"
                    state="Not found in database"
                    ziip="Not found in database"
                    country="Not found in database"
                    emails="Not found in database"
                    dom="Not Found"
                    registrar="Not Found"

                
                    

                if aburl==-1 and rank==-1 :
                    arg[0]=-1
                    #phishing

                if arg[0]==1:
                    te="Legitimate"
                else:
                    te="Malicious"  
                if arg[0] == 1:
                    mal = True
                else:
                    mal = False      

                #print (name,org,add,city,state,ziip,country,emails,dom)


                from json.encoder import JSONEncoder
                final_entity = { "predicted_argument": [int(arg[0])]}
                # directly called encode method of JSON
                #print (JSONEncoder().encode(final_entity)) 
                domage=str(d)+' '+'days'
                redir=k-1

                if isinstance(cpyres.domain_name,str)==True:
                    d=cpyres.domain_name
                elif isinstance(cpyres.domain_name,list)==True:
                    d=cpyres.domain_name[0]   


                #print (d)
                try:
                    ip=socket.gethostbyname_ex(d)
                    ipadd=(ip[2][0])
                    
                    g=geocoder.ip(ipadd)
                    ipcity=g.city
                    
                    ipstate=g.state
                    
                    ipcountry=g.country
                
                    iplatitude=g.latlng[0]
                    
                    iplongitude=g.latlng[1]
                    
                except:
                    ipadd="Not Found"
                    #print (ipadd)
                    
                    ipcity="Not Found"
                    #print (city)
                    ipstate="Not Found"
                    #print (state)
                    ipcountry="Not Found"
                    #print (country)
                    iplatitude="Not Found"
                    #print (g.latlng)
                    iplongitude="Not Found"
                    #print (latitude)
                    #print (longitude)
                '''print (ipadd)
                print (ipcity)
                print (ipstate)
                print (ipcountry)
                print (iplatitude)
                print (iplongitude)
    '''

                if text.startswith('https://mudvfinalradar.eu-gb.cf.appdomain.cloud/'):
                    mal=True
                    te="Legitimate"

                obj = Url()
                obj.result = te 
                #print (dom,rank)
                        
                tags = [name,org,state,add,city,ziip,country,emails,dom,rank,domage,varab,redir,var3,var5]

                tags = list(filter(lambda x: x!="Not Found",tags))
                tags.append(text)
                obj.link = text
                obj.add = add
                obj.state = state
                obj.city = city
                
                #obj.ziip = res['zip_code']
                obj.country = country 
                obj.emails = emails
                obj.dom = dom
                obj.org = org
                obj.rank = rank
                obj.registrar=registrar
                obj.domage=domage
                obj.varab=varab
                obj.redir=redir
                obj.var3=var3
                obj.var5=var5
                obj.ipadd=ipadd
                obj.ipcity=ipcity
                obj.ipstate=ipstate
                obj.ipcountry=ipcountry
                obj.iplatitude=iplatitude
                obj.iplongitude=iplongitude

                obj.save()
                nm=name
                oor=org
                em=emails
                #print (add)
                if add!=None:
                    if add and len (add)==1:
                        add=add.replace(",","")
                    elif len(add)>1:
                        add="".join(add)
                    #print (add)     
                
                name="".join(name)
                #print (name)
                if emails!=None:
                    emails="".join(emails)
                if org!=None:    
                    org=str(org).replace(",","")
                #print (org)
                '''print (dom)'''
                dom="".join(dom)
                #print (dom)
                if registrar:
                    registrar=registrar.replace(",","")
                #print (registrar)
                #print (emails)
                #print(city)
                import datetime

                if text.startswith('https://mudvfinalradar.eu-gb.cf.appdomain.cloud/'):
                    mal=True
                    te="Legitimate"
            



                import csv
                with open ('static//dataset.csv','a',encoding="utf-8") as res:        
                    writer=csv.writer(res)           
                    s="{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(te,str(name).replace(",",''),
                        str(org).replace(",",''),
                        str(add).replace(",",''),
                        str(city).replace(",",''),
                        str(state).replace(",",''),
                        str(ziip).replace(",",''),
                        str(country).replace(",",''),str(emails).replace(",",''),
                        str(dom).replace(",",''),rank,str(registrar).replace(",",''),str(datetime.datetime.now()))
                    res.write(s)


                url_organisations=str(org).replace(",",'')
                #print(url_organisations)
                url_address=str(add).replace(",",'')
                #print(url_address)
                url_city=str(city).replace(",",'')
                #print(url_city)
                url_state=str(state).replace(",",'')
                #print(url_state)
                url_zip=str(ziip).replace(",",'')
                #print(url_zip)
                url_country=str(country).replace(",",'')
                #print(url_country)
                url_email=str(emails).replace(",",'')
                #print(url_email)
                url_domain=str(dom).replace(",",'')
                #print(url_domain)
                    #rank
                #print(rank)
                url_registrar=str(registrar).replace(",",'')
                #print(url_registrar)
                date=str(datetime.datetime.now())
                #print(date)
                    

                    ##cloudant
                    #using both Authentication method "IBM Cloud Identity and Access Management (IAM)"
                from cloudant.client import Cloudant
                from cloudant.error import CloudantException
                from cloudant.result import Result, ResultByKey
                    
                  
                client = Cloudant("username", "key", url="key")

                client.connect()
                    #created database name "URL"
                database_name = "url_history"
                my_database = client.create_database(database_name)
                    #check connection between cloudant and application
                if my_database.exists():
                    print(f"'{database_name}' successfully created.")
                else:
                    print("connection failed")

                    #store data in json and push to cloudant
                import json
                json_document = {
                    "URL": str(text),
                    "Property": str(te),
                    "Name": str(name),
                    "Organisation": str(url_organisations),
                    "Address": str(url_address),
                    "City": str(url_city),
                    "State": str(url_state),
                    "Zipcode": str(url_zip),
                    "Country": str(url_country),
                    "Domain": str(url_domain),
                    "Alexa Rank": str(rank),
                    "Registrar": str(url_registrar),
                    "E-mails": str(url_email),
                    "time": str(date)}
                    
                new_document = my_database.create_document(json_document)
                ##csv read                
                
                if text.startswith('https://mudvfinalradar.eu-gb.cf.appdomain.cloud/'):
                    mal=True
            
                return render(request,'result.html',{'result':'Real-time analysis successful','f2':te,'safety_score':safety_score,'safety_scores':safety_scores,'mal': mal,'text':text,'name':nm,
                        'org':oor,
                        'add':add,
                        'city':city,
                        'state':state,
                        'ziip':ziip,
                        'country':country,'emails':em,
                        'dom':d,'rank':rank,'registrar':registrar,"tags":tags,"var13":var13,"varab":varab,"var11":var11,"var10":var10,"var5":var5,"var4":var4,"var3":var3,"ipadd":ipadd,'ipcity':ipcity,'ipstate':ipstate,'ipcountry':ipcountry,'iplatitude':iplatitude,'iplongitude':iplongitude,'online_stat':online_stat,'tittle':tittle})



        else:
            return render(request,'404.html')  
    except:
        return render(request,'404.html')  
        #website DNE or feature extraction cannot be completed
        '''return render(request,'result.html',{'result':'Real-time analysis successfull','f2':'Legtimate','mal': True,'text':text,'name':"NA",
Example No. 39
def api(request):
    text=request.GET['query'].lower().strip()
    try:
        
        import datetime

        if text.startswith('https://mudvfinalradar.eu-gb.cf.appdomain.cloud/'):
            import datetime
            mydict = {
                "query" : text,
                "malware" : False,
                "datetime" : str(datetime.datetime.now())
            }
            response = JsonResponse(mydict)
            return response   

        if text.startswith('https://www.google.com/search?q='):
            import datetime
            mydict = {
                "query" : text,
                "malware" : False,
                "datetime" : str(datetime.datetime.now())
            }
            response = JsonResponse(mydict)
            return response    


        #if (text.startswith('https://www.google.com/search?q=')==False) :

        else:
        
            if text.startswith('https://') or text.startswith('http://'):
                import tldextract
                do=tldextract.extract(text).domain
                sdo=tldextract.extract(text).subdomain
                suf=tldextract.extract(text).suffix

                if len(text)<=9:
                    return render(request,'errorpage.html')
                aburl=-1
                digits="0123456789"
                if text[8] in digits:
                    oneval=-1
                else:
                    oneval=1    
                if len(text)>170:
                    secval=-1
                else:
                    secval=1  
                if "@" in text:
                    thirdval=-1
                else:
                    thirdval=1    
                k=text.count("//")          
                if k>1:
                    fourthval=-1
                else:
                    fourthval=1
                    
                if "-" in do or "-" in sdo:
                    fifthval=-1
                else:
                    fifthval=1         
                if "https" in text:
                    sixthval=1
                else:
                    sixthval=-1
                temp=text
                temp=temp[6:]
                k1=temp.count("https")

                if k1 >=1:
                    seventhval=-1
                else:
                    seventhval=1
                if "about:blank" in text:
                    eighthval=-1
                else:
                    eighthval=1
                if "mail()" or "mailto:" in text:
                    ninthval=-1
                else:
                    ninthval=1
                re=text.count("//")          
                if re>3:
                    tenthval=-1
                else:
                    tenthval=1    

                import whois
                from datetime import datetime

                url=text

                d=0
                try:
                    res=whois.whois(url)
                except:
                    #print("getaddrerrror DNE")
                    d=-1
                    name="Not found in database"
                    org="Not found in database"
                    add="Not found in database"
                    city="Not found in database"
                    state="Not found in database"
                    ziip="Not found in database"
                    country="Not found in database"
                    emails="Not found in database"
                    dom="Not Found"
                if d!=-1:    
                    try:
                        if len(res.creation_date)>1:
                            a=res['creation_date'][0]
                            b=datetime.now()
                            c=b-a
                            d=c.days
                    except:
                        a=res['creation_date']
                        b=datetime.now()
                        c=b-a
                        d=c.days
                """except:
                    print("getaddrerrror DNE")
                    d=0"""


                

                if d>365:
                    eleventhval=1
                    aburl=1
                elif d<=365:
                    eleventhval=-1
                    aburl=-1
                    var11="Domain age working less than a year"
        
     



                if aburl==-1:
                    twelthval=-1
                else:
                    twelthval=1                 
                import urllib.request, sys, re
                import xmltodict, json
                rank=-1
                try:
                    xml = urllib.request.urlopen('http://data.alexa.com/data?cli=10&dat=s&url={}'.format(text)).read()

                    result= xmltodict.parse(xml)

                    data = json.dumps(result).replace("@","")
                    data_tojson = json.loads(data)
                    url = data_tojson["ALEXA"]["SD"][1]["POPULARITY"]["URL"]
                    rank= int(data_tojson["ALEXA"]["SD"][1]["POPULARITY"]["TEXT"])
                    #print ("rank",rank)
                    if rank<=150000:
                        thirt=1
                    else:
                        thirt=-1
                    #print (thirt)    
                except:
                    thirt=-1 
                    rank=-1
                    #rank="Not Indexed by Alexa"
                    #print (rank)                  




                filename = 'phish_trainedv7mud0.001.sav'

                loaded_model = joblib.load(filename)

                # note: sixthval is computed above but is not one of the 12 features fed to the model
                arg=loaded_model.predict([[oneval,secval,thirdval,fourthval,fifthval,seventhval,eighthval,ninthval,tenthval,eleventhval,twelthval,thirt]])
                #print (arg[0])
                import whois
                url=text
                
                #print (res)
                if (d!=-1):
                    name=res.domain_name
                    #print (res.domain_name)
                    org=res.org
                    #print (res.org)
                    add=res.address
                    #print (res.address)
                    city=res.city
                    #print (res.city)
                    state=res.state
                    #print (res.state)
                    ziip=res.zipcode
                    #print (res.zipcode)
                    country=res.country
                    #print (res.country)
                    emails=res.emails
                    #print (res.emails)
                    dom=res.domain_name
                    #print (res.domain_name)                
                else:
                    name="Not found in database"
                    org="Not found in database"
                    add="Not found in database"
                    city="Not found in database"
                    state="Not found in database"
                    ziip="Not found in database"
                    country="Not found in database"
                    emails="Not found in database"
                    dom="Not Found"

                
                    

                if aburl==-1 and rank==-1 :
                    arg[0]=-1
                    #phishing

                if arg[0] == 1:
                    te = "Legitimate"
                    mal = True
                    malstatus = False
                else:
                    te = "Malicious"
                    mal = False
                    malstatus = True
                from json.encoder import JSONEncoder
                final_entity = { "predicted_argument": [int(arg[0])]}

            import datetime
            mydict = {
                "query" : url,
                "malware" : malstatus,
                "datetime" : str(datetime.datetime.now())
            }
            response = JsonResponse(mydict)
            return response

                

    except:
        text=request.GET['query']
        import datetime
        mydict = {
            "query" : text,
            "malware" : False,
            "datetime" : str(datetime.datetime.now())
        }
        response = JsonResponse(mydict)
        return response  
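# A minimal sketch (not part of the original view) of the domain-age check used
# above: python-whois can hand back creation_date as a single datetime or a list,
# so both shapes are handled before the day count is taken. The helper name
# domain_age_days is illustrative, and naive datetimes are assumed.
import whois
from datetime import datetime

def domain_age_days(url):
    try:
        record = whois.whois(url)
    except Exception:
        return -1  # lookup failed; treat the age as unknown, like the view above
    created = record.creation_date
    if isinstance(created, list):
        created = created[0]
    if created is None:
        return -1
    return (datetime.now() - created).days

# e.g. an age above 365 days maps to the "older than a year" feature value of 1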
Exemplo n.º 40
0
def WhoisLocation(url):
    """finds the location of a url with packages like pythonwhois and whois
    NOTE: the url must be bar_bones, otherwise the whois wont work ex(https://www.nike.com/ won't work it needs to be nike.com) """
    location = []
    location_str_list = []
    try:  # first try this
        # trying with pythonwhois to see if the location exists
        obj = pythonwhois.get_whois(url)
        for key in obj['contacts']['registrant']:
            location.append(obj['contacts']['registrant'][key])
        # turn the list into a string with a space between the parts
        # (use a name other than the built-in str)
        location_str = ' '.join(location)
        location_str_list.append(location_str)
    except TypeError:
        pass
    except Exception:  # dealing with the rest of the socket errors and whois.parser.PywhoisError
        pass
    if len(location_str_list) == 0:
        try:  # if the first try doesn't work, fall back to the whois package
            w = whois.whois(url)
            # if any of these fields is None, joining would raise a TypeError,
            # so skip the missing ones instead of repeating the whois lookup per field
            for key in ("address", "city", "state", "zipcode", "country"):
                if w[key] is not None:
                    location.append(w[key])
            # turn the list into a string
            location_str = ' '.join(location)
            location_str_list.append(location_str)
        except (KeyError, whois.parser.PywhoisError):
            pass
        except TypeError:  # there may have been multiple values for a field
            print("multiple")
        except Exception:  # dealing with socket errors
            pass
    return location_str_list
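# The docstring above notes that the whois lookups need a bare domain, not a full
# URL. A small sketch of normalizing the input before calling WhoisLocation; the
# helper name bare_domain is illustrative and assumes Python 3's urllib.
from urllib.parse import urlparse

def bare_domain(url):
    netloc = urlparse(url).netloc or url  # handles inputs with or without a scheme
    if netloc.startswith("www."):
        netloc = netloc[4:]
    return netloc.split(":")[0]  # drop any port

# bare_domain("https://www.nike.com/") -> "nike.com"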
Exemplo n.º 41
0
#!/usr/bin/python2

import whois

x = raw_input("enter anything")

w = whois.whois(x)

print(w)
Exemplo n.º 42
0
 def do_GET(self):
     self.send_response(200)
     self.send_header('Content-type','text/plain')
     self.end_headers()
     if self.server.args.verbose: self.server.safe_print(self.path)
     (ignore, ignore, urlpath, urlparams, ignore) = urlparse.urlsplit(self.path)
     cmdstr = tgtstr = None
     if re.search("[\/](?:created|alexa|domain)[\/].*?", urlpath):
         cmdstr = re.search(r"[\/](created|alexa|domain)[\/].*$", urlpath)
         tgtstr = re.search(r"[\/](created|alexa|domain)[\/](.*)$", urlpath)
         if not cmdstr or not tgtstr:
             api_hlp = 'API Documentation\nhttp://%s:%s/cmd/tgt cmd = domain or alexa tgt = domain name' % (self.server.server_address[0], self.server.server_address[1])
             self.wfile.write(api_hlp.encode("latin-1"))
             return
         params = {}
         params["cmd"] = cmdstr.group(1)
         params["tgt"] = tgtstr.group(2)
     else:
         cmdstr=re.search("cmd=(?:domain|alexa|created)",urlparams)
         tgtstr =  re.search("tgt=",urlparams)
         if not cmdstr or not tgtstr:
             api_hlp = 'API Documentation\nhttp://%s:%s/cmd/tgt cmd = domain or alexa tgt = domain name' % (self.server.server_address[0], self.server.server_address[1])
             self.wfile.write(api_hlp.encode("latin-1"))
             return
         params={}
         try:
             for prm in urlparams.split("&"):
                 key,value = prm.split("=")
                 params[key]=value
         except:
             self.wfile.write('Unable to parse the url.'.encode('latin-1'))
             return
     if params["cmd"] == "alexa":
         if self.server.args.verbose: self.server.safe_print ("Alexa Query:", params["tgt"])
         if not self.server.alexa:
             if self.server.args.verbose: self.server.safe_print ("No Alexa data loaded. Restart program.")
             self.wfile.write("Alexa not loaded on server. Restart server with -a or --alexa and file path.".encode("latin-1"))
         else:
             if self.server.args.verbose: self.server.safe_print ("Alexa queried for:%s" % (params['tgt']))              
             self.wfile.write(str(self.server.alexa.get(params["tgt"],"0")).encode("latin-1"))
     elif params["cmd"] == "domain":
         fields=[]
         if "/" in params['tgt']:
             fields = params['tgt'].split("/")
             params['tgt'] = fields[-1]
             fields = fields[:-1]
         if params['tgt'] in self.server.cache:
             if self.server.args.verbose: self.server.safe_print("Found in cache!!")
             domain_info = self.server.cache.get(params['tgt'])
             # If whois previously told us the domain doesn't exist, return the cached response. Don't update the time, so this entry times out at the cache interval.
             if domain_info.get('status','NOT FOUND') == "NOT FOUND":
                 self.wfile.write(str("No whois record for %s" % (params['tgt'])).encode("latin-1"))
                 return 
             #Update the time on the domain so frequently queried domains stay in cache.
             domain_info["time"] = time.time()
             try:
                 self.server.cache_lock.acquire()
                 self.server.cache[params['tgt']] = domain_info
             finally:
                 self.server.cache_lock.release()
         else:
             #Look it up on the web
             try:
                 if self.server.args.verbose: self.server.safe_print ("Querying the web", params['tgt'])
                 domain_info = whois.whois(params['tgt'])
                 if not domain_info.get('creation_date'):
                     self.wfile.write(str("No whois record for %s" % (params['tgt'])).encode("latin-1"))
                     return
             except Exception as e:
                 print(e)
                 if "no match for" in str(e).lower():
                     domain_info={'domain_name': params['tgt'], 'time': time.time(),'status':"NOT FOUND"}
                 else:
                     self.server.safe_print ("Error querying whois server: %s" % (str(e)))     
                     return
             #Put it in the cache
             self.server.safe_print("Caching whois record %s" % (domain_info.get("domain_name","incomplete record")))
             domain_info["time"] = time.time()
             if self.server.alexa:
                 domain_info['alexa'] = self.server.alexa.get(params["tgt"],"0")
             try:
                 self.server.cache_lock.acquire()
                 self.server.cache[params['tgt']] = domain_info
             finally:
                 self.server.cache_lock.release()
         if not fields:
             dinfo = pformat(domain_info)
             self.wfile.write(dinfo.encode("latin-1"))
         else:
             if self.server.args.verbose: self.server.safe_print("processing fields %s" % (fields))
             if domain_info.get('status','') == "NOT FOUND":
                 self.wfile.write(str("No whois record for %s" % (params['tgt'])).encode("latin-1"))
                 return
             for fld in fields:
                 #We only pull one value if multiple values exist unless the field name ends with an * or --all was on cli
                 retrieve_all = self.server.args.all
                 if fld.endswith("*"):
                     fld = fld[:-1]
                     retrieve_all = True
                 fld_value = domain_info.get(fld,"no field named %s found" % (fld))
                 if (not retrieve_all) and type(fld_value)==list:
                     fld_value = fld_value[0]
                 self.wfile.write(str(fld_value).encode("latin-1")+b"; ")              
     return
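# The handler above caches whois answers in a server-side dict keyed by domain and
# refreshes each entry's "time" field on access so popular domains stay cached. A
# stripped-down sketch of that idea; the names and the one-hour TTL are illustrative,
# and the locking used in the original is omitted for brevity.
import time
import whois

_CACHE = {}
_TTL_SECONDS = 3600

def cached_whois(domain):
    entry = _CACHE.get(domain)
    if entry and time.time() - entry["time"] < _TTL_SECONDS:
        entry["time"] = time.time()  # keep frequently queried domains fresh
        return entry["record"]
    record = whois.whois(domain)
    _CACHE[domain] = {"record": record, "time": time.time()}
    return record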
Exemplo n.º 43
0
# -*- coding: utf-8 -*-
"""使用python-whois"""
import whois
print(whois.whois('www.baidu.com'))
Exemplo n.º 44
0
def check_domain(domain_name, expiration_days, cost, interval_time=None, current_domain=0):
    """
    Check domain
    :param domain_name: string
    :param expiration_days: integer
    :param cost: float
    :param interval_time: integer
    :param current_domain: integer
    :return: False - Error, True - Successfully
    """
    global NAMESPACE
    global EXPIRES_DOMAIN
    global ERRORS_DOMAIN
    global ERRORS2_DOMAIN
    global G_DOMAINS_TOTAL

    is_internal_error = False
    if not interval_time:
        interval_time = NAMESPACE.interval_time

    if NAMESPACE.use_only_external_whois:
        expiration_date, registrar, whois_server, error = make_whois_query(
            domain_name)
    else:
        expiration_date = None
        registrar = None
        whois_server = None
        error = None

        try:
            w = whois.whois(domain_name)
        except Exception:
            is_internal_error = True
            error = 1

        if not is_internal_error:
            expiration_date = w.get("expiration_date")
            registrar = w.get("registrar")
            whois_server = w.get("whois_server")
        else:
            if NAMESPACE.use_extra_external_whois:
                expiration_date_e, registrar_e, whois_server_e, error = make_whois_query(
                    domain_name)
                if error:
                    if error == 1:
                        if domain_name not in ERRORS_DOMAIN:
                            ERRORS_DOMAIN.append(str(domain_name).lower())
                    elif error == 2:
                        # Exceeded the limit on whois
                        if domain_name not in ERRORS2_DOMAIN:
                            ERRORS2_DOMAIN.append(str(domain_name).lower())

                if not expiration_date:
                    expiration_date = expiration_date_e
                if not registrar:
                    registrar = registrar_e
                if not whois_server:
                    whois_server = whois_server_e
            else:
                if domain_name not in ERRORS_DOMAIN:
                    ERRORS_DOMAIN.append(str(domain_name).lower())
                print_domain(domain_name, None, None, None, -1, -1, cost, current_domain, error)  # Error
                if current_domain < G_DOMAINS_TOTAL:
                    if interval_time:
                        if NAMESPACE.print_to_console:
                            print(f"\tWait {interval_time} sec...\r", end="")
                        time.sleep(interval_time)
                return False

    if (not whois_server) and (not registrar) and (not expiration_date):
        print_domain(domain_name, whois_server, registrar,
                        expiration_date, -2, -1, cost, current_domain, error)  # Free ?
        if current_domain < G_DOMAINS_TOTAL:
            if interval_time:
                if NAMESPACE.print_to_console:
                    print(f"\tWait {interval_time} sec...\r", end="")
                time.sleep(interval_time)
        return False

    if not expiration_date:
        print_domain(domain_name, whois_server, registrar,
                        expiration_date, -1, -1, cost, current_domain, error)  # Error
        if current_domain < G_DOMAINS_TOTAL:
            if interval_time:
                if NAMESPACE.print_to_console:
                    print(f"\tWait {interval_time} sec...\r", end="")
                time.sleep(interval_time)
        return False

    if isinstance(expiration_date, list):
        # several dates may be returned; keep the latest one
        expiration_date_min = max(expiration_date)
    else:
        expiration_date_min = expiration_date

    days_remaining = calculate_expiration_days(expiration_date_min)

    print_domain(domain_name, whois_server, registrar, expiration_date_min, days_remaining,
                    expiration_days, cost, current_domain, error)

    if days_remaining < expiration_days:
        EXPIRES_DOMAIN[str(domain_name).lower()] = days_remaining

    return True
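# check_domain relies on calculate_expiration_days, which is defined elsewhere in
# the script and not shown in this excerpt. A plausible minimal sketch of what it
# computes (days from now until the given expiration date); this is an assumption,
# not the original helper.
from datetime import datetime

def calculate_expiration_days(expiration_date):
    return (expiration_date - datetime.now()).days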
Exemplo n.º 45
0
def get_domain_expiration_date(domain_name):
    who_is = whois.whois(domain_name)
    if not who_is:
        return None
    exp_date = who_is["expiration_date"]
    return exp_date[0] if isinstance(exp_date, list) else exp_date
Exemplo n.º 46
0
def Domain_whois(domain):
    return whois.whois(domain)
Exemplo n.º 47
0
def category3(website):

    file_obj = open(r'phishing5.txt', 'a')
    file_obj.closed
    try:
        match_web = re.search('(.[\w]+[-`]*.com)', website)
        if match_web:
            domain = match_web.group(1)

        page = urllib.request.urlopen(website)
        soup = BeautifulSoup(page, "html.parser")
        #a = soup.findAll('div')
        dict_anchor = {'hashvalue': 0, 'content': 0, 'jvalue': 0, 'anyo': 0}
        dict_url = {'phishing': 0, 'legitimate': 0}
        #print a
        for data in soup.findAll('a'):
            #10 Request_URL checking
            if data.get('href'):
                match_url = re.search('^http', str(data.get('href')))
                #data_URL = str(data.get('href'))
                if match_url:  # starts with http, i.e. an absolute address
                    if 'phishing' in dict_url:
                        dict_url['phishing'] += 1
                    else:
                        dict_url['phishing'] = 0

                else:
                    if 'legitimate' in dict_url:
                        dict_url['legitimate'] += 1
                    else:
                        dict_url['legitimate'] = 0
                #print "yeah legitimate website"
            else:
                if 'legitimate' in dict_url:
                    dict_url['legitimate'] += 1
                else:
                    dict_url['legitimate'] = 0

            #11 URL_OF_ANCHOR checking
            if data.get('href'):
                match = re.search('^#$', str(data.get('href')))
                if match:
                    if 'hashvalue' in dict_anchor:
                        dict_anchor['hashvalue'] += 1
                    else:
                        dict_anchor['hashvalue'] = 0
                match2 = re.search('^#[a-zA-Z0-9]+$', str(data.get('href')),
                                   re.I)
                if match2:
                    if 'content' in dict_anchor:
                        dict_anchor['content'] += 1
                    else:
                        dict_anchor['content'] = 0
                match3 = re.search('^JavaScript::void(0)',
                                   str(data.get('href')), re.I)
                if match3:
                    if 'jvalue' in dict_anchor:
                        dict_anchor['jvalue'] += 1
                    else:
                        dict_anchor['jvalue'] = 0
                match4 = re.search('[0-9a-zA-Z/]+', str(data.get('href')),
                                   re.I)
                if match4:
                    if 'anyo' in dict_anchor:
                        dict_anchor['anyo'] += 1
                    else:
                        dict_anchor['anyo'] = 0

            #print data.get('href')
        # remove print statement before final binding
        #print dict_anchor,"\n",dict_url

        phishing = dict_anchor['hashvalue'] + dict_anchor[
            'content'] + dict_anchor['jvalue']
        total = dict_anchor['hashvalue'] + dict_anchor[
            'content'] + dict_anchor['jvalue'] + dict_anchor['anyo']
        if total != 0:
            total_phishing_per = float(phishing * 100 / total)
        else:
            total_phishing_per = None
        total_requested_url = dict_url['phishing'] + dict_url['legitimate']
        if total_requested_url != 0:
            total_phishing_url_per = float(dict_url['phishing'] * 100 /
                                           total_requested_url)
        else:
            total_phishing_url_per = None

        #12 Links in <Meta>, <Script> and <Link> tags
        meta = soup.findAll('meta')
        script = soup.findAll('script')
        link = soup.findAll('link')
        #print link
        #link = soup.find_all(href=re.compile('domain'))
        dict_MSL = {
            'L_meta': len(meta),
            'P_link': 0,
            'L_script': 0,
            'L_link': 0,
            'P_script': 0
        }
        #print dict_MSL
        #print len(meta),link

        for val in link:
            if val.get('href') != None:
                match_link1 = re.search('^[/#]+', str(val.get('href')))
                if match_link1:
                    if 'L_link' in dict_MSL:
                        dict_MSL['L_link'] += 1
                else:
                    match_link2 = re.search('.css$', str(val.get('href')))
                    if match_link2:
                        if 'L_link' in dict_MSL:
                            dict_MSL['L_link'] += 1
                    else:
                        match_link3 = re.search(
                            '([\w]+.[\w]+[-`a-z0-9A-Z]*.com)',
                            str(val.get('href')))
                        if match_link3:
                            modified_url = re.search(
                                '(.[\w]+[-`a-z0-9A-Z]*.com)',
                                match_link3.group(1))
                            if domain == modified_url.group(1):
                                if 'L_link' in dict_MSL:
                                    dict_MSL['L_link'] += 1
                            else:
                                if 'P_link' in dict_MSL:
                                    dict_MSL['P_link'] += 1
                        else:
                            if 'P_link' in dict_MSL:
                                dict_MSL['P_link'] += 1
        for val in script:
            #print str(val.get('src'))
            if val.get('src') != None:
                #print str(val.get('src'))
                match_script1 = re.search('.js$', str(val.get('src')))
                if match_script1:
                    if 'L_script' in dict_MSL:
                        dict_MSL['L_script'] += 1
                elif re.search('.com$', str(val.get('src'))):
                    match_script2 = re.search('(www.[\w]+[-`\w]*.com$)',
                                              str(val.get('src')))
                    match_script2 = re.search('(.[\w]+[-`a-z0-9A-Z]*.com)',
                                              match_script2.group(1))
                    if match_script2.group(1) == domain:
                        if 'L_script' in dict_MSL:
                            dict_MSL['L_script'] += 1
                    else:
                        if 'P_script' in dict_MSL:
                            dict_MSL['P_script'] += 1
                else:
                    if 'P_script' in dict_MSL:
                        dict_MSL['P_script'] += 1

        #print dict_MSL
        total_MSL = sum(dict_MSL.values())
        P_MSL = dict_MSL['P_script'] + dict_MSL['P_link']
        #print total_MSL,P_MSL
        if total_MSL != 0:
            per_p_msl = float(P_MSL * 100 / total_MSL)
        else:
            per_p_msl = None
        #print per_p_msl ,'%'

        # 10 Request_URL: write result to file
        if total_phishing_url_per != None:
            if total_phishing_url_per < 22:
                file_obj.write('1,')
            else:
                file_obj.write('-1,')
        else:
            file_obj.write('-1,')
    except:
        file_obj.write('-1,')
    file_obj.flush()
    #print total

    #print total_phishing_per
    # 11 URL_of_Anchor: write result to file
    try:
        if total_phishing_per != None:
            if total_phishing_per < 31:
                file_obj.write('1,')
            elif 31 <= total_phishing_per < 67:
                file_obj.write('0,')
            else:
                file_obj.write('-1,')
        else:
            file_obj.write('-1,')
    except:
        file_obj.write('-1,')
    file_obj.flush()
    #write MSL output on file
    try:
        if per_p_msl != None:
            if per_p_msl < 17:
                file_obj.write('1,')
            elif 17 <= per_p_msl < 81:
                file_obj.write('0,')
            else:
                file_obj.write('-1,')
        else:
            file_obj.write('-1,')
    except:
        file_obj.write('-1,')
    file_obj.flush()

    #13 Server Form Handler (SFH)
    try:
        form = soup.find('form')
        #print form
        if form != None:
            #print(form)
            if form.get('action') != None:
                #print ('y')
                match_form = re.search('^[./]+', str(form.get('action')))
                if match_form:
                    file_obj.write('1,')
                elif re.search('^http', str(form.get('action'))):
                    match_form2 = re.search('(www.[a-z0-9A-Z]+[-`]*.com)',
                                            str(form.get('action')))
                    if match_form2:
                        #print match_form2.group(1),domain
                        modified_url = re.search('(.[\w]+[-`a-z0-9A-Z]*.com)',
                                                 match_form2.group(1))
                        #print modified_url.group(1),domain
                        if domain == modified_url.group(1):
                            file_obj.write('1,')
                        else:
                            file_obj.write('0,')
                else:
                    file_obj.write('-1,')
            else:
                file_obj.write('1,')
        else:
            file_obj.write('0,')
    except:
        file_obj.write('-1,')
    file_obj.flush()
    #14 Abnormal URL
    try:
        temp = whois.whois(website)
        if type(temp.domain_name) == list:
            domain_name = temp.domain_name[0].lower()
        elif type(temp.domain_name) == str:
            domain_name = temp.domain_name.lower()
        match_d_name = re.search(domain_name, website)
        if match_d_name:
            file_obj.write('1,')
        else:
            file_obj.write('-1,')
    except:
        file_obj.write('-1,')
    file_obj.flush()

    file_obj.close()
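# Feature 14 above ("Abnormal URL") checks whether the domain name that whois
# reports actually appears in the URL being scored. A standalone sketch of that
# check; the function name is illustrative and, as in the original, the lookup is
# best wrapped in try/except in practice.
import re
import whois

def is_abnormal_url(website):
    record = whois.whois(website)
    name = record.domain_name
    if isinstance(name, list):
        name = name[0]
    if not name:
        return True
    return re.search(re.escape(name.lower()), website.lower()) is None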
Exemplo n.º 48
0
def Domain_email(domain):
    return whois.whois(domain)["emails"]
Exemplo n.º 49
0
    sys.exit()

total = 1000
tb1 = pt.PrettyTable()
tb1.field_names = ['Domain', 'Expiration date']

with open(file, 'r', encoding='UTF-8') as fs:
    domains = fs.readlines()
    domains = [x.rstrip('\n') for x in domains]

i = 1
pbar = ProgressBar(widgets=widgets, maxval=10 * total).start()
for sname in domains:
    pbar.update(100 * i + 1)
    i = i + 10
    time.sleep(1)
    wd = whois.whois(sname)
    if wd:
        if isinstance(wd.expiration_date, list):
            expDate = wd.expiration_date[1]
            tb1.add_row([sname, str(expDate.strftime('%Y-%m-%d'))])
        else:
            tb1.add_row([sname, (wd.expiration_date).strftime('%Y-%m-%d')])

pbar.finish()
print(tb1)
if query_yes_no('Save the results to a file?', 'no'):
    save_file(tb1)

#os.system("pause")
Exemplo n.º 50
0
def Domains_whois(domains):
    res=[]
    for i in domains:
        res.append(whois.whois(i))
    return res
Exemplo n.º 51
0
#!/usr/bin/python3

from whois import whois
import pandas as pd

name_of_domain = input("Enter the name of domain: ")

# do the whois lookup once and reuse the raw record text for all the checks below
whois_text = whois(name_of_domain).text

for line in whois_text.splitlines():
	if "Registry Expiry Date" in line:
		final_split2 = line.split()[3].split("T")[0]
		print("Expiry Date %s" % final_split2)
		days_to_expire = (pd.to_datetime(final_split2) - pd.to_datetime('today')).days
		if days_to_expire <= 30:
			print("Due for renewal")
		else:
			print("Days to expire: %s" % days_to_expire)

for line in whois_text.splitlines():
	if "Registrant Organization" in line:
		print(line)

for line in whois_text.splitlines():
	if "Registrant State/Province:" in line:
		print(line)

for line in whois_text.splitlines():
	if "Registrant Country" in line:
		print(line)
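# The loops above scrape "Registry Expiry Date" out of the raw whois text, a label
# that not every registry uses. An alternative sketch that leans on python-whois's
# parsed expiration_date field instead (it can be a list or a single datetime);
# the helper name days_to_expiry is illustrative.
from datetime import datetime
from whois import whois

def days_to_expiry(domain):
    exp = whois(domain).expiration_date
    if isinstance(exp, list):
        exp = exp[0]
    if exp is None:
        return None
    return (exp - datetime.now()).days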
Exemplo n.º 52
0
def Domains_email(domains):
    res=[]
    for i in domains:
        res.append(whois.whois(i)["emails"])
    return res
Exemplo n.º 53
0
#!/usr/bin/python

import whois
from datetime import datetime
from sys import argv, exit

now = datetime.now()

if len(argv) < 2:
    print 'No domain specified on the command line, usage:  '
    print ''
    print '    ./check-domain.py example.net'
    exit(1)

domain = argv[1]
w = whois.whois(domain)

if w.expiration_date is None or w.status is None:
    print 'The domain does not exist, exiting...'
    exit(1)

if type(w.expiration_date) == list:
    w.expiration_date = w.expiration_date[0]

domain_expiration_date = str(w.expiration_date.day) + '/' + str(
    w.expiration_date.month) + '/' + str(w.expiration_date.year)

timedelta = w.expiration_date - now
days_to_expire = timedelta.days
Exemplo n.º 54
0
    def do_GET(self):
        """Respond to a GET request."""

        try:
            if self.path.endswith("/"):
                self.output_file(curdir + sep + "index.html", 'text/html')
                return
            elif self.path.endswith(".html"):
                self.output_file(curdir + sep + self.path, 'text/html')
                return
            elif self.path.endswith(".css"):
                self.output_file(curdir + sep + self.path, 'text/css')
                return
            elif self.path.endswith(".js"):
                self.output_file(curdir + sep + self.path, 'application/javascript')
                return
            elif self.path.endswith(".map"):
                self.output_file(curdir + sep + self.path, 'application/json')
                return
            elif self.path.endswith(".ico"):
                self.output_file(curdir + sep + self.path, 'image/x-icon')
                return
            elif self.path.endswith(".png") and self.path.find("..") != 0:
                f = open(curdir + sep + self.path, "rb") 
                self.send_response(200)

                month = timedelta(days=30)
                futuredate = date.today() + month
                self.send_header('Expires', futuredate.strftime('%a, %d %b %Y %H:%M:%S GMT'))
                self.send_header('Content-type', 'image/png')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            # v2 REST API - get geo for an IPv4
            elif "geov4.ncc" in self.path:
                lastSlash = self.path.rfind("/")
                strIP = self.path[lastSlash + 1:]
                strIMG = _hostinfo.getGeoImagebyIPv4new(strIP)

                f = open(curdir + sep + strIMG, "rb") 
                self.send_response(200)

                month = timedelta(days=30)
                futuredate = date.today() + month
                self.send_header('Expires', futuredate.strftime('%a, %d %b %Y %H:%M:%S GMT'))
                self.send_header('Content-type', 'image/png')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()

            # v2 REST API - get geo for an IPv6
            elif "geov6.ncc" in self.path:
                lastSlash = self.path.rfind("/")
                strIP = self.path[lastSlash + 1:]
                strIMG = _hostinfo.getGeoImagebyIPv6new(strIP)

                f = open(curdir + sep + strIMG, "rb") 
                self.send_response(200)

                month = timedelta(days=30)
                futuredate = date.today() + month
                self.send_header('Expires', futuredate.strftime('%a, %d %b %Y %H:%M:%S GMT'))
                self.send_header('Content-type', 'image/png')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()

            # v2 REST API - get whois for domain
            elif "whois.ncc" in self.path:
                lastSlash = self.path.rfind("/")
                strDomain = urllib.parse.unquote(self.path[lastSlash + 1:])
                
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.output(whois(strDomain))
                
            else:
                self.send_error(404, '[!] File Not Found: %s' % self.path)

        except IOError:
            self.send_error(404, '[!] File Not Found: %s' % self.path)
        except:
            pass
Exemplo n.º 55
0
def getWhois(url):
    try:
        domain = whois.whois(url)
        print(domain)
    except:
        print("Domain does not exist")
def get_domain_expiration_date(url):
    expiration_site_date = whois.whois(url).expiration_date
    if isinstance(expiration_site_date, list):
        return expiration_site_date[0]
    else:
        return expiration_site_date
Exemplo n.º 57
0
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if eventData in self.results:
            return None
        else:
            self.results[eventData] = True

        self.sf.debug("Received event, " + eventName + ", from " +
                      srcModuleName)

        try:
            data = None
            if eventName != "NETBLOCK_OWNER":
                whoisdata = whois.whois(eventData)
                if whoisdata:
                    data = whoisdata.text
            else:
                qry = eventData.split("/")[0]
                ip = IPAddress(qry) + 1
                self.sf.debug("Querying for IP ownership of " + str(ip))
                r = ipwhois.IPWhois(ip)
                whoisdata = r.lookup_rdap(depth=1)
                if whoisdata:
                    data = str(whoisdata)
            if not data:
                self.sf.error(
                    "Unable to perform WHOIS on " + eventData + ": no data returned",
                    False)
                return None
        except BaseException as e:
            self.sf.error(
                "Unable to perform WHOIS on " + eventData + ": " + str(e),
                False)
            return None

        # This is likely to be an error about being throttled rather than real data
        if len(data) < 250:
            self.sf.error("Throttling from Whois is probably happening.",
                          False)
            return None

        if eventName.startswith("DOMAIN_NAME"):
            typ = "DOMAIN_WHOIS"
        if eventName.startswith("NETBLOCK"):
            typ = "NETBLOCK_WHOIS"
        if eventName.startswith("AFFILIATE_DOMAIN"):
            typ = "AFFILIATE_DOMAIN_WHOIS"
        if eventName.startswith("CO_HOSTED_SITE_DOMAIN"):
            typ = "CO_HOSTED_SITE_DOMAIN_WHOIS"
        if eventName == "SIMILARDOMAIN":
            typ = "SIMILARDOMAIN_WHOIS"

        rawevt = SpiderFootEvent(typ, data, self.__name__, event)
        self.notifyListeners(rawevt)

        if 'registrar' in whoisdata:
            if eventName.startswith(
                    "DOMAIN_NAME") and whoisdata['registrar'] is not None:
                evt = SpiderFootEvent("DOMAIN_REGISTRAR",
                                      whoisdata['registrar'], self.__name__,
                                      event)
                self.notifyListeners(evt)
Exemplo n.º 58
0
# importing csv module
import csv
import whois

filename = "input.csv"

fields = []
rows = []

with open(filename, 'r') as csvfile:
    csvreader = csv.reader(csvfile)
    fields = next(csvreader)
    for row in csvreader:
        rows.append(row)

filename = "output.csv"
fields = ['Domain name', 'Creation', 'ID', 'Location', 'IP', 'Expiration']
with open(filename, 'w') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)

    for row in rows:
        for col in row:
            x = whois.whois(col)
            csvwriter.writerow([(x.domain_name[1]), (x.creation_date[0]),
                                (x.registry_domain_id), (x.state),
                                (x.registrar_url), (x.expiration_date[0])])
        print('\n')
Exemplo n.º 59
0
def ajax_whois():
    hostname = re.sub(r"^(?:http|https|ftp):\/\/", "",
                      request.form["hostname"]).rstrip('/')
    who = pythonwhois.whois(hostname)
    print(who)
    return who
def get_domain_expiration_date(url):
    domain = whois.extract_domain(url)
    return whois.whois(domain).expiration_date