Code example #1
File: hunteriomanager.py Project: zimshk/abaddon
from pyhunter import PyHunter


class HunterioManager:
    def __init__(self):
        self.hunter = PyHunter('72ebe102b85923275c17d6b374551edb56dbb898')

    def get_mails(self, domain, company):
        """Wrapper around domain_search."""
        results_file = domain + ".txt"
        # Search by company name when one is given, otherwise by domain.
        if company != "":
            print(company)
            results = self.hunter.domain_search(company=company)
        else:
            print(domain)
            results = self.hunter.domain_search(domain)

        # Print every address and save it to <domain>.txt.
        with open(results_file, 'w') as file:
            for entry in results['emails']:
                print(entry['value'])
                file.write(entry['value'] + '\n')
        print("Go to hunter.io/<domain> and explore the results manually")
Code example #2
 def domain_search_hunter(self):
     # Reuse hunter.io results that were already loaded from the file.
     if len(self.hunter_io_mails) > 0:
         self.email_list.update(self.hunter_io_mails)
         print(
             "Email list is expanded using hunter.io results from the file."
         )
         return
     hunter = PyHunter(self.hunter_api_key)
     # Page through the hunter.io results 100 addresses at a time.
     i = 0
     search = hunter.domain_search(self.domain_name,
                                   limit=100,
                                   offset=100 * i)
     while search['emails']:
         for item in search['emails']:
             user_mail = item['value']
             self.email_list.add(user_mail)
             self.hunter_io_mails.append(user_mail)
         i += 1
         search = hunter.domain_search(self.domain_name,
                                       limit=100,
                                       offset=100 * i)
     print("{} emails have been found via hunter.io".format(
         len(self.hunter_io_mails)))
     with open("email_hunter.em", "w", encoding="UTF-8") as ff:
         ff.write("\n".join(self.hunter_io_mails))
Code example #3
File: Hunter.py Project: miguelrang/Project
    def search(self):
        hunter = PyHunter(self.apikey)
        result = hunter.domain_search(company=self.domain,
                                      limit=self.limit,
                                      emails_type='personal')

        return result
Code example #4
from pyhunter import PyHunter

# Module-level list that collects the discovered addresses.
email = []


def hunter_domain_search(domain):
    api = "44ac9d0fcf060465933c1591d75c2ace4b1692d8"
    hunter = PyHunter(api)
    info = hunter.account_information()
    print("Remaining API calls:", info["calls"]["left"])
    print("hunter domain search..")
    while True:
        if info["calls"]["left"] != 0:
            result = hunter.domain_search(domain)
            for entry in result["emails"]:
                email.append(entry["value"])
            break
        else:
            print("\nThe current API key has no searches left")
            print("""
            1 - Add a new API key
            0 - Exit
            """)

            choice = input("Choose the action you want to perform = ")

            if choice == "1":
                api = input("\nEnter the new API key = ")
                # Recreate the client and refresh the quota with the new key.
                hunter = PyHunter(api)
                info = hunter.account_information()
            elif choice == "0":
                break
            else:
                print("Invalid choice, please try again..")
Code example #5
 def domain_search_hunter(self):
     hunter = PyHunter(self.hunter_api_key)
     # Page through the hunter.io results 100 addresses at a time.
     i = 0
     search = hunter.domain_search(self.domain_name,
                                   limit=100,
                                   offset=100 * i)
     while search['emails']:
         for item in search['emails']:
             user_mail = item['value']
             self.email_list.add(user_mail)
             self.hunter_io_mails.append(user_mail)
         i += 1
         search = hunter.domain_search(self.domain_name,
                                       limit=100,
                                       offset=100 * i)
     print("{} emails have been found via hunter.io".format(
         len(self.hunter_io_mails)))
Code example #6
import json

import pandas as pd
from pyhunter import PyHunter


def domain_search(company, qualified):
    if company is not None:
        hunter = PyHunter('a890302fd13f718af83604989dbd3213772a0d07')
        json_load = json.loads(hunter.domain_search(company=company, limit=1000, raw=True).text)
        positions = []
        emails = []
        last_names = []
        first_names = []
        scores = []
        companies = []
        for entry in json_load['data']['emails']:
            # emailVerifier() is a helper defined elsewhere in the project.
            if not emailVerifier(entry['value']):
                continue
            # 'qualified' controls how strictly results are filtered:
            #   False   -> keep every verified address
            #   True    -> keep only addresses with a known last name
            #   'ultra' -> keep only addresses with a known position
            keep = False
            if qualified is False:
                keep = True
            elif qualified is True:
                keep = entry['last_name'] is not None
            elif qualified == 'ultra':
                keep = entry['position'] is not None
            if keep:
                positions.append(entry['position'])
                emails.append(entry['value'])
                scores.append(entry['confidence'])
                last_names.append(entry['last_name'])
                first_names.append(entry['first_name'])
                companies.append(company)
        print('company name finished:', company)
        companies = pd.DataFrame({'last_name': last_names,
                                  'email': emails,
                                  'company': companies,
                                  'position': positions,
                                  'first_name': first_names,
                                  'score': scores})
        if emails != []:
            companies.to_csv('/home/francois/Téléchargements/allCompanies/' + company + '.csv')
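Note that this example is the only one on this page that passes raw=True and then parses the HTTP response body itself, which is why it reads through the top-level 'data' wrapper (json_load['data']['emails']) that the other examples never see. A minimal sketch of the difference, assuming only the behaviour shown above and using a placeholder key and domain:

import json

from pyhunter import PyHunter

hunter = PyHunter('YOUR_API_KEY')  # placeholder key

# Default call: PyHunter returns the already-unwrapped dict, so 'emails' sits at the top level.
data = hunter.domain_search('example.com')  # placeholder domain
print(len(data['emails']))

# raw=True: PyHunter returns the HTTP response, so the body still carries the 'data' wrapper.
raw_response = hunter.domain_search('example.com', raw=True)
payload = json.loads(raw_response.text)
print(len(payload['data']['emails']))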
Code example #7
 def Text(self):
     textboxValue = self.textbox.text()
     hunter = PyHunter('Enter key here')
     result = hunter.domain_search(textboxValue)
     emails = result['emails']
     # Write every address found for the domain to <domain>.txt.
     with open(textboxValue + ".txt", 'w') as file:
         for entry in emails:
             file.write(entry['value'])
             file.write('\n')
     QMessageBox.question(self, 'Emails',
                          "The Emails are saved in a text file",
                          QMessageBox.Ok, QMessageBox.Ok)
Code example #8
 def spreadsheet(self):
     textboxValue = self.textbox.text()
     hunter = PyHunter('Enter key here')
     result = hunter.domain_search(textboxValue)
     emails = result['emails']
     # Write the addresses as a two-column CSV: serial number and email.
     with open(textboxValue + ".csv", 'w') as csv_file:
         csv_file.write("S.No.,Email\n")
         for i, entry in enumerate(emails, start=1):
             csv_file.write(str(i) + "," + entry['value'] + "\n")
     QMessageBox.question(self, 'Emails',
                          "The Emails are saved in a spreadsheet",
                          QMessageBox.Ok, QMessageBox.Ok)
Code example #9
 def run(self, conf, args, plugins):
     if 'subcommand' in args:
         hunter = PyHunter(conf['Hunter']['key'])
         if args.subcommand == 'email':
             if ' ' not in args.NAME:
                 print('Name should contain both first and last name')
                 print('(Yes this API is useless)')
                 sys.exit(1)
             res = hunter.email_finder(
                 domain=args.DOMAIN,
                 full_name=args.NAME,
                 raw=True
             )
             # raw=True returns the HTTP response, so dump its JSON body.
             print(json.dumps(res.json(), sort_keys=True, indent=4))
         elif args.subcommand == 'domain':
             res = hunter.domain_search(args.DOMAIN)
             print(json.dumps(res, sort_keys=True, indent=4))
         else:
             self.parser.print_help()
     else:
         self.parser.print_help()
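Besides domain_search, this example also exercises PyHunter's email_finder call, and code example #12 below uses email_verifier. A minimal sketch combining the two calls with the same keyword arguments used in those examples, without raw mode; the key, domain, name, and address are placeholders, and the returned values are simply printed rather than having their structure assumed:

from pyhunter import PyHunter

hunter = PyHunter('YOUR_API_KEY')  # placeholder key

# Guess the most likely address for a person at a domain (placeholder values).
found = hunter.email_finder(domain='example.com', full_name='Jane Doe')
print(found)

# Check a single address (placeholder value), as in code example #12.
verification = hunter.email_verifier('jane.doe@example.com')
print(verification)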
Code example #10
File: hunterio.py Project: superzerosec/lure
def get_hunterio_emails(company, API_KEY):
    hunter = PyHunter(API_KEY)

    account_info = hunter.account_information()
    calls_remaining = account_info['calls']['left']
    calls_allowed = account_info['calls']['available']
    print_success("[+] Checking hunter.io ({0}/{1} queries remaining)".format(
        calls_remaining, calls_allowed))
    results = hunter.domain_search(company, limit=1000, emails_type="personal")
    company_records = results['emails']

    hunterio_emails = []
    for record in company_records:
        email = str(record['value'])
        fname = str(record['first_name'])
        lname = str(record['last_name'])
        position = str(record['position'])
        hunterio_emails.append(fname + "," + lname + "," + email + "," +
                               position)
    return hunterio_emails
Code example #11
                        dic_dnsreconrecords['tipo'] = y.attrib['type']
                        dic_dnsreconrecords['name'] = y.attrib['name']
                        saida = '\nType :' + dic_dnsreconrecords[
                            'tipo'] + ' Record:' + dic_dnsreconrecords['name']
                        log_osint.write(saida)
except:
    pass

# Run the EMAILS scan
saida = '\n\n########### E M A I L S ###########\n'
log_osint.write(saida)

print("\033[32m" + "[+] SCAN EMAILS Execution" + "\033[0;0m" + "\n")
## Connect to Hunter and fetch information
hunter = PyHunter('40719a5bd9a907b3c9b204b652a9a1a770b95348')
hunter_search = hunter.domain_search(dominio)
# Parse the Hunter response
for x in hunter_search.keys():
    if x == 'domain':
        dic_hunter['domain'] = hunter_search.get('domain')
    if x == 'emails':
        emails = hunter_search.get('emails')
        for y in emails:
            e_mail = y.get('value')
            dic_hunter['email'] = e_mail
            dic_email[e_mail] = e_mail
            dic_hunter['tipo'] = y.get('type')
            if dic_hunter['tipo'] is None:
                dic_hunter['tipo'] = ''
            dic_hunter['confianca'] = y.get('confidence')
            if dic_hunter['confianca'] is None:
Code example #12
def Search(Query_List, Task_ID, Type, Limit=10):

    try:
        Data_to_Cache = []
        Directory = General.Make_Directory(Plugin_Name.lower())
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        Log_File = General.Logging(Directory, Plugin_Name.lower())
        handler = logging.FileHandler(os.path.join(Directory, Log_File), "w")
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        Hunter_API_Key = Load_Configuration()
        API_Session = PyHunter(Hunter_API_Key)
        Cached_Data_Object = General.Cache(Directory, Plugin_Name)
        Cached_Data = Cached_Data_Object.Get_Cache()
        Query_List = General.Convert_to_List(Query_List)
        Limit = General.Get_Limit(Limit)

        for Query in Query_List:

            try:

                if Type == "Domain":

                    if Common.Regex_Handler(Query, Type="Domain"):
                        Local_Plugin_Name = Plugin_Name + "-Domain"
                        API_Response = API_Session.domain_search(Query)
                        JSON_Object = Common.JSON_Handler(API_Response)
                        JSON_Output_Response = JSON_Object.Dump_JSON()

                        if API_Response["domain"] and API_Response['emails']:
                            Main_File = General.Main_File_Create(
                                Directory, Local_Plugin_Name,
                                JSON_Output_Response, Query,
                                The_File_Extensions["Main"])
                            Output_Connections = General.Connections(
                                Query, Local_Plugin_Name, Domain, "Account",
                                Task_ID, Plugin_Name.lower())
                            Current_Step = 0

                            for Hunter_Item in API_Response["emails"]:
                                Current_Email_Address = Hunter_Item["value"]
                                Current_Hunter_Item_Host = f"https://{Domain}/verify/{Current_Email_Address}"
                                Current_Hunter_Item_Responses = Common.Request_Handler(
                                    Current_Hunter_Item_Host,
                                    Filter=True,
                                    Host=f"https://{Domain}")
                                Filtered_Response = Current_Hunter_Item_Responses[
                                    "Filtered"]
                                Title = "Hunter | " + Current_Email_Address

                                if Current_Email_Address not in Cached_Data and Current_Email_Address not in Data_to_Cache and Current_Step < int(
                                        Limit):
                                    Output_file = General.Create_Query_Results_Output_File(
                                        Directory, Query, Local_Plugin_Name,
                                        Filtered_Response,
                                        Current_Hunter_Item_Host,
                                        The_File_Extensions["Query"])

                                    if Output_file:
                                        Output_Connections.Output(
                                            [Main_File, Output_file],
                                            Current_Hunter_Item_Host, Title,
                                            Plugin_Name.lower())
                                        Data_to_Cache.append(
                                            Current_Email_Address)

                                    else:
                                        logging.warning(
                                            f"{Common.Date()} - {__name__.strip('plugins.')} - Failed to create output file. File may already exist."
                                        )

                                    Current_Step += 1

                elif Type == "Email":

                    if Common.Regex_Handler(Query, Type="Email"):
                        Local_Plugin_Name = Plugin_Name + "-Email"
                        API_Response = API_Session.email_verifier(Query)
                        JSON_Object = Common.JSON_Handler(API_Response)
                        JSON_Output_Response = JSON_Object.Dump_JSON()

                        if API_Response["email"] and API_Response['sources']:
                            Main_File = General.Main_File_Create(
                                Directory, Local_Plugin_Name,
                                JSON_Output_Response, Query,
                                The_File_Extensions["Main"])
                            Output_Connections = General.Connections(
                                Query, Local_Plugin_Name, Domain,
                                "Account Source", Task_ID, Plugin_Name.lower())
                            Current_Step = 0

                            for Hunter_Item in API_Response["sources"]:
                                Current_Hunter_Item_Host = Hunter_Item["uri"]
                                Current_Hunter_Item_Domain = Hunter_Item[
                                    "domain"]

                                if 'http://' in Current_Hunter_Item_Host:
                                    Current_Hunter_Item_Responses = Common.Request_Handler(
                                        Current_Hunter_Item_Host,
                                        Filter=True,
                                        Host=
                                        f"http://{Current_Hunter_Item_Domain}")
                                    Filtered_Response = Current_Hunter_Item_Responses[
                                        "Filtered"]

                                elif 'https://' in Current_Hunter_Item_Host:
                                    Current_Hunter_Item_Responses = Common.Request_Handler(
                                        Current_Hunter_Item_Host,
                                        Filter=True,
                                        Host=
                                        f"https://{Current_Hunter_Item_Domain}"
                                    )
                                    Filtered_Response = Current_Hunter_Item_Responses[
                                        "Filtered"]

                                else:
                                    Filtered_Response = Common.Request_Handler(
                                        Current_Hunter_Item_Host)

                                Title = "Hunter | " + Current_Hunter_Item_Host

                                if Current_Hunter_Item_Host not in Cached_Data and Current_Hunter_Item_Host not in Data_to_Cache and Current_Step < int(
                                        Limit):
                                    Output_file = General.Create_Query_Results_Output_File(
                                        Directory, Query, Local_Plugin_Name,
                                        Filtered_Response,
                                        Current_Hunter_Item_Host,
                                        The_File_Extensions["Query"])

                                    if Output_file:
                                        Output_Connections.Output(
                                            [Main_File, Output_file],
                                            Current_Hunter_Item_Host, Title,
                                            Plugin_Name.lower())
                                        Data_to_Cache.append(
                                            Current_Hunter_Item_Host)

                                    else:
                                        logging.warning(
                                            f"{Common.Date()} - {__name__.strip('plugins.')} - Failed to create output file. File may already exist."
                                        )

                                    Current_Step += 1

            except Exception as e:
                logging.warning(
                    f"{Common.Date()} - {__name__.strip('plugins.')} - Failed to complete task - {str(e)}"
                )

        Cached_Data_Object.Write_Cache(Data_to_Cache)

    except Exception as e:
        logging.warning(
            f"{Common.Date()} - {__name__.strip('plugins.')} - {str(e)}")
Code example #13
from pyhunter import PyHunter
from pandas import json_normalize

def scrape(domain, api_key):
    hunter = PyHunter(api_key)
    data = hunter.domain_search(domain)
    domain = data['domain']
    organization = data['organization']
    # Flatten the per-address records into a DataFrame.
    df = json_normalize(data['emails'])
    return df
Code example #14
File: Pyhunter.py Project: KrSuma/InstaHashtag
from pyhunter import PyHunter
import pprint

hunter = PyHunter('a6498ae6a4371fd85d51cbfd9d8f8892b1b6f402')

# Search by domain.
domain_search = hunter.domain_search('cokodive.com')

# Search by company name, restricted to a few personal addresses.
domain_search_o = hunter.domain_search(company='Instagram',
                                       limit=5,
                                       offset=2,
                                       emails_type='personal')

pprint.pprint(domain_search)
pprint.pprint(domain_search_o)
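The examples on this page read a handful of fields from the domain_search result. This short sketch prints only the fields that already appear in the examples above ('domain', 'organization', 'pattern' at the top level, and 'value', 'first_name', 'last_name', 'position', 'confidence' per address); the key and domain are placeholders:

from pyhunter import PyHunter

hunter = PyHunter('YOUR_API_KEY')  # placeholder key
result = hunter.domain_search('example.com')  # placeholder domain

# Top-level fields used in the examples above.
print(result['domain'], result['organization'], result['pattern'])

# Per-address fields used in the examples above.
for entry in result['emails']:
    print(entry['value'], entry['first_name'], entry['last_name'],
          entry['position'], entry['confidence'])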
Code example #15
# -*- coding: utf-8 -*-
"""
Created on Thu Aug  1 11:25:24 2019

@author: ikhwan
"""
import pandas as pd
from pyhunter import PyHunter

hunter = PyHunter('c4d2eab17b086e9bbd65531c7b81a0b5e6e4beb7')

web_list = pd.read_csv('pet_care.csv')

for index, url in web_list.iterrows():
    df_profile = pd.DataFrame()
    mailList = hunter.domain_search(url['web_url'])
    # Pull the list of email records out of the response.
    email = mailList.get('emails', [])

    for mail in email:
        # Drop the nested 'sources' list so the record flattens cleanly.
        del mail['sources']
        if df_profile.empty:
            df_profile = pd.DataFrame.from_dict(mail, orient='index')
        else:
            temp = pd.DataFrame.from_dict(mail, orient='index')
            df_profile = pd.concat([df_profile, temp], axis=1, join='inner')

    df_save = df_profile.transpose()
Code example #16
webpage = webpage.decode("utf8")
# Collect every URL that appears in the response
urls = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', webpage)
# Drop the URLs that belong to the search engine itself (Google here)
urls = [x for x in urls if "google" not in x]
# Remove duplicates
urls = list(dict.fromkeys(urls))

# Two strategies were possible here.
# Option 1: use Hunter.io | Problem: too expensive for a student
# Option 2: find the emails myself from the domain names | Problem: painful, and the scraped addresses are not necessarily valid

# Option 1 is shown here, but only one request is made since this is just a small project (the free plan is limited to 50 requests per month; beyond that you have to pay).

domaineInformation = hunter.domain_search(
    urls[0]
)  # Only the first result is used; to get everything, loop over the URLs and store each response

print("Here is the information obtained:")
# THE LOOP COULD GO HERE
print("For the domain name: " + domaineInformation["domain"])
print("Here is the list of email addresses:")
for info in domaineInformation["emails"]:
    print(
        "Email address: " + info["value"]
    )  # + " " + info["first_name"] + " " + info["last_name"] + " Role: " + info["position"] + " Tel: " + info["phone_number"] + " Linkedin: " + info["linkedin"])

information = hunter.account_information()
print("There are " + str(information["calls"]["left"]) +
      " hunter.io API requests left on the account. " +
      "The hunter.io account belongs to " + information["first_name"] + " " +
Code example #17
import sys

import requests
from datetime import datetime
from pyhunter import PyHunter

# Checking the params -d and -l
arguments_len = len(sys.argv) - 1

if arguments_len == 4:
    if sys.argv[1] == '-d' and sys.argv[3] == '-l':
        # Do not forget to put your API key in PyHunter()!
        hunter_api = PyHunter('')
        domain = sys.argv[2]
        limit = int(sys.argv[4])
        # Trying to connect to Hunter.io and get the list of emails for the given domain
        try:
            hunter_resp = hunter_api.domain_search(domain, limit=limit)
            filename_output = 'email_results_' + str(
                datetime.now().strftime("%H-%M-%S_%d-%m-%y")) + '.txt'
            with open(filename_output, "w") as file_output:
                for item in hunter_resp['emails']:
                    file_output.write(str(item['value']) + "\r\n")
        # Catching errors into the error log .txt file
        except requests.exceptions.HTTPError as e:
            filename_output = 'error_log_' + str(
                datetime.now().strftime("%H-%M-%S_%d-%m-%y")) + '.txt'
            with open(filename_output, "w") as file_output:
                file_output.write(str(e) + "\r\n")
Code example #18
from pyhunter import PyHunter
import pprint

# https://github.com/VonStruddle/PyHunter
# pip install pyhunter

hunter = PyHunter('YOUR-KEY')
# One call is enough; store the result instead of querying the API twice.
result = hunter.domain_search('businesscorp.com.br')

pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
Code example #19
	def find_email(self,tld):
		hunter = PyHunter(self.api_key)
		response = hunter.domain_search(company=tld)
		return response
Code example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-file",
        help=
        "The input CSV file. Must have the 'Firm' column (and can also have the optional 'Location' column).",
        type=str,
        default=0)
    parser.add_argument("--output-file",
                        help="The output CSV file.",
                        type=str,
                        default=0)
    parser.add_argument("--start-row",
                        help="The row in input file to start on",
                        type=int,
                        default=0)
    parser.add_argument("--end-row",
                        help="The row in input file to end on",
                        type=int,
                        default=0)
    args = parser.parse_args()

    if not os.path.isfile(args.input_file):
        print("Input file doesn't exist. Exiting.")
        sys.exit(1)

    if os.path.isfile(
            args.output_file) and not (args.end_row or args.start_row):
        print("Output file ({}) exists already. Exiting.".format(
            args.output_file))
        sys.exit(1)

    # Make sure we can load the file
    df = pd.read_csv(args.input_file)

    if args.start_row != 0:
        print("Starting on row {}".format(args.start_row))
        df = df[args.start_row:]

    if args.end_row != 0:
        print("Will stop on row {}".format(args.end_row))
        df = df[:args.end_row]

    if not HUNTER_API_KEY:
        print("Hunter API Key missing. Exiting.")
        sys.exit(1)

    hunter = PyHunter(HUNTER_API_KEY)
    tally = len(df)

    # Just in case there are duplicate domains we don't want to API call twice
    df = df.drop_duplicates(subset=['Domain'])

    for index, row in df.iterrows():
        domain = row['Domain']
        # validators.domain does exactly that - nifty little tool
        # also we only want to lookup unique domains
        if not validators.domain(
                domain) and domain != 'wikipedia.org' and domain != '4icu.org':
            print("{} is an invalid domain. Skipping.".format(domain))
            continue

        print("Processing {} ({}/{})".format(domain, index - args.start_row,
                                             tally))

        # Had to remove limit=100 as it broke the client
        try:
            results = hunter.domain_search(domain, emails_type='personal')
        except requests.exceptions.HTTPError as e:
            print("Received error: {}".format(e))
            break

        normalized = json_normalize(results['emails'])
        normalized['org'] = row['Firm']
        normalized.to_csv(args.output_file,
                          mode='a',
                          header=1,
                          encoding='utf-8')
        print(normalized.columns)
Code example #21
File: hunt3.py Project: go2dmny/hunt3
parser.add_argument(
    "-fnm",
    help="Find multiple names based on domain and persons name.",
    action="store_true")
parser.add_argument(
    "-fe",
    help="Find all email addresses associated with all domains in a text file",
    action="store_true")
parser.add_argument("-v", help="Verify an email address", action="store_true")
parser.add_argument("-c",
                    help="Check how many calls left",
                    action="store_true")
args = parser.parse_args()

hunter = PyHunter('CHANGE TO YOUR API KEY FROM HUNTER.IO')
results = hunter.domain_search('YOURDOMAINNAME.com')

if (args.fe):
    infile = input("Location of URLs file..")
    outfile = input("Location of output text file..")
    fopen = open(outfile, 'w')
    with open(infile) as f:
        for i in f:
            x = i.rstrip('\n')
            url = x
            print(url)
            results = hunter.domain_search(url)
            printed = json.dumps(results)
            fopen.write(printed)
    fopen.close()
elif (args.fns):
Code example #22
from pyhunter import PyHunter
from settings import HUNTERIO_API_KEY

hunter = PyHunter(HUNTERIO_API_KEY)

if __name__ == '__main__':
    result = hunter.domain_search(domain='mandmpestcontrol.com', limit=10)
Code example #23
from pyhunter import PyHunter
#from reverseip import ReverseIP as rip
import os
import csv
import re
hunter = PyHunter('Enter key here')

Web_Dom = input('Enter The Name of the website: ')

Temp = hunter.domain_search(Web_Dom)
Temp1 = Temp['emails']

for i in range(0, len(Temp1)):
    temp2 = Temp1[i]
    print(temp2['value'])

Choice = int(
    input(
        "\n1.Do you want to save in a text file \n2.Do you want to save it in a spreadsheet\n"
    ))
while Choice > 2 or Choice <= 0:
    print('Please Enter either 1 or 2 ')
    Choice = int(
        input(
            "\n1.Do you want to save in a text file  \n2. Do you want to save it in a spreadsheet\n"
        ))
if (Choice == 1):
    file = open(str(Web_Dom + ".txt"), 'w')
    for i in range(0, len(Temp1)):
        temp2 = Temp1[i]
        file.write(temp2['value'])