def tor():
    request = TorRequest()
    response = request.get("http://httpbin.org/ip")
    ip_address = json.loads(response.content)["origin"]
    response = requests.get(f"http://ip-api.com/json/{ip_address}")
    data = json.loads(response.content)
    city = data["city"]
    country = data["country"]
    return f"{country} - {city}"
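The snippet above assumes its imports from the surrounding module; a minimal sketch of that context, with an illustrative call (the sample output is hypothetical):

# Module-level context implied by tor() above (a sketch, not the original file).
import json

import requests
from torrequest import TorRequest

# Illustrative call, assuming a local Tor daemon on the default ports 9050/9051:
# print(tor())  # e.g. "Germany - Frankfurt"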
def tor_identity():
    from torrequest import TorRequest
    global ip
    print("Loading new Tor identity ...")
    tr = TorRequest()
    response = requests.get('http://ipecho.net/plain')
    print("My Original IP Address:", response.text)
    tr.reset_identity()  # Reset Tor identity
    response = tr.get('http://ipecho.net/plain')
    ip = response.text
    print("New IP Address:", response.text)
def get_last_page_num(stock):
    tr = TorRequest(proxy_port=9050, ctrl_port=9051, password=None)
    headers = {'User-Agent': random.choice(browsers)}
    target_url = "https://finance.naver.com/item/sise_day.nhn?code=%s&page=1" % stock.code
    r = tr.get(target_url, headers=headers)
    # page_re = re.compile(r'page=(\d+)')
    # s = BeautifulSoup(r.text, 'lxml')
    # rr = s.find('td', {"class": "pgRR"})
    # rr_href = rr.a['href']
    # m = page_re.search(rr_href)  # note: the original re.search(r.text, rr_href) had the arguments swapped
    # return int(m[1])
    return 1
class Ecosia:
    """
    Ecosia Search (through Tor nodes).

    Tor is the default mode, but it needs some extra configuration to work
    on your machine. See these links for the setup and possible errors:
    > https://www.scrapehero.com/make-anonymous-requests-using-tor-python/
    > https://stackoverflow.com/questions/49470261/tor-failing-to-run-with-failed-to-bind-one-of-the-listener-ports
    """

    def __init__(self, isTor=True):
        self.searchURL = "https://www.ecosia.org/search?q="
        # init generators
        self.rhGen = RequestHeaderGenerator()
        self.stGen = SearchTermGenerator()
        self.isTor = isTor
        self.searches = 0
        if self.isTor:
            # This password needs to be set in your .env file: simply create
            # a new file and paste the password you set while configuring Tor.
            # Make sure the .env file is in the same dir as the .config file.
            self.tr = TorRequest(password=TOR_PASS)

    def _buildUrl(self):
        """
        Build the search URL from the generated search terms and make sure
        they are correctly URL-encoded.
        """
        return self.searchURL + "+".join(
            list(map(urllib.parse.quote, self.stGen.getSearchTerm())))

    def search(self):
        """
        Request the Ecosia search results page.

        Has two modes: anonymous requests via Tor, or normal ones. To avoid
        blocking, a new request header is generated for every normal request.
        """
        url = self._buildUrl()
        if self.isTor:
            self.tr.reset_identity()
            response = self.tr.get(url)
        else:
            print("Tor option disabled")
            response = requests.get(
                url, headers=self.rhGen.getRandomRequestHeader())
        if int(response.status_code) == 200:
            self.searches += 1
        print(
            f"Performed request to url: {url}, \nGot status code: {response.status_code}"
        )
        return response.status_code
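A usage sketch for the Ecosia class; it assumes the surrounding module supplies RequestHeaderGenerator, SearchTermGenerator, and TOR_PASS, as the constructor implies:

# Usage sketch (RequestHeaderGenerator, SearchTermGenerator and TOR_PASS are
# assumed to come from the surrounding module; they are not defined here).
if __name__ == "__main__":
    ecosia = Ecosia(isTor=False)  # switch to isTor=True once Tor is configured
    for _ in range(3):
        ecosia.search()
    print(f"Successful searches: {ecosia.searches}")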
def tor_reset():
    global tor
    global header
    try:
        tor = TorRequest(password='******')
        tor.reset_identity()
    except Exception:
        # If the reset fails, close the old connection and start a fresh one.
        tor.close()
        tor = TorRequest(password='******')
        tor.reset_identity()
    response = tor.get('http://ipecho.net/plain')
    print("IP address has changed:", response.text)
def __init__(self, password=None, url=None):
    self.password = password
    if password is not None:
        self.tr = TorRequest(password=self.password)
    else:
        self.tr = TorRequest()
    self.url = url
    self.options = Options()
    self.options.add_argument('--headless')
    self.options.add_argument('--disable-gpu')  # Last I checked this was necessary
def check_tor(password):
    '''
    Check if we are connected via tor.
    '''
    # Requirements
    import sys
    from torrequest import TorRequest

    # Add HashedControlPass.
    tr = TorRequest(password=password)

    # Check that we are connected via tor.
    url = 'https://check.torproject.org'
    response = tr.get(url)
    txt = response.text
    status = txt[txt.find('<title>') + 7:txt.find('</title>')].split('\n')[2].lstrip()
    print(status, file=sys.stderr)
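check.torproject.org also exposes a small JSON endpoint, so the same check can skip the title parsing above; a sketch that assumes Tor's SOCKS listener is on the default 127.0.0.1:9050 and that requests[socks] is installed:

import requests

def check_tor_api():
    # socks5h:// makes requests resolve DNS through Tor as well.
    proxies = {'http': 'socks5h://127.0.0.1:9050',
               'https': 'socks5h://127.0.0.1:9050'}
    data = requests.get('https://check.torproject.org/api/ip',
                        proxies=proxies, timeout=30).json()
    print('IsTor:', data['IsTor'], '- exit IP:', data['IP'])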
def get_html(self, url, tor=False):
    """
    Send a GET request to a specific url and return the html.

    :param url:
    :param tor:
    :return:
    """
    time.sleep(3)
    html = None
    proxies = None
    if tor:
        # The TorRequest context is only used to rotate the circuit; the
        # actual request goes through the configured Tor proxies below.
        with TorRequest(proxy_port=9050, ctrl_port=9051, password=None) as tr:
            tr.reset_identity()
        proxies = self.tor_proxies
    try:
        html = self.client.get(url, proxies=proxies, timeout=(3.05, 27), stream=True)
    except Exception as e:
        print(e)
        # Retry; the original dropped the tor flag here and discarded the result.
        return self.get_html(url, tor=tor)
    return html.content
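The method above reads a `self.tor_proxies` attribute defined elsewhere in its class; with torrequest's default SOCKS port it would plausibly look like this (an assumption, not the original definition):

# Plausible shape of the tor_proxies attribute used above (assumed; based on
# torrequest's default SOCKS port, not taken from the original class).
tor_proxies = {
    'http': 'socks5h://localhost:9050',
    'https': 'socks5h://localhost:9050',
}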
def get_info_from_page(url, object):
    ua = UserAgent()
    header = {'User-Agent': str(ua.chrome)}
    # Make the requests through Tor using torrequest
    with TorRequest() as tr:
        html = tr.get(url, headers=header).text
    ads = BeautifulSoup(html, 'html.parser')
    try:
        phone = ads.find(attrs={
            'class': 'js-phone phoneView phone-hidden'
        }).find('a').get('href').split('+')[1]
    except Exception:
        phone = 'not retrieved'
    try:
        adress = ads.find(attrs={'class': 'iblock'}).text
    except Exception:
        adress = 'not retrieved'
    try:
        # Note: this uses the same 'iblock' selector as the address above
        title = ads.find(attrs={'class': 'iblock'}).text
    except Exception:
        title = 'not retrieved'
    try:
        site = ads.find(attrs={
            'class': 'service-website'
        }).find('a').get('href')
    except Exception:
        site = 'not retrieved'
    ads_dict = {'title': title, 'phone': phone, 'adress': adress, 'site': site}
    object.ads.append(ads_dict)
    print(ads_dict)
def main():
    input_df = pd.read_csv(csv_input)
    for index, row in input_df.iterrows():
        if row.Status == 0:
            hsn, tsn, brand = str(int(row.HSN)).zfill(4), row.TSN.zfill(3), row.Brand
            try:
                with TorRequest() as tor:
                    brand_id, model_id, vehicle_id, vehicle_example = get_vehicle_infos(
                        tor, hsn, tsn)
                    main_services_list = get_main_services(
                        tor, hsn, tsn, brand_id, model_id, vehicle_id)
                    service_name, service_overview, km, model, prices, sqi, url = get_final_infos(
                        tor, vehicle_example)
                    services_detailed = get_services_details(tor, sqi)
                    result = generate_results_dictionary(
                        brand, hsn, tsn, model, main_services_list,
                        service_name, service_overview, km, min(prices),
                        max(prices), np.mean(prices), services_detailed, url)
                    save_execution_status(result, input_df, index, status=1)
            except Exception:
                result = generate_results_dictionary(brand, hsn, tsn, '/', '/',
                                                     '/', '/', '/', '/', '/',
                                                     '/', '/', '/')
                save_execution_status(result, input_df, index, status=2)
def getPhone(lienPage):
    id = lienPage.split('/')[4].split(".")[0]
    lienImage = "https://www2.leboncoin.fr/ajapi/get/phone?list_id=" + id
    with TorRequest() as tr:
        time.sleep(5)
        tr.reset_identity()
        tr.ctrl.signal('CLEARDNSCACHE')
        r3 = tr.get(lienImage)
        response = tr.get('http://ipecho.net/plain')
        print("ip", response.text)
        data = json.loads(r3.text)
        if data != '':
            lien = data['phoneUrl']
            f = open('temp.gif', 'wb')
            f.write(requests.get(lien).content)
            f.close()
            call(["sips", "-s", "format", "jpeg", "temp.gif", "--out", "temp.jpeg", "-Z", "600"])
            call(["tesseract", "temp.jpeg", "temp", "-psm", "7", "nobatch", "digits"])
            numero = open('temp.txt').read()
            print(numero)
            if len(numero) > 0:
                return re.sub(r'\D', '', numero)
            else:
                return "000000000"
        else:
            return "000000000"
def getProjectIssues(project_id, startAt=0):
    args['jql'] = 'project=' + project_id
    args['startAt'] = startAt
    issues = []
    sleep(10)
    with TorRequest() as tr:
        while True:
            urlbits[4] = urlencode(args)
            print('[' + str(datetime.today()) + ']: ' + urlunparse(urlbits))
            try:
                resp = tr.get(urlunparse(urlbits), headers=headers)
            except Exception:
                print('[' + str(datetime.today()) + ']: Something went wrong. Trying again.')
                tr.reset_identity()
                sleep(randint(1, 10))
                continue
            j = resp.json()
            issues.append(j['issues'])
            args['startAt'] = j['maxResults'] + j['startAt']
            if j['total'] <= j['maxResults'] + j['startAt']:
                break
            sleep(randint(1, 10))
            tr.reset_identity()
    return issues
def get_html(url, page_num):
    """
    Send a POST request to a specific url and return the html.

    :param url:
    :param page_num:
    :return:
    """
    time.sleep(3)
    data = {
        "action": "loadNewsPosts",
        "page": "{}".format(page_num),
    }
    try:
        html = requests.post(url, data=data).content.decode('utf-8')
    except Exception as e:
        print(e)
        with TorRequest(proxy_port=9050, ctrl_port=9051, password=None) as tr:
            tr.reset_identity()
            # decode here too, so both paths return str rather than bytes
            html = tr.post(url, data=data).content.decode('utf-8')
    return html
def Y_tor():
    local_time = time.localtime()
    time_string = time.strftime("%m/%d/%Y, %H:%M:%S ", local_time)
    get_URL()
    with TorRequest(proxy_port=9050, ctrl_port=9051, password=None) as tr:
        try:
            print(bcolors.BOLD + "\n[+] Searching for files...\n" + bcolors.ENDC)
            for line in file:
                lfi = (str(tr.get(url + line)) + " -> " + url + line)
                if lfi.find('200') != -1:
                    print(lfi)
                    # the original opened the log file without closing it
                    with open("tr_requests.txt", "a") as all_test:
                        all_test.write("\n" + time_string + lfi)
        except ConnectionError:
            print(bcolors.FAIL + "Connection Error, retry" + bcolors.ENDC)
            Y_tor()
    print(bcolors.WARNING + "FINISHED - see more in tr_requests.txt" + bcolors.ENDC)
    file.close()
    quit()
def scrape_owner(df):
    """Scrape addresses and the corresponding entity from bitinfocharts.com.

    Uses tor requests and threading for anonymous scraping in parallel.
    """
    with TorRequest(proxy_port=9050, ctrl_port=9051, password=None) as tr:
        resp = tr.get('http://ipecho.net/plain')
        proxy = resp.text
        print(proxy)
        proxy_list.append(proxy)
        tr.reset_identity()
        for index, row in df.iterrows():
            address = row['address']
            try:
                url = "https://bitinfocharts.com/bitcoin/address/" + address
                res = tr.get(url)
                soup = BeautifulSoup(res.content, 'lxml')
                table = soup.find_all('table')[1]
                # NOTE: rebinding df here shadows the argument being iterated
                df = pd.read_html(str(table))[0]
                owner = df.iloc[0, 3]
                owner = owner.replace('wallet:', ' ').strip()
                wallet_list.append([address, owner])
                print("Appended wallet " + str(len(wallet_list)) + " (" + proxy + ")")
                time.sleep(random.uniform(1, 2))
            except Exception:
                print("Error:", url, sep=" ")
                time.sleep(random.uniform(1, 10))
    print(">>>finished<<<")
def get_new_ip(n):
    with TorRequest(proxy_port=9001, ctrl_port=9002, password=None) as tr:
        for i in range(n):
            print(i)
            response = tr.get('http://ipecho.net/plain')
            print(response.text)  # not your IP address
            tr.reset_identity()
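This variant expects a Tor instance on nonstandard ports; a sketch of the matching torrc lines (inferred from the arguments above, not taken from the original setup) plus an example call:

# get_new_ip above assumes a Tor instance configured with nonstandard ports,
# i.e. a torrc containing roughly:
#
#   SocksPort 9001
#   ControlPort 9002
#
# Example: print five successive exit IPs, rotating the circuit each time.
# get_new_ip(5)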
def ip():
    response = requests.get('http://ipecho.net/plain')
    print(bcolors.FAIL + "\nOriginal IP Address: ", response.text + bcolors.ENDC)
    with TorRequest() as tr:
        tr.reset_identity()
        response = tr.get('http://ipecho.net/plain')
        print(bcolors.OKGREEN + "New IP Address: ", response.text + bcolors.ENDC)
def run():
    tr = TorRequest(password='******')
    response = tr.get(site, headers=headers[0], verify=True)
    response = tr.get(site1, headers=headers[0], verify=True)
    # Python 3 print; .text instead of .content so we concatenate str, not bytes
    print('[' + str(i) + ']' + ' Blog View Added With IP: ' +
          tr.get('http://ipecho.net/plain').text)
    tr.reset_identity()
def get_tor_client(ask_if_needed=False):
    tpwd = config('TOR_PASSWORD', '')
    if not tpwd and os.path.isfile(TOR_CONF):
        with open(TOR_CONF) as infile:
            tpwd = infile.read().strip()
    elif not tpwd and ask_if_needed:
        tpwd = getpass.getpass(prompt='Tor password: ')
        # The original line was masked by password redaction; the visible
        # remnants suggest the entered password is cached to TOR_CONF.
        with open(TOR_CONF, 'w') as outfile:
            outfile.write(tpwd)
    print('Connecting to tor...')
    try:
        tr = TorRequest(password=tpwd)
        tr.reset_identity()
        print('Session established!')
    except OSError:
        print('Tor not available, using regular requests...')
        tr = requests
    return tr
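A usage sketch for get_tor_client; it assumes `config` is something like python-decouple's and TOR_CONF is a module-level cache-file path, as the body implies:

# Usage sketch (config() and TOR_CONF are assumed module-level, per the body).
client = get_tor_client(ask_if_needed=True)
resp = client.get('http://ipecho.net/plain')
print('Requesting as:', resp.text)  # exit IP if Tor connected, else your own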
def assign_new_ip(text=False):
    """
    Reset the identity using TorRequest.

    Parameters
    ----------
    arg1 [OPTIONAL] | text: bool
        A boolean flag to return the IP address tuple (old, morphed)

    Returns
    -------
    boolean
        True/False
    """
    try:
        # pass the hashed password
        req = TorRequest(password='******')
        # fetch the ip address without Tor
        normal_identity = requests.get('http://ipecho.net/plain')
        # reset the identity using Tor
        req.reset_identity()
        # make a request now
        morphed_identity = req.get('http://ipecho.net/plain')
        # compare the response bodies, not the Response objects
        # (the original compared the objects, which are never equal)
        if morphed_identity.text != normal_identity.text:
            if text:
                # return the ip address pairs as a tuple
                return (normal_identity.text, morphed_identity.text)
            return True
        # return just the status
        return False
    except Exception:
        return False
def randomize_ip(password, quiet=False):
    '''
    Randomize IP address with tor.

    Reset tor to randomize your IP address. Takes your tor hashed control
    password as an argument. Requires that you have set the
    HashedControlPassword variable in the tor configuration file.
    '''
    # Requirements
    import sys
    from torrequest import TorRequest

    # Add HashedControlPass.
    tr = TorRequest(password=password)

    # Reset Tor.
    tr.reset_identity()

    # Check new ip.
    response = tr.get('http://ipecho.net/plain')
    ip = response.text
    if not quiet:
        print("IP address is set to: {}".format(ip), file=sys.stderr)
    return ip
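A minimal loop over randomize_ip, assuming the HashedControlPassword setup the docstring describes (the password string is illustrative):

# Rotate through a few Tor identities (password value is illustrative only).
for _ in range(3):
    randomize_ip('my-control-password', quiet=False)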
def tor_session(password):
    '''
    tor_session
    '''
    # Requirements
    import sys
    from torrequest import TorRequest

    # Add HashedControlPass.
    tr = TorRequest(password=password)
    session = tr.session
    url = 'https://check.torproject.org'
    response = tr.get(url)
    txt = response.text
    status = txt[txt.find('<title>') + 7:txt.find('</title>')].split('\n')[2].lstrip()
    print(status, file=sys.stderr)
    # Compare with ==, not "is": identity comparison on strings is unreliable.
    if status == "Sorry. You are not using Tor.":
        print("Continue only at your own risk.", file=sys.stderr)
    #EIF
    return session
def get_csrf_token(auth_url, user_agent=DEFAULT_USER_AGENT, proxies=None):
    client.cookies.clear()
    headers = {'User-Agent': user_agent}
    try:
        html = client.get(auth_url, headers=headers, proxies=proxies,
                          timeout=(3.05, 27), stream=True)
    except Exception as e:
        print(e)
        html = client.get(auth_url, headers=headers, proxies=proxies,
                          timeout=(3.05, 27), stream=True)
    if html.status_code == 200:
        soup = BeautifulSoup(html.text, 'lxml')
        try:
            csrf = soup.find(id='frm_login').find('input').get('value')
        except AttributeError:
            global MAX_RETRIES
            csrf = None
            while MAX_RETRIES != 5:
                MAX_RETRIES += 1
                # Set text and log.
                text = '[ERROR] Perhaps the server has banned your ip. Tor is activated. Changing ip...'
                progressbar_widget(colored(text, 'red'), range_=2, sleep=1)
                dt = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
                logger.info(dt + text)
                # Activate tor, change ip, and call this function again to get the csrf token.
                with TorRequest(proxy_port=9050, ctrl_port=9051, password=None) as tr:
                    tr.reset_identity()
                    csrf = get_csrf_token(auth_url,
                                          user_agent=get_random_user_agent(),
                                          proxies=TOR_PROXIES)
                break
        return csrf
    else:
        return False
def pageScan(link):
    li = []
    ua = UserAgent()
    # The original embedded the literal text 'ua.random' inside a hard-coded
    # UA string and never used it; a randomized user agent was presumably intended.
    headers = {'User-agent': ua.random}
    # Note: TorRequest's first positional parameter is proxy_port, so the
    # original TorRequest(1234) set proxy_port=1234; if 1234 was meant as the
    # control password, it should be TorRequest(password='1234').
    tr = TorRequest(1234)
    tr.reset_identity()
    url = tr.get(link, headers=headers).text
    soup = BeautifulSoup(url, 'lxml')
    try:
        links = soup.find('div', id='atfResults').find(
            'ul', id='s-results-list-atf').find_all('li', class_='s-result-item')
        for l in links:
            li.append(l['data-asin'])
    except (AttributeError, TypeError):
        pass
    return li
def get_connection(links_site, torR):
    headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
    for i in range(15):
        try:
            resp = torR.get(links_site, headers=headers, timeout=5)
            return resp
        except requests.exceptions.Timeout:
            print("Reconnect " + str(i + 1))
            with Controller.from_port(port=9051) as controller:
                controller.authenticate(password='******')
                print("Success!")
                controller.signal(Signal.NEWNYM)
                print("New Tor connection processed")
            torR = TorRequest(password='')
            torR.reset_identity()  # Reset Tor
            response = torR.get('http://ipecho.net/plain')
            print("New IP Address", response.text)
    return resp  # NOTE: unbound if all 15 attempts time out
def get_tor(self, url):
    '''
    Makes request over TOR network
    '''
    try:
        from torrequest import TorRequest
        with TorRequest() as tr:
            content = tr.get(url)
            tr.reset_identity()
            return content
    except Exception:
        logging.exception('could not get %s' % url)
        return None
def make_request(url, headers, error_type, social_network, verbose=False,
                 tor=False, unique_tor=False):
    r = TorRequest() if (tor or unique_tor) else requests
    try:
        rsp = r.get(url, headers=headers)
        if unique_tor:
            r.reset_identity()
        if rsp.status_code:
            return rsp, error_type
    except requests.exceptions.HTTPError as errh:
        print_error(errh, "HTTP Error:", social_network, verbose)
    except requests.exceptions.ConnectionError as errc:
        print_error(errc, "Error Connecting:", social_network, verbose)
    except requests.exceptions.Timeout as errt:
        print_error(errt, "Timeout Error:", social_network, verbose)
    except requests.exceptions.RequestException as err:
        print_error(err, "Unknown error:", social_network, verbose)
    return None, ""
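A call sketch for make_request; the URL, headers, and network name are illustrative values, not taken from the original call sites:

# Call sketch for make_request (all argument values are illustrative).
rsp, err_type = make_request(url='https://example.com/user/testname',
                             headers={'User-Agent': 'Mozilla/5.0'},
                             error_type='status_code',
                             social_network='Example',
                             tor=False)
if rsp is not None:
    print(rsp.status_code)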
def get_tor(self, url): """ Makes request over TOR network """ try: from torrequest import TorRequest with TorRequest() as tor_req: content = tor_req.get(url) self.urls.append(url) tor_req.reset_identity() return content except BaseException: logging.exception("could not get over tor %s", url) return self.get(url)
def get_financial(tor: TorRequest, company: str) -> pd.DataFrame:
    """
    Get accountability information on company
    """
    financial_df = pd.DataFrame()
    # writer = pd.ExcelWriter('XLS/{}.xlsx'.format(company))
    for elements_financier in accountant:
        r = tor.get(url + "{}/{}?p={}".format(company, elements_financier, company))
        if r.status_code != 200:
            print(r.status_code, ":", r.reason)
            time.sleep(10)
            financial_df = get_financial(tor, company)
            return financial_df
        soup = BeautifulSoup(r.text, "lxml")
        tables = soup.find_all('table')
        df = pd.DataFrame()
        raw = []
        for table in tables:
            tr = table.find_all('tr')
            for row in tr:
                td = row.find_all('td')
                # Catch if this is a title
                if len(td) == 1:
                    data = str(td[0].find(text=True))
                    raw.append(data)
                    df = df.append([raw])
                    raw = []
                    continue
                # Add a line with a temporary raw
                for element in td:
                    data = str(element.find(text=True))
                    raw.append(data)
                df = df.append([raw])
                del raw[:]
        df.set_index([0], inplace=True)
        # df.to_excel(writer, elements_financier)
        financial_df = pd.concat([financial_df, df])
    # writer.save()
    return financial_df
def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username   -- String indicating username that report should be created against.
    site_data  -- Dictionary containing all of the site data.
    verbose    -- Boolean indicating whether to give verbose output.
    tor        -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor -- Boolean indicating whether to use a new tor circuit for each request.

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        exists:        String indicating results of test for account existence.
        http_status:   HTTP status code of query which checked for existence on site.
        response_text: Text that came back from request. May be None if
                       there was an HTTP error when checking for existence.
    """
    global amount
    fname = username.lower() + ".txt"

    if os.path.isfile(fname):
        os.remove(fname)
        print((Style.BRIGHT + Fore.GREEN + "[" +
               Fore.YELLOW + "*" +
               Fore.GREEN + "] Removing previous file:" +
               Fore.WHITE + " {}").format(fname))

    print((Style.BRIGHT + Fore.GREEN + "[" +
           Fore.YELLOW + "*" +
           Fore.GREEN + "] Checking username" +
           Fore.WHITE + " {}" +
           Fore.GREEN + " on:").format(username))

    # A user agent is needed because some sites don't return the correct
    # information since they think that we are bots.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
    }

    # Allow 1 thread for each external service, so `len(site_data)` threads total
    executor = ThreadPoolExecutor(max_workers=len(site_data))

    # Create session based on request methodology
    underlying_session = requests.session()
    underlying_request = requests.Request()
    if tor or unique_tor:
        underlying_request = TorRequest()
        underlying_session = underlying_request.session()

    # Create multi-threaded session for all requests.
    # Use our custom FuturesSession that exposes response time.
    session = ElapsedFuturesSession(executor=executor, session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            print((Style.BRIGHT + Fore.WHITE + "[" +
                   Fore.RED + "-" +
                   Fore.WHITE + "]" +
                   Fore.GREEN + " {}:" +
                   Fore.YELLOW + " Illegal Username Format For This Site!").format(social_network))
            results_site["exists"] = "illegal"
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url

            request_method = session.get
            if social_network != "GitHub":
                # If only the status_code is needed don't download the body
                if net_info["errorType"] == 'status_code':
                    request_method = session.head

            # This future starts running the request in a new thread, doesn't block the main thread
            future = request_method(url=url, headers=headers)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identity for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Open the file containing account links
    f = open_file(fname)

    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type, response_time = get_response(request_future=future,
                                                    error_type=error_type,
                                                    social_network=social_network,
                                                    verbose=verbose)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except Exception:
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except Exception:
            pass

        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                print_found(social_network, url, response_time, verbose)
                write_to_file(url, f)
                exists = "yes"
                amount += 1
            else:
                print_not_found(social_network, response_time, verbose)
                exists = "no"
        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX.
            # (The original `not r.status_code >= 300 or r.status_code < 200`
            # parsed as `r.status_code < 300` due to operator precedence.)
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose)
                write_to_file(url, f)
                exists = "yes"
                amount += 1
            else:
                print_not_found(social_network, response_time, verbose)
                exists = "no"
        elif error_type == "response_url":
            error = net_info.get("errorUrl")
            # Checks if the redirect url is the same as the one defined in data.json
            if error not in r.url:
                print_found(social_network, url, response_time, verbose)
                write_to_file(url, f)
                exists = "yes"
                amount += 1
            else:
                print_not_found(social_network, response_time, verbose)
                exists = "no"
        elif error_type == "":
            print((Style.BRIGHT + Fore.WHITE + "[" +
                   Fore.RED + "-" +
                   Fore.WHITE + "]" +
                   Fore.GREEN + " {}:" +
                   Fore.YELLOW + " Error!").format(social_network))
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text
        results_site['response_time_ms'] = response_time

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    print((Style.BRIGHT + Fore.GREEN + "[" +
           Fore.YELLOW + "*" +
           Fore.GREEN + "] Saved: " +
           Fore.WHITE + "{}").format(fname))

    final_score(amount, f)
    return results_total