def get_credits_data() -> dict:
    """
    Return data used to generate the credits file.

    Returns:
        Data required to render the credits template.
    """
    project_dir = Path(__file__).parent.parent
    metadata = toml.load(project_dir / "pyproject.toml")["tool"]["poetry"]
    lock_data = toml.load(project_dir / "poetry.lock")
    project_name = metadata["name"]

    poetry_dependencies = chain(metadata["dependencies"].keys(), metadata["dev-dependencies"].keys())
    direct_dependencies = {dep.lower() for dep in poetry_dependencies}
    direct_dependencies.remove("python")
    indirect_dependencies = {pkg["name"].lower() for pkg in lock_data["package"]}
    indirect_dependencies -= direct_dependencies
    dependencies = direct_dependencies | indirect_dependencies

    packages = {}
    for pkg in search_packages_info(dependencies):
        pkg = {_: pkg[_] for _ in ("name", "home-page")}
        packages[pkg["name"].lower()] = pkg

    # Fall back to the PyPI JSON API for anything pip could not find locally
    for dependency in dependencies:
        if dependency not in packages:
            pkg_data = httpx.get(f"https://pypi.python.org/pypi/{dependency}/json").json()["info"]
            home_page = pkg_data["home_page"] or pkg_data["project_url"] or pkg_data["package_url"]
            pkg_name = pkg_data["name"]
            packages[pkg_name.lower()] = {"name": pkg_name, "home-page": home_page}

    return {
        "project_name": project_name,
        "direct_dependencies": sorted(direct_dependencies),
        "indirect_dependencies": sorted(indirect_dependencies),
        "package_info": packages,
    }
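# Hedged usage sketch for get_credits_data(): render the data through a
# Jinja2 template. The template string below is illustrative only; the
# project's real credits template is not shown in this snippet.
from jinja2 import Template

CREDITS_TEMPLATE = Template(
    "# Credits\n"
    "{{ project_name }} depends on:\n"
    "{% for dep in direct_dependencies %}"
    "- [{{ dep }}]({{ package_info[dep]['home-page'] }})\n"
    "{% endfor %}"
)

def render_credits() -> str:
    return CREDITS_TEMPLATE.render(**get_credits_data())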
def main(argv):
    service, flags = sample_tools.init(
        argv, 'searchconsole', 'v1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/webmasters.readonly')

    # Collect the property URLs to query from the sitemap.
    html_doc = httpx.get(flags.sitemap).text
    soup = BeautifulSoup(html_doc, 'html.parser')
    url_list = [url.contents[0] for url in soup.find_all('loc')]
    print(f'# of domains to query: {len(url_list)}')

    # Get top 10 queries for the date range, sorted by click count, descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'rowLimit': 10
    }

    for property_uri in url_list[:4]:
        print(f'Reading property data from: {property_uri}')
        data_file = None
        try:
            response = execute_request(service, property_uri, request)
            csv_writer_file, data_file = prepare_csv(response)
            append_new_data(csv_writer_file, response['rows'], property_uri)
        except HttpError:
            print(f"Couldn't access query data from {property_uri}")
        if data_file:
            data_file.close()
def fetch(
    self,
    station: str = None,
    lat: float = None,
    lon: float = None,
    timeout: int = 10,
) -> str:
    """
    Fetches a report string from the service
    """
    if station:
        valid_station(station)
    elif lat is None or lon is None:
        raise ValueError("No valid fetch parameters")
    try:
        url, params = self._make_url(station, lat, lon)
        if self.method.lower() == "post":
            resp = httpx.post(url, params=params, data=self._post_data(station), timeout=timeout)
        else:
            resp = httpx.get(url, params=params, timeout=timeout)
        if resp.status_code != 200:
            raise SourceError(
                f"{self.__class__.__name__} server returned {resp.status_code}"
            )
    except (ConnectTimeout, ReadTimeout):
        raise TimeoutError(f"Timeout from {self.__class__.__name__} server")
    except gaierror:
        raise ConnectionError(f"Unable to connect to {self.__class__.__name__} server")
    report = self._extract(resp.text, station)
    # This split/join collapses each run of whitespace into a single space
    if isinstance(report, list):
        return dedupe(" ".join(r.split()) for r in report)
    return " ".join(report.split())
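# `dedupe` is called in fetch() above but not defined in this snippet. A
# minimal stand-in consistent with the `-> str` signature (assumption: it
# drops duplicate report strings while preserving order, then joins them):
from typing import Iterable

def dedupe(reports: Iterable[str]) -> str:
    seen = []
    for report in reports:
        if report not in seen:
            seen.append(report)
    return " ".join(seen)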
def list_orders(self, query: OrderQuery = OrderQuery()) -> OrderWithTotal:
    """
    List the registered orders.

    Args:
        - query: the query parameters for listing orders.

    Raises:
        OrderNotFound: when no orders are registered yet.
        UnknownNetworkError: when any unknown network error happens.

    Returns:
        - the available orders for the query parameters provided.
    """
    self._check_authentication()
    response = get(
        f"{self.endpoint}/orders/",
        params={
            "moderator": query.moderator,
            "owner": query.owner,
            "skip": query.skip,
            "limit": query.limit,
            "desc": query.desc,
        },
        headers={"Authorization": f"Bearer {self.get_access_token()}"},
    )
    if response.status_code == 404:
        raise OrderNotFound("No orders registered yet!")
    if response.status_code != 200:
        raise UnknownNetworkError(
            f"Failed to list the orders, network error: "
            f"(status: {response.status_code} - data: {response.content})."
        )
    return OrderWithTotal(**response.json())
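# `OrderQuery` is not defined in this snippet. A plausible shape, inferred
# from the query parameters sent above (the field names match; the defaults
# are assumptions):
from dataclasses import dataclass
from typing import Optional

@dataclass
class OrderQuery:
    moderator: Optional[str] = None
    owner: Optional[str] = None
    skip: int = 0
    limit: int = 100
    desc: bool = False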
def sync_detailed(
    *,
    client: AuthenticatedClient,
    author_username: Union[Unset, str] = UNSET,
    expand: Union[Unset, SoftwareListExpand] = UNSET,
    limit: Union[Unset, int] = UNSET,
    machine_name: Union[Unset, str] = UNSET,
    name: Union[Unset, str] = UNSET,
    offset: Union[Unset, int] = UNSET,
) -> Response[PaginatedSoftwareList]:
    kwargs = _get_kwargs(
        client=client,
        author_username=author_username,
        expand=expand,
        limit=limit,
        machine_name=machine_name,
        name=name,
        offset=offset,
    )

    response = httpx.get(**kwargs)

    return _build_response(response=response)
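# `_get_kwargs` and `_build_response` live in the same generated client
# module. For orientation, a `_build_response` in openapi-python-client
# output looks roughly like this sketch (not this project's exact code):
def _build_response_sketch(*, response: httpx.Response) -> Response[PaginatedSoftwareList]:
    parsed = None
    if response.status_code == 200:
        parsed = PaginatedSoftwareList.from_dict(response.json())
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed,
    )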
def simcore_docker_stack_and_registry_ready(
    event_loop: asyncio.AbstractEventLoop,
    docker_registry: UrlStr,
    docker_stack: Dict,
    simcore_services_ready: None,
) -> Dict:
    # At this point `simcore_services_ready` waited until all services
    # are running. Let's make one more check on the web-api
    for attempt in Retrying(
        wait=wait_fixed(1),
        stop=stop_after_delay(0.5 * _MINUTE),
        reraise=True,
        before_sleep=before_sleep_log(log, logging.INFO),
    ):
        with attempt:
            resp = httpx.get("http://127.0.0.1:9081/v0/")
            resp.raise_for_status()
            log.info(
                "Connection to osparc-simcore web API succeeded [%s]",
                json.dumps(attempt.retry_state.retry_object.statistics),
            )

    return docker_stack
async def api_create_mentee_account(mentee):
    httpx.post(api_url + '/accounts', json={
        'password': mentee['password'],
        'account': {
            'role': mentee['role'],
            'login_name': mentee['login_name'],
            'email': mentee['email'],
        }
    })
    access_token = await api_access_token(mentee['login_name'], mentee['password'])
    auth_header = api_auth_header(access_token)
    r = httpx.get(api_url + '/myuser', headers=auth_header)
    my_user = r.json()
    r = httpx.put(api_url + '/users/' + my_user['user']['id'],
                  headers=auth_header,
                  json={
                      'display_name': mentee['display_name'],
                      'role': mentee['role'],
                      'account_id': my_user['account']['id'],
                      'id': my_user['user']['id'],
                  })
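# Illustrative input for api_create_mentee_account (all field values are
# made up; the dict keys match exactly what the coroutine reads above):
sample_mentee = {
    'password': 'correct horse battery staple',
    'role': 'mentee',
    'login_name': 'mentee1',
    'email': 'mentee1@example.com',
    'display_name': 'Mentee One',
}
# await api_create_mentee_account(sample_mentee)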
async def test_request_headers(snapshot_context):
    """
    When request headers are configured for this integration
        We add the request headers as tags on the span
    """
    url = get_url("/response-headers?Some-Response-Header=Response-Value")
    headers = {
        "Some-Request-Header": "Request-Value",
    }
    try:
        config.httpx.http.trace_headers(["Some-Request-Header", "Some-Response-Header"])
        with snapshot_context():
            resp = httpx.get(url, headers=headers)
            assert resp.status_code == 200

        with snapshot_context():
            async with httpx.AsyncClient() as client:
                resp = await client.get(url, headers=headers)
                assert resp.status_code == 200
    finally:
        config.httpx.http = HttpConfig()
def __init__(self, host=None, port=None, sync=False, frontend_url=None):
    super().__init__(host, port)
    self.sync = sync
    self.frontend_url = frontend_url
    if self.sync:
        try:
            res = httpx.get(frontend_url)
            if res.status_code == 200:
                # NB: this PoolLimits object is created but never passed to
                # the Client below, so it has no effect as written
                httpx.PoolLimits(max_keepalive=1, max_connections=1)
                self.cli = httpx.Client()
                print("Attempt to connect to Cluster Serving frontend succeeded")
            else:
                raise ConnectionError()
        except Exception as e:
            print("Connection error, please check your HTTP server. Error msg is ", e)
    # TODO: these params can be read from config in future
    self.input_threshold = 0.6
    self.interval_if_error = 1
def ping(self):
    """
    Pings the neo4j backend.

    :return:
    """
    neo4j_test_connection_endpoint = ""
    ping_url = f"{self._scheme}://{self._host}:{self._port}/{neo4j_test_connection_endpoint}"
    # if we can't contact neo4j, we should exit.
    try:
        import time
        now = time.time()
        response = httpx.get(ping_url, headers=self._header)
        later = time.time()
        time_taken = later - now
        logger.debug(f'Contacting neo4j took {time_taken} seconds.')
        if time_taken > 5:  # taking more than 5 seconds is not healthy
            logger.warning(f"Contacting neo4j took more than 5 seconds ({time_taken}). Neo4j might be stressed.")
        if response.status_code != 200:
            raise Exception(f'server returned {response.status_code}')
    except Exception as e:
        logger.error(f"Error contacting Neo4j @ {ping_url} -- Exception raised -- {e}")
        logger.debug(traceback.format_exc())
        raise RuntimeError('Connection to Neo4j could not be established.')
def _init_champ(self):
    self.version_num = httpx.get(
        "https://ddragon.leagueoflegends.com/api/versions.json").json()
    self.patch_num = self.version_num[0].split(".")
    self.patch = f"{self.patch_num[0]}.{self.patch_num[1]}"
    # self.patch = "10.16"

    def tierlist(lane: str) -> dict:
        return httpx.get(
            f"https://api.op.lol/tierlist/5/?lane={lane}&patch={self.patch}"
            f"&tier=platinum_plus&queue=420&region=all"
        ).json()

    self.champ_resp = tierlist("default")
    self.top_resp = tierlist("top")
    self.jungle_resp = tierlist("jungle")
    self.middle_resp = tierlist("middle")
    self.bottom_resp = tierlist("bottom")
    self.support_resp = tierlist("support")

    self.champs_list, self.top_list, self.jungle_list, self.middle_list, self.bottom_list, self.support_list = [], [], [], [], [], []
    self.update_role_list(self.champ_resp, self.champs_list)
    self.update_role_list(self.top_resp, self.top_list)
    self.update_role_list(self.jungle_resp, self.jungle_list)
    self.update_role_list(self.middle_resp, self.middle_list)
    self.update_role_list(self.bottom_resp, self.bottom_list)
    self.update_role_list(self.support_resp, self.support_list)
def get_trips():
    API_KEY = os.environ["TRAFIKLAB_API_KEY"]
    URL = (
        "http://api.sl.se/api2/realtimedeparturesv4.json"
        "?key={}&siteid={}&timewindow=60&metro=false&train=false&tram=false&ship=false"
    )
    in_data = {1367: {"name": "Rosenlundsgatan"}, 1500: {"name": "Årsta torg"}}
    data = []
    for site_id in in_data.keys():
        req = httpx.get(URL.format(API_KEY, site_id))
        payload = req.json()
        trips = [
            bus for bus in payload["ResponseData"]["Buses"]
            if bus["Destination"] == "Gullmarsplan"
        ]
        data.append({"name": in_data[site_id]["name"], "trips": trips})
    return data
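# Illustrative consumer of get_trips(). 'DisplayTime' is a field of the SL
# realtime departures payload; treat it as an assumption here, since the
# function above only touches 'Destination':
for stop in get_trips():
    times = ", ".join(t.get("DisplayTime", "?") for t in stop["trips"])
    print(f"{stop['name']} -> Gullmarsplan: {times}")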
def handle(self, *args, **options):
    logger.info('updating licenses')
    for data in httpx.get(LICENSES).json()['licenses']:
        License.objects.get_or_create(spdx_id=data['licenseId'],
                                      defaults={
                                          'name': data['name'],
                                          'url': data['detailsUrl'],
                                      })

    logger.info('updating forges')
    for forge in Forge.objects.order_by('source'):
        logger.info(f' updating {forge}')
        forge.get_projects()

    logger.info('updating repos')
    for repo in Repo.objects.all():
        logger.info(f' updating {repo}')
        repo.api_update()

    call_command('delete_perso')
    call_command('fetch')
    call_command('robotpkg')
    call_command('cmake')
def find_resource(self, resource_id: str, access_token: str = None) -> Dict:
    """
    Method to fetch the details of a resource

    >>> from keycloak import Client
    >>> kc = Client()
    >>> kc.find_resource('bb6a777f-a17b-4555-b035-a6ce12a1fd21')
    {'name': 'Default Resource', 'type': 'urn:python-client:resources:default', 'owner': {'id': 'd74cc555-d46c-4ef8-8a30-ceb2b91d8823'}, 'ownerManagedAccess': False, 'attributes': {}, '_id': 'bb6a777f-a17b-4555-b035-a6ce12a1fd21', 'uris': ['/*'], 'resource_scopes': []}
    >>>

    :param resource_id: id of the resource to look up
    :param access_token: access token to be used
    :returns: details of the resource as a dict
    """
    access_token = access_token or self.access_token  # type: ignore
    headers = auth_header(access_token)
    endpoint = f"{config.uma2.resource_endpoint}/{resource_id}"
    log.debug("Retrieving resource from keycloak")
    response = httpx.get(endpoint, headers=headers)
    response.raise_for_status()
    log.debug("Resource retrieved successfully")
    return response.json()
def single_graphite(target: str, summarize: bool = True,
                    sum_times: dict = SUMMARIZE, func: str = "avg") -> Response:
    if summarize:
        time = sum_times[request.args.get("frame", "day")]
        target = f'summarize({target}, "{time}", "{func}")'
    datapoints = httpx.get(
        GRAPHITE_HOST,
        params={
            "target": target,
            "from": TIME_FRAMES[request.args.get("frame", "day")],
            "until": "now",
            "format": "json",
        },
    ).json()[0]["datapoints"]
    return {"data": list(filter(valid_datapoints, datapoints))}
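# `valid_datapoints` is used as a filter predicate above but not defined in
# this snippet. A minimal stand-in (assumption: Graphite datapoints are
# [value, timestamp] pairs and null values should be dropped):
def valid_datapoints(datapoint) -> bool:
    value, _timestamp = datapoint
    return value is not None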
async def log_audio(self, file: str, url: str, group_id: str, sub_path: str,
                    base_audio_path=None, **kwargs):
    col = get_collection('audio', group_id)
    if base_audio_path is None:
        base_audio_path: str = self.cfg.base_path + 'audio'
    if not col.find_one({"file": file}):
        line = {
            "file": file,
        }
        res = httpx.get(url)
        if res.status_code == 200:
            aud = res.content
            path = f"{base_audio_path}/{sub_path}/"
            if not os.path.exists(path):
                os.makedirs(path)
            with open(f"{path}{file}", 'wb') as fi:
                fi.write(aud)
        else:
            line["failed_url"] = True
        col.insert_one(line)
def sync_detailed(
    *,
    client: Client,
    alias: Union[Unset, str] = UNSET,
    invitation_key: Union[Unset, str] = UNSET,
    my_did: Union[Unset, str] = UNSET,
    state: Union[Unset, ConnectionsState] = UNSET,
    their_did: Union[Unset, str] = UNSET,
    their_role: Union[Unset, ConnectionsTheirRole] = UNSET,
) -> Response[ConnectionList]:
    kwargs = _get_kwargs(
        client=client,
        alias=alias,
        invitation_key=invitation_key,
        my_did=my_did,
        state=state,
        their_did=their_did,
        their_role=their_role,
    )

    response = httpx.get(**kwargs)

    return _build_response(response=response)
async def ia(self, ctx, *, argument):
    """Replies with Instant Answer from DuckDuckGo"""
    output_template = "**{subject}**: *Brought to you by **DuckDuckGo** Instant Answer API*\n\n" \
                      "```{abstractText}```\n" \
                      "**Source:** <{abstractUrl}> @ {abstractSource}"
    subject = re.sub('[^ 0-9a-zA-Z]+', '', argument)
    url_template = "https://api.duckduckgo.com/?q={query}&format=json"
    url = url_template.format(query=subject.replace(" ", "+"))
    data = httpx.get(url).json()
    if data["AbstractURL"] == "":
        output = "Sorry, no Instant Answer found."
    else:
        # .replace drops the empty ``` block when there is no abstract text
        output = output_template.format(
            subject=subject,
            abstractText=data["AbstractText"],
            abstractUrl=data["AbstractURL"],
            abstractSource=data["AbstractSource"],
        ).replace("``````", "")
    await ctx.send(output)
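# A standalone, bot-free sketch of the same Instant Answer lookup (assumes
# only httpx; the response fields mirror the ones used in the command above):
import httpx

def instant_answer(subject: str) -> str:
    url = f"https://api.duckduckgo.com/?q={subject.replace(' ', '+')}&format=json"
    data = httpx.get(url).json()
    if not data["AbstractURL"]:
        return "Sorry, no Instant Answer found."
    return f"{subject}: {data['AbstractText']} ({data['AbstractURL']} @ {data['AbstractSource']})"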
def get_data(self, **kwargs) -> typing.Any:
    """
    read the data from a given URL or path to a local file

    :param kwargs:
    :return: the parsed feed if it is well formed
    """
    if 'url_to_parse' not in kwargs:
        raise ValueError('you have to provide "url_to_parse" value')
    url_to_parse = kwargs.get('url_to_parse', '')
    if not url_to_parse:
        raise ValueError('you have to provide "url_to_parse" value')
    bypass_bozo = kwargs.get('bypass_bozo', False)
    data = httpx.get(url_to_parse)
    logger.debug(url_to_parse)
    data = feedparser.parse(data.text, agent=self.USER_AGENT)
    # if the feed is not well formed, return no data at all
    if not bypass_bozo and data.bozo == 1:
        data.entries = ''
        log = (f"{url_to_parse}: is not valid. You can tick the checkbox "
               "'Bypass Feeds error ?' to force the process")
        logger.info(log)
    return data
def send_message():
    """
    This checks the weather and sends the message via Telegram.
    """
    temp, weather = israining()
    message = ''
    for i in weather:
        # OpenWeatherMap condition ids: 2xx/3xx/5xx are storm/drizzle/rain, 6xx is snow
        if int(str(i['id'])[:1]) in [2, 3, 5]:
            message += f'\U00002614 Grab an Umbrella, it\'s raining today: \nDescription: {i["description"]}.\n'
        elif int(str(i['id'])[:1]) in [6]:
            message += f'\U00002603 Grab a jacket, it\'s going to snow today! \nDescription: {i["description"]}.\n'
    message += f'\nThe temperature will be around {round(temp)}°F.'
    telegram_bot_token = os.getenv('TELEGRAM_BOT_TOKEN')
    chat_id = os.getenv('TELEGRAM_CHAT_ID')
    params = {'chat_id': chat_id, 'text': message}
    resp = httpx.get(
        f'https://api.telegram.org/bot{telegram_bot_token}/sendMessage',
        params=params)
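# `israining` is referenced above but not defined here. A plausible stand-in
# against the OpenWeatherMap current-weather endpoint (the OWM_* env var
# names are assumptions; the return shape matches how send_message uses it):
import os
import httpx

def israining():
    payload = httpx.get(
        'https://api.openweathermap.org/data/2.5/weather',
        params={
            'q': os.getenv('OWM_CITY', 'New York'),
            'units': 'imperial',  # send_message reports °F
            'appid': os.environ['OWM_API_KEY'],
        },
    ).json()
    return payload['main']['temp'], payload['weather']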
def test_counter(self):
    resp = httpx.get(SRV1 + '/counter1')
    self.assertEqual("variant1: 1 1", resp.text)
    resp = httpx.get(SRV1 + '/counter1')
    self.assertEqual("variant2: 2 1", resp.text)
    resp = httpx.get(SRV1 + '/counter1')
    self.assertEqual("variant1: 3 2", resp.text)
    resp = httpx.get(SRV1 + '/counter1')
    self.assertEqual("variant2: 4 2", resp.text)
    resp = httpx.get(SRV1 + '/counter2')
    self.assertEqual("variant3: 5 3 3", resp.headers.get('X-Counter'))
    resp = httpx.get(SRV1 + '/counter3')
    self.assertEqual("variant3: 5 3 3", resp.text)
def data_set_detail(q: str):
    res_data = {}
    r = httpx.get(q)
    soup = BeautifulSoup(r.text, 'html.parser')
    root = soup.find('div', attrs={'class': 'page_directory'})
    res_data['title'] = root.div.text
    res_data['statics'] = []
    res_data['resources'] = []
    res_data['infomation'] = []  # key name kept as-is for downstream consumers
    external_infomation = root.find_all('tr')
    for ei in external_infomation:
        th_text = re.sub(r"\n|\r|\s+|-", '', ei.th.text)
        td_text = re.sub(r"\n|\r|\s+|-", '', ei.td.text)
        if th_text != '資料資源:':
            res_data['infomation'].append({'name': th_text, 'value': td_text})
        else:
            resources = ei.find_all('a')
            for resource in resources:
                if isValidURL(resource['href']):
                    download_link = resource['href']
                else:
                    download_link = f"https://www.pthg.gov.tw{resource['href']}"
                res_data['resources'].append({
                    'detail': resource['title'],
                    'name': td_text,
                    'type': resource['title'].split('.')[1],
                    "description": resource['title'],
                    'downloadLink': download_link,
                })
    return res_data
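# `isValidURL` is referenced above but not defined in this snippet. A simple
# stand-in using urllib.parse (assumption: "valid" means an absolute
# http(s) URL, which is all the branch above needs):
from urllib.parse import urlparse

def isValidURL(url: str) -> bool:
    parsed = urlparse(url)
    return parsed.scheme in ('http', 'https') and bool(parsed.netloc)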
async def updateMMangaInfo(self, link):
    sourceS = httpx.get(link)
    pageS = html.fromstring(sourceS.content)
    try:
        title = ((pageS.xpath("//meta[@property='og:title']/@content"))[0].split(" – "))[0]
    except IndexError:
        title = "No Title"
    try:
        imgURL = (pageS.xpath("//meta[@property='og:image']/@content"))[0]
    except IndexError:
        imgURL = "https://i.resimyukle.xyz/1yx268.png"
    durum = (pageS.xpath("//span[@class='mangasc-stat']/text()"))[0]
    mangaka = (pageS.xpath("//td[./b/text()='Mangaka:']/text()"))[0]
    bolum = (pageS.xpath("//td[./b/text()='Bölüm Sayısı:']/text()"))[0]
    turler = ", ".join(str(x) for x in pageS.xpath("//td[./b/text()='Türler:']/ul/li/a/text()"))
    diger = (pageS.xpath("//td[./b/text()='Diğer Adları:']/text()"))[0]
    cikis = (pageS.xpath("//td[./b/text()='Çıkış Yılı:']/text()"))[0]
    try:
        konu = (pageS.xpath("//meta[@property='og:description']/@content"))[0]
    except IndexError:
        konu = " "
    try:
        latestL = (pageS.xpath("(//a[@class='mangaep-episode'])[1]/@href"))[0]
        latestN = (pageS.xpath("(//a[@class='mangaep-episode'])[1]/text()"))[0]
    except IndexError:
        latestN = "Bölüm Yok"
        latestL = link
    manga = await self.bot.pg_con.fetch('SELECT * FROM manga WHERE name = $1', title)
    if not manga:
        await self.bot.pg_con.execute(
            'INSERT INTO manga (name, author, "releaseDate", status, genre, konu, "latestN", "latestL", url, img, total, alias) '
            'VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)',
            title, mangaka, cikis, durum, turler, konu, latestN, latestL, link, imgURL, bolum, diger)
    else:
        await self.bot.pg_con.execute(
            'UPDATE manga SET author = $2, "releaseDate" = $3, status = $4, genre = $5, konu = $6, '
            '"latestN" = $7, "latestL" = $8, url = $9, img = $10, total = $11, alias = $12 WHERE name = $1',
            title, mangaka, cikis, durum, turler, konu, latestN, latestL, link, imgURL, bolum, diger)
def obtener_codigo_postal():
    url_municipios = obtener_url_completa_municipios()
    municipios = obtener_nombre_url()
    codigos_postales = []
    resultado = []
    lista_municipios = []
    string = "https://micodigopostal.org/zacatecas/"
    estados = []
    for municipio in municipios:
        lista_municipios.append(municipio.replace("/", ""))
    for url in url_municipios:
        if search(string, url):
            estados.append(url)
    for url_estados in estados:
        page = httpx.get(url_estados)
        soup = BeautifulSoup(page.content, "html.parser")
        table = soup.find("tbody")
        for tr in table.find_all("tr"):
            if len(tr) > 4:
                codigo_postal = [td.text for td in tr.find_all("td")]
                codigos_postales.append(codigo_postal)
                resultado.append({
                    "municipio": codigo_postal[3],
                    "estado": lista_municipios[31],
                    "ciudad": codigo_postal[4],
                    "asentamiento": codigo_postal[0],
                    "tipo_asentamiento": codigo_postal[1],
                    "codigo_postal": codigo_postal[2],
                })
    return resultado
async def emoji_search(self, ctx, *, search):
    URL = f"https://slackmojis.com/emojis/search?utf8=%E2%9C%93&authenticity_token=8OgBpTphVqlDDugOXU6J6IBtDdXBCdtVhg3VDCEHCTdTt7TSn5vQNha%2BoJkhDbmGkow8Tvk8d%2FiBmanqQeP%2Bdg%3D%3D&query={search}"
    response = requests.get(URL)
    if response.status_code == 200:
        soup = bs(response.text, 'html.parser')
        images = []
        titles = []
        for img in soup.find_all('img'):
            images.append(img['src'])
            title = img['alt'].replace(' random', '')
            titles.append(title)
        more_than_5 = True
        if len(images) == 0:
            await ctx.send("Sorry, nothing for you boomer!")
        elif not nsfw_check(images):
            for i in range(5):
                message = discord.Embed(title=titles[i].title(),
                                        color=discord.Colour.orange())
                message.set_image(url=images[i])
                await ctx.send(embed=message)
                if i == len(images) - 1:
                    more_than_5 = False
                    break
            if more_than_5:
                await ctx.send(
                    f"Type `f.emoji_list {search}` to get the full emoji list"
                )
        else:
            message = discord.Embed(title="CENSORED!", color=discord.Colour.red())
            await ctx.send(embed=message)
def sync_detailed(
    *,
    client: AuthenticatedClient,
    application_machine_name: Union[Unset, str] = UNSET,
    application_name: Union[Unset, str] = UNSET,
    ecs_task_family: Union[Unset, str] = UNSET,
    environment_machine_name: Union[Unset, str] = UNSET,
    environment_name: Union[Unset, str] = UNSET,
    expand: Union[Unset, EcsTaskDeploysListExpand] = UNSET,
    fullname: Union[Unset, str] = UNSET,
    limit: Union[Unset, int] = UNSET,
    offset: Union[Unset, int] = UNSET,
    sha: Union[Unset, str] = UNSET,
    username: Union[Unset, str] = UNSET,
    version: Union[Unset, str] = UNSET,
) -> Response[PaginatedECSTaskDeployList]:
    kwargs = _get_kwargs(
        client=client,
        application_machine_name=application_machine_name,
        application_name=application_name,
        ecs_task_family=ecs_task_family,
        environment_machine_name=environment_machine_name,
        environment_name=environment_name,
        expand=expand,
        fullname=fullname,
        limit=limit,
        offset=offset,
        sha=sha,
        username=username,
        version=version,
    )

    response = httpx.get(**kwargs)

    return _build_response(response=response)
def get(
    *,
    client: "AuthenticatedClient",
    group_id: "int",
    extra_parameters: Mapping[str, str] = None,
) -> Union[GroupAsExtendedJSON,]:
    """Get a group by id."""
    url = "{}/api/v1/groups/{groupId}".format(client.base_url, groupId=group_id)
    headers: Dict[str, Any] = client.get_headers()
    params: Dict[str, Any] = {
        "no_course_in_assignment": "true",
        "no_role_name": "true",
        "no_assignment_in_case": "true",
        "extended": "true",
    }
    if extra_parameters:
        params.update(extra_parameters)

    response = httpx.get(url=url, headers=headers, params=params)

    if response_code_matches(response.status_code, 200):
        return GroupAsExtendedJSON.from_dict(cast(Dict[str, Any], response.json()))
    if response_code_matches(response.status_code, 400):
        raise BaseError.from_dict(cast(Dict[str, Any], response.json()))
    if response_code_matches(response.status_code, 409):
        raise BaseError.from_dict(cast(Dict[str, Any], response.json()))
    if response_code_matches(response.status_code, 401):
        raise BaseError.from_dict(cast(Dict[str, Any], response.json()))
    if response_code_matches(response.status_code, 403):
        raise BaseError.from_dict(cast(Dict[str, Any], response.json()))
    if response_code_matches(response.status_code, "5XX"):
        raise BaseError.from_dict(cast(Dict[str, Any], response.json()))
    else:
        raise ApiResponseError(response=response)
def _login_0(self) -> Cookies:
    """
    Log in by scanning a QR code

    :return: the Cookies produced after logging in
    """
    # Fetch the information needed to build the QR code
    qrcode_url_resp = FaildRetry()(
        lambda: httpx.get(self.urls['login']['qrcode_info']))()
    if qrcode_url_resp.status_code != 200:
        print('Failed to fetch the QR code...')
        return
    qrcode_url_json = qrcode_url_resp.json()
    oauthKey = qrcode_url_json['data']['oauthKey']
    qrcode_url = qrcode_url_json['data']['url']
    qr = qrcode.QRCode(version=3)
    qr.add_data(qrcode_url)
    qr.make()
    img = qr.make_image()
    img.show()
    # Poll for the login status
    while True:
        time.sleep(1)
        getLoginInfo_resp = FaildRetry()(
            lambda: httpx.post(self.urls['login']['login_info'],
                               data={
                                   'oauthKey': oauthKey,
                                   'gourl': self.urls['main'],
                               }))()
        if getLoginInfo_resp.status_code != 200:
            continue
        getLoginInfo_json = getLoginInfo_resp.json()
        if not getLoginInfo_json['status']:
            continue
        self.cookies = getLoginInfo_resp.cookies
        break
    print('Login succeeded')
    return self.cookies
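# `FaildRetry` (sic) is used above as a retry decorator factory but is not
# defined in this snippet. A minimal stand-in matching the call pattern
# FaildRetry()(fn)(): retry a callable a few times before giving up.
import time

class FaildRetry:
    def __init__(self, attempts: int = 3, delay: float = 1.0):
        self.attempts = attempts
        self.delay = delay

    def __call__(self, fn):
        def wrapper(*args, **kwargs):
            last_exc = None
            for _ in range(self.attempts):
                try:
                    return fn(*args, **kwargs)
                except Exception as exc:
                    last_exc = exc
                    time.sleep(self.delay)
            raise last_exc
        return wrapper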
async def test_configure_service_name_pin(tracer, test_spans):
    """
    When setting service name via a Pin
        We use the value from the Pin
    """
    url = get_url("/status/200")

    def assert_spans(test_spans, service):
        test_spans.assert_trace_count(1)
        test_spans.assert_span_count(1)
        assert test_spans.spans[0].service == service
        test_spans.reset()

    # override the tracer on the default sync client
    # DEV: `httpx.get` will call `with Client() as client: client.get()`
    Pin.override(httpx.Client, tracer=tracer)
    # sync client
    client = httpx.Client()
    Pin.override(client, service="sync-client", tracer=tracer)
    # async client
    async_client = httpx.AsyncClient()
    Pin.override(async_client, service="async-client", tracer=tracer)

    resp = httpx.get(url)
    assert resp.status_code == 200
    assert_spans(test_spans, service=None)

    resp = client.get(url)
    assert resp.status_code == 200
    assert_spans(test_spans, service="sync-client")

    async with httpx.AsyncClient() as client:
        resp = await async_client.get(url)
        assert resp.status_code == 200
        assert_spans(test_spans, service="async-client")
def get_daily_scheme_performance(self, performance_url, as_json=False):
    fund_performance = []
    if self.is_holiday():
        url = performance_url + '&nav-date=' + self.get_friday()
    else:
        url = performance_url + '&nav-date=' + self.get_today()
    # html = requests.get(url, headers=self._user_agent)
    html = httpx.get(url, headers=self._user_agent, timeout=15)
    soup = BeautifulSoup(html.text, 'html.parser')
    rows = soup.select("table tbody tr")
    try:
        for tr in rows:
            scheme_details = dict()
            scheme_details['scheme_name'] = tr.select("td")[0].get_text()
            scheme_details['benchmark'] = tr.select("td")[1].get_text()
            scheme_details['latest NAV- Regular'] = tr.select("td")[2].get_text().strip()
            scheme_details['latest NAV- Direct'] = tr.select("td")[3].get_text().strip()
            regData = tr.find_all("td", recursive=False,
                                  class_="text-right period-return-reg", limit=1)
            dirData = tr.find_all("td", recursive=False,
                                  class_="text-right period-return-dir", limit=1)
            scheme_details['1-Year Return(%)- Regular'] = regData[0]['data-1y']
            scheme_details['1-Year Return(%)- Direct'] = dirData[0]['data-1y']
            scheme_details['3-Year Return(%)- Regular'] = regData[0]['data-3y']
            scheme_details['3-Year Return(%)- Direct'] = dirData[0]['data-3y']
            scheme_details['5-Year Return(%)- Regular'] = regData[0]['data-5y']
            scheme_details['5-Year Return(%)- Direct'] = dirData[0]['data-5y']
            fund_performance.append(scheme_details)
    except Exception:
        return self.render_response(['The underlying data is unavailable for Today'], as_json)
    return self.render_response(fund_performance, as_json)