def get_player_counts(self, steam_id):
    """Fetch per-category match counts for a player (total matches,
    matches with a leaver, etc.) from the OpenDota API.

    Returns the decoded JSON payload as a dict.
    """
    counts_url = (
        f'https://api.opendota.com/api/players/{steam_id}/counts'
        f'?api_key={self.key}'
    )
    return loads(req(counts_url).text)
def request(method: str, url: str, headers: dict, **kwargs):
    """Issue an HTTP request whose User-Agent prepends SOFTWARE_VERSION
    to the default requests agent string.

    The caller-supplied headers are preserved; only User-Agent is set.
    """
    default_agent = Session().headers["User-Agent"]
    merged = dict(headers)
    merged['User-Agent'] = f'{SOFTWARE_VERSION} ({default_agent})'
    return req(method, url, headers=merged, **kwargs)
def points():
    """Background loop awarding 5 chat points per minute to every non-bot
    user currently listed in the Twitch channel's chatter list.

    State is persisted to ``_pointsFileName`` as a ``str(dict)`` dump and
    reloaded on startup.  Runs forever (sleeps 60s per tick via ``t``);
    intended to be run in a daemon thread.
    """
    global _pointsList
    global chatInfo
    import ast
    import json

    just_started = True
    if isfile(_pointsFileName):
        try:
            # BUG FIX: the original passed the file *object* to eval()
            # (always a TypeError, silently caught, wiping the file).
            # Read the text and parse it safely with literal_eval.
            with open(_pointsFileName, 'r') as f:
                _pointsList = ast.literal_eval(f.read())
        except (OSError, ValueError, SyntaxError):
            # Unreadable or corrupt state file: reset it to empty.
            open(_pointsFileName, 'w').close()
    while True:
        # The chatters endpoint returns JSON; parse it as JSON rather
        # than eval()-ing untrusted remote text.
        chatInfo = json.loads(
            req(method="GET",
                url="https://tmi.twitch.tv/group/user/%s/chatters" % CHANNEL
                ).text)
        chatters = chatInfo["chatters"]
        if just_started:
            # Skip the first pass so points only accrue for time spent
            # in chat after the bot came online.
            just_started = False
        else:
            for group in chatters:
                for name in chatters[group]:
                    if "bot" in name:
                        continue  # never award points to bot accounts
                    _pointsList[name] = _pointsList.get(name, 0) + 5
        with open(_pointsFileName, 'w') as f:
            f.write(str(_pointsList))
        t(60)  # one accrual tick per minute
def tpb():
    """Scrape The Pirate Bay search results for the module-level query
    `q`, keeping only seeded torrents whose category contains `cat`.

    Returns rows of [title, size, seeders, leechers, magnet, magnet slice].
    """
    page = req(f'https://thepiratebay.org/search/{q}/0/7/200/', timeout=3)
    tree = html.fromstring(page.content)
    rows = zip(
        tree.xpath('//a[@class="detLink"]/text()'),
        tree.xpath('//font[@class="detDesc"]/text()'),
        tree.xpath('//td[@align="right"][1]/text()'),
        tree.xpath('//td[@align="right"][2]/text()'),
        tree.xpath('//a[contains(@href, "magnet")]/@href'),
        tree.xpath('//td[@class="vertTh"]//a[2]/text()'),
    )
    results = []
    for title, desc, seeds, leeches, magnet, category in rows:
        if cat in category.lower() and int(seeds) > 0:
            # Size sits in the second comma-separated chunk of the
            # description, e.g. "Uploaded X, Size 1.2 GiB, ULed by Y".
            size = desc.split(', ')[1][5:-2] + 'B'
            results.append([title, size, seeds, leeches, magnet,
                            magnet[20:60]])
    print('tpb processed successfully...')
    return results
def layer_to_gdf(self, layername, print_req_url=True):
    """Load one vector layer from the WFS datasource as a GeoDataFrame.

    Side effects: records the request URL, its content hash and the
    output filepath on self; unless ``checking_mode`` is set, also dumps
    a .geojson copy to the default output folder.
    """
    query = {
        'service': 'WFS',
        'version': '1.0.0',
        'request': 'GetFeature',
        'typeName': layername,
        'outputFormat': 'json',
    }
    # Prepare (without sending) just to obtain the fully-encoded URL.
    req_url = req('GET', self.wfs_url, params=query).prepare().url
    if print_req_url:
        print(req_url)
    # Hash of the remote payload, keyed by layer, for change detection.
    self.layer_hashes[layername] = bf.get_hash_from_text_in_url(req_url)
    self.layer_urls[layername] = req_url
    gdf = gpd.read_file(req_url)
    outpath = os.path.join(bf.default_output_folder, layername + '.geojson')
    self.layer_filepaths[layername] = outpath
    if not self.checking_mode:
        gdf.to_file(outpath, driver='GeoJSON')
    return gdf
def get_query_result(url):
    """GET `url` and return the body decoded as UTF-8 (decode errors
    ignored).  Raises Exception on any non-200 status."""
    response = req("get", url)
    if response.status_code != 200:
        raise Exception("error fetching url {}, {}".format(
            url, response.status_code))
    return response.content.decode('utf-8', 'ignore')
def request(self, url, method='GET', *args, **kwargs):
    """Perform an HTTP request on behalf of the auth backend.

    Applies the backend's VERIFY_SSL and timeout settings plus a
    User-Agent header, converts connection failures into AuthFailed, and
    raises for HTTP error statuses after printing debug context.
    """
    from social.utils import user_agent
    from social.exceptions import AuthFailed
    from requests import request as req
    kwargs.setdefault('headers', {})
    if self.setting('VERIFY_SSL') is not None:
        kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
    kwargs.setdefault('timeout',
                      self.setting('REQUESTS_TIMEOUT') or
                      self.setting('URLOPEN_TIMEOUT'))
    if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
        kwargs['headers']['User-Agent'] = user_agent()
    try:
        response = req(method, url, *args, **kwargs)
    except ConnectionError as err:
        raise AuthFailed(self, str(err))
    try:
        response.raise_for_status()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; debug context is printed, then re-raised.
        print(response.json())
        print(url)
        print(kwargs)
        raise
    return response
def get_summaries(self):
    """Return the first player-summary dict from the Steam Web API for
    ``self.steam_id``; raises if the API key is rejected."""
    url = (
        'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/'
        f'?key={self.apikey}&steamids={self.steam_id}'
    )
    response = req(url)
    # NOTE(review): presumably Steam's auth-failure page mentions the
    # 'key' parameter while a good JSON reply does not — confirm.
    if 'key' in response.text:
        raise Exception('Key is not working')
    return json.loads(response.text)["response"]["players"][0]
def put(self):
    """Re-run the ML update for a research the current user may edit.

    Expects ``?id=<research id>``; verifies the JWT user holds a
    UserResearchPermission row for it, triggers the ML service, then
    appends the latest ConductedResearch iteration to the research.
    Returns 400 without permission, 500 on ML failure.
    """
    from flask import request
    from sqlalchemy import and_
    from requests import get
    from json import loads
    res_id = int(request.args.get('id'))
    current_username = get_jwt_identity()['username']
    current_user = User.find_by_username(current_username)
    permissions = db.session.query(UserResearchPermission).filter(
        and_(UserResearchPermission.userId == current_user.id,
             UserResearchPermission.researchId == res_id)).first()
    if permissions is None:
        return {
            "message": "You don't have permission to edit this research"
        }, 400
    # BUG FIX: the original referenced an undefined `new_research`
    # (NameError); the ML service must be called with res_id.
    response = req(
        'http://localhost:5000/ml/api/v1.0/update?res_id={}'.format(
            res_id)).json()
    if response['done'] == False:
        return {"message": "Internal server error"}, 500
    current_res = Research.find_by_id(res_id)
    itters = ConductedResearch.query.filter_by(researchId=res_id).all()
    print(itters[-1].id)
    current_res.conducted.append(itters[-1])
    db.session.add(itters[-1])
    db.session.commit()
    return {"message": "updated"}
def fetch_notes(account, token: str):
    """POST to the EcoleDirecte notes endpoint for `account`.

    Returns (response_json, token); the token is refreshed from the
    response when present, otherwise the old one is kept.
    """
    url = ("https://api.ecoledirecte.com/v3/eleves/"
           + str(account['id']) + "/notes.awp?verbe=get&")
    body = 'data={"token": "' + token + '"}'
    response = req("POST", url, data=body).json()
    refreshed = response['token'] or token
    return response, refreshed
def get_stats_matches(self, steam_id):
    """Win/loss totals (the /wl payload also carries other fields) for a
    player, straight from the OpenDota API as a dict."""
    wl_url = (
        f'https://api.opendota.com/api/players/{steam_id}/wl'
        f'?api_key={self.key}'
    )
    return loads(req(wl_url).text)
def _send(self, request, method, params=None, headers=None):
    """Proxy `request` to the backing host with the given HTTP method.

    Returns the upstream JSON wrapped in a JsonResponse, a 405 payload
    when the method is not allowed, or a 500 payload on any upstream
    failure (the error is printed).
    """
    if not self.is_available_method(request):
        return JsonResponse(
            {'status': HttpResponses.METHOD_NOT_ALLOWED.value})
    if params is None:
        # BUG FIX: avoid the shared mutable-default-argument pitfall.
        params = {}
    protocol = 'https' if self.is_secure else 'http'
    url = protocol + '://' + self.to
    try:
        res = req(method.value, url, params=params, headers=headers).json()
    except Exception as e:
        # Best-effort proxy: report rather than propagate upstream errors.
        print(e)
        res = {'status': HttpResponses.INTERNER_SERVER_ERROR.value}
    return JsonResponse(res)
def __init__(self, s64=None, sid=None):
    """Build a Steam profile scraper from a Steam64 id or a vanity id.

    Fetches the main profile page plus badges and games pages, then
    caches the persona name and games list on the instance.
    Raises on an invalid id or an error page.
    """
    if s64 is not None:
        s64 = str(s64)
        if len(s64) != 17:
            raise Exception('The Steam64 provided is invalid')
        suffix = f'profiles/{s64}/'
    elif sid is not None:
        sid = str(sid)
        suffix = f'id/{sid}/'
    else:
        raise Exception('Invalid user parameters')
    self.url = f'http://steamcommunity.com/{suffix}'
    self.soupMain = soup(req(self.url).text, 'html.parser')
    if 'error' in self.soupMain.title.text.lower():
        raise Exception('Error retrieveing Steam Profile')
    self.soupDate = soup(req(f'{self.url}badges/1/').text, 'html.parser')
    self.soupBadges = soup(req(f'{self.url}badges/').text, 'html.parser')
    self.soupGames = soup(req(f'{self.url}games/?tab=all').text,
                          'html.parser')
    self.persona = self.getPersona()
    self.games = self.getGames()
def robar_amigos(usuario_uid, access_token):
    """Return the total friend count for a Facebook user via the Graph
    API, or False when the request or parsing fails."""
    url = u'https://graph.facebook.com/{0}/' \
          u'friends?fields=id,name,location,picture' \
          u'&access_token={1}'.format(usuario_uid, access_token)
    try:
        response = req('GET', url)
        # BUG FIX: the original sliced str(response._content)[2:-1] to
        # strip the bytes repr — a private attribute and broken for any
        # non-ASCII body.  Decode the JSON body properly instead.
        amigos = response.json()
        return amigos["summary"]["total_count"]
    except Exception:
        # Best-effort: any failure (network, JSON, missing keys) -> False.
        return False
def get_profile(self, steam_id32):
    """Fetch an OpenDota player profile and distill it to the fields the
    app uses: dota-plus flag, cheese count, MMR estimate and solo rank.

    NOTE(review): presumably raises KeyError when the profile is private
    or unknown (OpenDota then omits these keys) — confirm with callers.
    """
    response = req(
        f'https://api.opendota.com/api/players/{steam_id32}'
        f'?api_key={self.key}'
    )
    data = loads(response.text)
    # Leftover debug print(data) removed; callers only need the summary.
    return {
        'dota-plus': data['profile']['plus'],
        'cheese': data['profile']['cheese'],
        'mmr-estimate': data['mmr_estimate']['estimate'],
        'solo-rank': data['mmr_estimate']['solo_competitive_rank'],
    }
def scrapez(url, xdict):
    """Fill `xdict` in place: each value is an XPath expression whose
    first match's stripped text replaces it.

    A missing 'feats' entry becomes ''; any other missing key prints the
    failing url/key and aborts the program via SystemExit.
    Returns the mutated xdict.
    """
    doc = html.fromstring(req(url, headers=head).content)
    for key, xpath in xdict.items():
        try:
            xdict[key] = doc.xpath(xpath)[0].text_content().strip()
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works;
            # still catches both empty xpath results and bad expressions.
            if key == 'feats':
                xdict[key] = ''  # feats are genuinely optional
                continue
            print('url = ' + url)
            print('key = ' + key)
            raise SystemExit
    return xdict
def zoo():
    """Query Zooqle's RSS search (pages 1-3) for `q` in category `cat`.

    Returns one row per RSS item (skipping the 8 channel-header
    elements): [title, seeders(int), field9, field10, field8, field7].
    """
    search = f'https://zooqle.com/search?q={q}+category%3A{cat}&sd=d&fmt=rss&pg='
    rows = []
    for page_no in (1, 2, 3):
        channel = etree.fromstring(
            req(search + str(page_no), timeout=3).content)[0]
        # The first 8 children are channel metadata, not torrent items.
        for entry in channel[8:]:
            rows.append([
                entry[0].text,
                int(entry[6].text),
                entry[9].text,
                entry[10].text,
                entry[8].text,
                entry[7].text,
            ])
    print('zoo processed successfully...')
    return rows
def lime():
    """Scrape LimeTorrents search results for `q`/`cat` (sorted by
    seeds), keeping only rows with at least one seeder.

    Each row is [name, size, seeds, leeches, link, link[29:69]].
    """
    page = req(f'https://www.limetorrents.cc/search/{cat}/{q}/seeds/1/',
               timeout=3)
    tree = html.fromstring(page.content)
    columns = zip(
        tree.xpath('//div[@class="tt-name"]//a[2]/text()'),
        tree.xpath('//td[@class="tdnormal"][2]/text()'),
        tree.xpath('//table[2]//td[@class="tdseed"]/text()'),
        tree.xpath('//table[2]//td[@class="tdleech"]/text()'),
        tree.xpath('//a[@class="csprite_dl14"]/@href'),
    )
    results = []
    for name, size, seeds, leeches, link in columns:
        if int(seeds) > 0:
            results.append([name, size, seeds, leeches, link, link[29:69]])
    print('lime processed successfully...')
    return results
def get_api_key(self):
    """Validate the API key stored in settings via a cheap Steam Web API
    call.

    Returns None when no key is configured or the probe yields an empty
    body; the key itself when Steam answers with a truthy 'response'
    object; and the string 'Bad API Key' otherwise.
    """
    api_key = self.settings["settings"]["api_key"]
    # Guard clauses replace the original's redundant `elif api_key:`.
    if not api_key:
        return None
    # Any authenticated endpoint works as a probe; ResolveVanityURL is cheap.
    resp = req(
        f"https://api.steampowered.com/ISteamUser/ResolveVanityURL/v1/?key={api_key}"
        "&vanityurl=http://steamcommunity.com/profiles/76561199231140368"
    ).text
    if not resp:
        return None
    if json_load(resp)["response"]:
        return api_key
    return "Bad API Key"
def syntaxnet_api_filter_text(text, types, language):
    """POST `text` to the SyntaxNet parsey-universal-full endpoint and
    return a numpy array of [form, coarse-POS] pairs for every token
    whose coarse POS tag is NOT in `types`.

    `language` is expected like 'xx_YY'; only the part after the last
    '_' is sent as Content-Language.  Returns an empty 2-D array when
    the service replies with an empty body.
    """
    import numpy as np  # BUG FIX: pd.np was removed in pandas >= 1.0
    res = req(
        'POST',
        generate_url(s_api.api_ip, port=s_api.api_port,
                     directory='v1/parsey-universal-full'),
        data=text.encode('latin-1', 'ignore'),
        headers={'Content-Type': 'text/plain',
                 'charset': 'utf-8',
                 'Accept': 'text/plain',
                 'Content-Language': language.split('_')[-1]}
    ).text
    if res == '':
        return np.array([[]])
    # CoNLL-style TSV: column 1 = token form, column 3 = coarse POS tag.
    table = pd.read_table(StringIO(res), sep="\t", header=None, quoting=3)
    return np.array(table[[1, 3]][~table[3].isin(types)])
def get_avatar(self):
    """Download the user's full-size Steam avatar, display it scaled in
    the output label, then delete the temporary image file."""
    steam_id = self.get_steam_id()
    info = self.api.get_player_avatar(steam_id)
    avatar_path = f"{steam_id}.jpg"
    with open(avatar_path, "wb") as image:
        image.write(req(info["avatar_full"]).content)
    scaled = QPixmap(avatar_path).scaled(
        285, 265, Qt.KeepAspectRatio, Qt.FastTransformation)
    self.output_label.setPixmap(scaled)
    os.remove(rf'{os.path.abspath(os.curdir)}/{steam_id}.jpg')
    self.progressBar.setValue(100)
def login(username: str, password: str, token: str = None):
    """Authenticate against the EcoleDirecte API.

    Returns (response_json, token) on success; on a connection failure
    prints a localized error and exits via calm_exit().
    """
    body = 'data={ "identifiant": "' + username + \
        '", "motdepasse": "' + password + '", "acceptationCharte": true }'
    try:
        data = req("POST", "https://api.ecoledirecte.com/v3/login.awp",
                   data=body).json()
        refreshed = data['token'] or token
        return data, refreshed
    except Exception as exception:
        # Name-based check on purpose: requests.ConnectionError is not
        # the builtin ConnectionError.
        if type(exception).__name__ == "ConnectionError":
            print("[reverse bold red]La connexion a échoué[/]")
            print("[red]Vérifiez votre connexion Internet.[/]")
        else:
            print("[reverse bold red]Une erreur inconnue est survenue.[/]")
        calm_exit()
def request_with_retries(arg_list, request_type='post'):
    """Issue a POST (default) or GET with up to MAX_RETRIES attempts,
    stopping at the first OK response.

    :param list arg_list: positional args forwarded to requests.post/get
    :param str request_type: 'post' for POST; anything else means GET
    :returns: requests.Response -- the first OK response, or the last
        failed one
    :raises RuntimeError: if MAX_RETRIES < 1 (the original returned an
        unbound local, i.e. NameError)
    """
    if request_type == 'post':
        from requests import post as req
    else:  # anything non-'post' is treated as GET, as before
        from requests import get as req
    r = None
    for _ in range(MAX_RETRIES):
        r = req(*arg_list)
        if r.ok:
            return r
    if r is None:
        # BUG FIX: fail loudly instead of NameError on `return r`.
        raise RuntimeError('MAX_RETRIES must be >= 1')
    return r
def logout():
    """Revoke the user's GitHub OAuth grant, then clear the local
    session.  Returns "Ok" on success; aborts 401 if GitHub refuses."""
    import json
    ACCESS_TOKEN = app.blueprints['github'].token['access_token']
    CLIENT_ID = os.environ.get("GITHUB_OAUTH_CLIENT_ID")
    # json.dumps escapes the token correctly instead of raw
    # %-interpolation into a JSON template.
    payload = json.dumps({"access_token": ACCESS_TOKEN})
    logout_url = f"https://api.github.com/applications/{CLIENT_ID}/grant"
    headers = {
        # SECURITY: hard-coded Basic credentials in source control —
        # rotate them and load from configuration/env instead.
        'Authorization': 'Basic NjliYTRiMTBhNGE0Y2RhM2IxNzQ6MDJlN2FmYTQ1NTIxYmYyMzBhYzNkNTg4MGQ0MWIwNGRlMWUzYWY1OQ==',
        'Content-Type': 'application/json',
        'Cookie': '_octo=GH1.1.2130686163.1612643408; logged_in=no'
    }
    resp = req("DELETE", logout_url, headers=headers, data=payload)
    if resp.ok:
        del app.blueprints['github'].token
        session.clear()
        return "Ok"
    else:
        abort(401)
def rarbg():
    """Search the torrentapi pubapi_v2 endpoint for `q` in category
    `cat` using the session `token`.

    Returns rows of [title, size, seeders, leechers, download,
    download[20:60]].
    """
    query = {
        'mode': 'search',
        'search_string': q,
        'category': cat,
        'limit': '100',
        'sort': 'seeders',
        'min_seeders': '1',
        'format': 'json_extended',
        'token': token,
    }
    reply = req('https://torrentapi.org/pubapi_v2.php',
                timeout=3, params=query)
    torrents = json.loads(reply.text)['torrent_results']
    print('rarbg processed successfully...')
    fields = ['title', 'size', 'seeders', 'leechers', 'download']
    return [[entry[f] for f in fields] + [entry['download'][20:60]]
            for entry in torrents]
def get_faceit_avatar(self):
    """Fetch and display the FACEIT avatar for the resolved Steam id,
    routing API errors through self.output()."""
    data = self.api.get_faceit(self.get_steam_id())
    if 'Error: ' in data:
        return self.output(data)
    if data["avatar"] == "Avatar is not available":
        return self.output("Error: Avatar is not available")
    filename = f"{data['nickname']}.jpg"
    with open(filename, "wb") as image:
        image.write(req(data["avatar"]).content)
    scaled = QPixmap(filename).scaled(
        225, 255, Qt.KeepAspectRatio, Qt.FastTransformation)
    self.output_label.setPixmap(scaled)
    os.remove(rf'{os.path.abspath(os.curdir)}/{data["nickname"]}.jpg')
    self.progressBar.setValue(100)
def get_answers(clue_in):
    """Scrape wordplays.com for candidate answers to `clue_in`.

    Returns a list of (stars, answer) tuples whose answer length matches
    clue_in.length; previously cached answers are returned untouched.
    """
    if clue_in.answers is not None:
        return clue_in.answers
    # Site-specific encoding: spaces become hyphens, punctuation is
    # percent-encoded, everything else passes through.
    encode = {' ': '-', ',': '%2C', ':': '%3A', '?': '%3F',
              '\'': '%27', '(': '%28', ')': '%29'}
    url = 'http://www.wordplays.com/crossword-solver/' + ''.join(
        encode.get(c, c) for c in clue_in.clue)
    pybot.chill_out_for_a_bit(3)  # don't send requests too quickly
    r = req(url)
    if r.status_code != 200:
        print('Nope', url)
    # Each result block: star-rating divs followed by the clue link.
    scraped = findall(r'class=stars>(.*?)<td class=clue', r.text)
    answers = []
    for block in scraped:
        stars = len(findall(r'<div></div>', block))
        ans = findall(r'crossword-clues/(.*?)"', block)[0].strip().lower()
        if len(ans) == clue_in.length:
            answers.append((stars, ans))
    return answers
def search(q, cat):
    """Fan the query out to every torrent scraper in parallel, merge
    their rows, then run them through filtor().

    Returns the filtered results, or None if filtor raises (the
    exception is logged).
    """
    logger.info('beginning search')
    token_reply = req(
        'https://torrentapi.org/pubapi_v2.php?get_token=get_token')
    token = json.loads(token_reply.text)['token']
    jobs = [(eztv, q),
            (lime, (q, cat)),
            (rarbg, (q, cat, token)),
            (tpb, (q, cat)),
            (zoo, (q, cat))]
    pool = Pool(5)
    tormaps = pool.map(run_func, jobs)
    pool.close()
    pool.join()
    torlist = []
    for tormap in tormaps:
        torlist.extend(tormap)
    print('# of results (pre-filtor) = ' + str(len(torlist)))
    logger.info('search complete, attempting to run filtor...')
    try:
        results = filtor(torlist, q)
        print('# of results (post-filtor) = ' + str(len(results)))
        return results
    except Exception as e:
        logging.exception(e)
def get_ticker():
    """Flask view: look up a ticker's post data and enrich it with
    one-year volume/high/low from the Polygon aggregates API.

    Returns a JSON string on success, or an error_json payload for an
    unknown ticker / missing query parameter.
    """
    if 'ticker' not in request.args:
        return error_json("Invalid Data")
    ticker = str(request.args.get('ticker')).upper()
    post = Posts.query.filter(Posts.stock_ticker == ticker).first()
    if not post:
        return error_json("Invalid Ticker " + ticker)
    d1 = post.toDict(True)
    # SECURITY: API key hard-coded in source; move to configuration.
    POLYGON_API_KEY = 'YvETvDJe59N6Duvha_iEQPLFepUqsZwR'
    todayDate = datetime.today()
    toDate = todayDate.strftime("%Y-%m-%d")
    # One year back, same month/day, zero-padded.
    fromDate = (str(todayDate.year - 1) + "-"
                + str(todayDate.month).zfill(2) + "-"
                + str(todayDate.day).zfill(2))
    volume_url = (
        f"https://api.polygon.io/v2/aggs/ticker/{ticker}/range/1/year/"
        f"{fromDate}/{toDate}"
        f"?unadjusted=true&sort=asc&limit=120&apiKey={POLYGON_API_KEY}")
    print(volume_url)
    result2 = json.loads(req(method="GET", url=volume_url).text)
    volume = ''
    week_high = ''
    week_low = ''
    if 'results' in result2 and len(result2['results']) > 0:
        first_bar = result2['results'][0]
        volume = first_bar["v"]
        week_high = first_bar["h"]
        week_low = first_bar["l"]
    d2 = {"volume": volume, "week_high": week_high, "week_low": week_low}
    return json.dumps(dict(d1, **d2))
def get_guardian(number):
    """Download a Guardian quick crossword (blind view) and parse it
    into two dicts of clues keyed by clue number: (across, down).

    Raises PuzzleNotFound on a non-200 response.
    """
    page = req('http://www.theguardian.com/crosswords/quick/'
               + number + '/blind')
    if page.status_code != 200:
        raise PuzzleNotFound()
    # Collapse all whitespace so the section regexes span line breaks.
    body = ' '.join(page.text.split())
    across_raw = findall(r'<li>(.*?) </li>',
                         findall(r'<h2>Across(.*?)</u', body)[0])
    down_raw = findall(r'<li>(.*?) </li>',
                       findall(r'<h2>Down(.*?)</ul>', body)[0])
    across = {}
    for entry in across_raw:
        parts = entry.strip().split()
        across[int(parts[0])] = gen_guardian_clue(parts, 'across')
    down = {}
    for entry in down_raw:
        parts = entry.strip().split()
        down[int(parts[0])] = gen_guardian_clue(parts, 'down')
    return across, down
def descargar_imagen(backend, strategy, user, response, details,
                     is_new=False, *args, **kwargs):
    """Social-auth pipeline step: download the Facebook/Twitter profile
    photo for a user (the actual save to the profile model is currently
    commented out, so this only fetches and discards the image).

    NOTE(review): the original compared backend.name with `is 'facebook'`
    / `is 'twitter'` — string identity, unreliable and a SyntaxWarning on
    modern Python; fixed to `==` below.
    """
    # Debug-print toggle block (add/remove the leading # on the last
    # line to enable):
    '''
    print('descargar_imagen')
    print(user)
    print(response)
    print(details)
    print(backend)
    print(strategy)
    print(is_new)
    print(args)
    print(kwargs)
    #'''
    if is_new:
        # Newly created user: fetch the provider's photo.
        if backend.name == 'facebook':
            url = 'http://graph.facebook.com/{0}/picture'.format(
                response['id'])
            try:
                response = req('GET', url, params={'type': 'large'})
                response.raise_for_status()
            except HTTPError:
                pass  # best-effort: missing photo is not an error
            else:
                # perfil_usuario = PerfilUsuario.objects.get(usuario=user)
                # perfil_usuario.imagen.save('{0}_facebook.png'.format(user.username), ContentFile(response.content))
                # perfil_usuario.save()
                pass
        elif backend.name == 'twitter':
            # Strip '_normal' to get the full-size image URL.
            url = response.get('profile_image_url', '').replace('_normal', '')
            try:
                response = req('GET', url, params={'type': 'large'})
                response.raise_for_status()
            except HTTPError:
                pass
            else:
                # perfil_usuario = PerfilUsuario.objects.get(usuario=user)
                # perfil_usuario.imagen.save('{0}_twitter.png'.format(user.username), ContentFile(response.content))
                # perfil_usuario.save()
                pass
    else:
        # Existing user: only fetch when no image is stored yet.  The
        # existence check is commented out, so this branch currently
        # returns immediately without downloading anything.
        if backend.name == 'facebook':  # BUG FIX: was `is 'facebook'`
            try:
                # dimenciones = perfil_usuario.imagen._get_size()
                return
            except Exception:
                url = 'http://graph.facebook.com/{0}/picture'.format(
                    response['id'])
                try:
                    response = req('GET', url, params={'type': 'large'})
                    response.raise_for_status()
                except HTTPError:
                    pass
                else:
                    # perfil_usuario.imagen.save('{0}_facebook.jpg'.format(user.username), ContentFile(response.content))
                    # perfil_usuario.save()
                    pass
        elif backend.name == 'twitter':  # BUG FIX: was `is 'twitter'`
            try:
                # dimenciones = perfil_usuario.imagen._get_size()
                return
            except Exception:
                url = response.get('profile_image_url', '').replace(
                    '_normal', '')
                try:
                    response = req('GET', url, params={'type': 'large'})
                    response.raise_for_status()
                except HTTPError:
                    pass
                else:
                    # perfil_usuario.imagen.save('{0}_twitter.png'.format(user.username), ContentFile(response.content))
                    # perfil_usuario.save()
                    pass