def get(self, table, query=None):
    # Build the base URL with the page-size parameter.
    url = '%s%s?results_per_page=%s' % (self.site, table, self.limit)
    if query:
        # Append the query, JSON-encoded with the custom encoder.
        url = '%s&q=%s' % (url, dmp(query, cls=CustomEncoder))
    r = g(url, headers=self.HDR)
    return loads(r.text)['objects']
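The method above assumes surrounding class state (self.site, self.limit, self.HDR) and helpers (dmp, g, loads, CustomEncoder). A minimal, hypothetical scaffold that makes it runnable; every name introduced here is an assumption for illustration, not the original API:

from json import JSONEncoder, dumps as dmp, loads
from requests import get as g

class CustomEncoder(JSONEncoder):
    # Hypothetical encoder: fall back to str() for values json can't serialize.
    def default(self, obj):
        return str(obj)

class RestClient:  # hypothetical wrapper class
    HDR = {'Content-Type': 'application/json'}  # assumed headers

    def __init__(self, site, limit=100):
        self.site = site      # e.g. 'https://example.com/api/' (placeholder)
        self.limit = limit    # page size sent as results_per_page

    def get(self, table, query=None):
        url = '%s%s?results_per_page=%s' % (self.site, table, self.limit)
        if query:
            url = '%s&q=%s' % (url, dmp(query, cls=CustomEncoder))
        r = g(url, headers=self.HDR)
        return loads(r.text)['objects']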
def get(self, table, query=None): url = "%s%s?results_per_page=%s" % (self.site, table, self.limit) if query: url = "%s&q=%s" % (url, dmp(query, cls=CustomEncoder)) r = g(url, headers=self.HDR) return loads(r.text)["objects"]
# Map each currency code to the list of countries that use it,
# using the REST Countries v2 API. Assumes: from requests import get as g
def countries_by_currency():
    c_code = {}
    r = g("https://restcountries.eu/rest/v2/").json()
    for country in r:
        for currency in country["currencies"]:
            code = currency["code"]
            if code not in c_code:
                c_code[code] = [country["name"]]
            elif country["name"] not in c_code[code]:
                c_code[code].append(country["name"])
    return c_code
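A minimal usage sketch for the function above; the "EUR" key is just an example and depends on what the REST Countries v2 payload actually returns:

if __name__ == "__main__":
    by_currency = countries_by_currency()
    print(len(by_currency))            # number of distinct currency codes
    print(by_currency.get("EUR", []))  # countries using the euro, if present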
pip uninstall requests  # Removes the requests module

# Using modules

# 1 - Import the requests module
import requests
requests.get('http://www.uol.com.br')

# 2 - Import the module, renaming it to r
import requests as r
r.get('http://www.uol.com.br')

# 3 - Import only the get method from the requests module
from requests import get
get('http://uol.com.br')

# 4 - Import the get and post methods from the requests module
from requests import get, post

# 5 - Import the get method from the requests module, renaming it to g
from requests import get as g
g('http://uol.com.br')

-------------------------------------------------------
Modules (os): # Interface to OS actions
Usage:
import os

Methods:
os.path.join()  # Builds the path while keeping the code portable
# Example (Linux):
os.path.join('/root', 'Documents')
# ...output -> '/root/Documents'
# Example (Windows):
os.path.join('C', 'Documents')
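A small runnable sketch of the os.path.join behavior described above; the printed output assumes a POSIX system:

import os

# os.path.join picks the separator for the host OS, so the same
# code stays portable between Linux and Windows.
docs = os.path.join('/root', 'Documents')
print(docs)     # -> /root/Documents (on Linux)
print(os.sep)   # the separator that was used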
# Variant of get() without pagination: the ?q= parameter is only
# appended when a query is actually given.
def get(self, table, query=None):
    base = '%s%s' % (self.site, table)
    url = '%s?q=%s' % (base, dmp(query, cls=CustomEncoder)) if query else base
    r = g(url, headers=self.HDR)
    return loads(r.text)['objects']
# Scrape metadata for every xkcd comic: walk the archive page, fetch each
# comic's info.0.json, flatten it into a DataFrame row, and pickle the result.
from json import loads as l
from requests import get as g
from pandas.io.json import json_normalize as n  # newer pandas: pandas.json_normalize
from bs4 import BeautifulSoup as b
from pandas import concat as c

c([
    # Each archive link's href is the comic id path, e.g. '/353/', so the
    # substitution yields https://xkcd.com/353/info.0.json
    n(l(g("https://xkcd.com<id>info.0.json".replace("<id>", link["href"])).text))
    for link in b(g("https://xkcd.com/archive/").text)
        .find(id="middleContainer")
        .find_all("a")
]).to_pickle("xkcd_metadata.pkl")
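A quick sanity check on the pickled output; this assumes the scraper above has already run in the current directory:

import pandas as pd

df = pd.read_pickle("xkcd_metadata.pkl")
print(df.shape)                     # one row per comic
print(df[["num", "title"]].head())  # column names come from xkcd's info.0.json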
from requests import get as g
from bs4 import BeautifulSoup as bs
import pandas as pd

requête = g(
    "https://www.imdb.com/search/title?release_date=2017-01-01,2017-12-31&sort=num_votes,desc&ref_"
)
html_soup = bs(requête.text, "html.parser")
movie_container = html_soup.find_all('div', class_="lister-item mode-advanced")
print(len(movie_container))

# Title of one movie
first_movie = movie_container[0]
first_name = first_movie.h3.a.text
print(first_name)

# Release year
first_years = first_movie.h3.find(
    "span", class_="lister-item-year text-muted unbold").text
print(first_years)

# IMDb rating
first_imdb = float(first_movie.strong.text)
print(first_imdb)

# Metascore rating
first_metascore = first_movie.find("span", class_="metascore favorable")
first_metascore = int(first_metascore.text)
print(first_metascore)
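A sketch extending the single-movie extraction above to every container on the page. The None check is the key detail: listings without a Metascore have no such span, so calling .text on the result would raise. The class filter is loosened to "metascore" here (an assumption) so unfavorable scores are kept too:

# Collect fields for all movies on the page, skipping entries without a Metascore.
names, years, ratings, metascores = [], [], [], []
for movie in movie_container:
    meta = movie.find("span", class_="metascore")
    if meta is None:
        continue  # no Metascore span on this listing
    names.append(movie.h3.a.text)
    years.append(movie.h3.find("span", class_="lister-item-year text-muted unbold").text)
    ratings.append(float(movie.strong.text))
    metascores.append(int(meta.text))

movies = pd.DataFrame({"movie": names, "year": years,
                       "imdb": ratings, "metascore": metascores})
print(movies.head())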