import json
import re

import requests

# Assumed to be defined at module level: googleplacedetails, googlemapskey,
# and itemtofind.


def getDetails(placeid):
    placedetailresponse = requests.get(
        googleplacedetails + googlemapskey + "&placeid=" + placeid)
    placedetails = json.loads(placedetailresponse.content.decode('utf-8'))
    # Get the restaurant's website from the place details JSON.
    # If the restaurant has no website, return.
    try:
        website = placedetails["result"]["website"]
    except KeyError:
        return
    # Get the name of the restaurant. This field always exists in the JSON response.
    placename = placedetails["result"]["name"]
    # Get the HTML of the restaurant's website as a string.
    try:
        restaurantresponse = requests.get(website).content.decode('utf-8')
    except UnicodeDecodeError:
        return
    # Search the restaurant's main HTML page for an anchor that has a form of "menu" as its text.
    menu = re.search('<a href=(.*?)>(.*?)(M|m)enu(.*?)</a>', restaurantresponse)
    if menu:
        found = menu.group(1)
        # If the href is relative (i.e. contains no period), resolve it against the site root.
        if found.find(".") < 0:
            found = website[0:website.rfind("/")] + found
        # The menu URL comes wrapped in quotes, so strip these out.
        try:
            menuresponse = requests.get(found.replace("\"", "")).content.decode('utf-8')
        except Exception:
            return
        # For the purposes of the matching, count successive lines as one and
        # ignore the case of the item to find.
        menuregex = re.compile(">.*?" + itemtofind + ".*?<", re.DOTALL | re.IGNORECASE)
        menuitems = re.findall(menuregex, menuresponse)
        # For each item that matches the description, print it with the restaurant where it's found.
        for menuitem in menuitems:
            print(placename + " has " + menuitem[menuitem.rfind(">") + 1:menuitem.rfind("<")])
def get_random_comic():
    # Get the "num" of the latest comic to get the total number of xkcd comics created.
    latest_comic = requests.get("http://xkcd.com/info.0.json").json()
    # Get a random comic from all time.
    random_num = random.randint(1, latest_comic['num'])
    random_comic = requests.get("http://xkcd.com/{}/info.0.json".format(random_num)).json()
    return random_comic
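# Usage sketch for get_random_comic() above (assumes `requests` and `random`
# are imported at module level). "num", "title", and "img" are fields of
# xkcd's public info.0.json payload.
comic = get_random_comic()
print("xkcd #{}: {} -> {}".format(comic['num'], comic['title'], comic['img']))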
def save_cuenta(request):
    Cuenta.objects.all().delete()
    # To check whether the user exists. If it exists: "<Response [200]>"
    # url = 'https://api.budgetbakers.com/api/v1/user/exists/[email protected]'
    # headers = {'X-Token': '5a3709ce-acfb-49fb-8b60-b1b9e55ffb51'}
    # r = requests.get(url, headers=headers)
    url = 'https://api.budgetbakers.com/api/v1/accounts'
    headers = {
        'X-Token': '5a3709ce-acfb-49fb-8b60-b1b9e55ffb51',
        'X-User': '******'
    }
    r = requests.get(url, headers=headers)
    json = r.json()
    serializer = CuentaSerializer(data=json, many=True)
    print(serializer.is_valid())
    serializer.save()
    n = Cuenta.objects.count()
    if n > 0:
        messages.add_message(request, messages.SUCCESS,
                             'Se han exportado ' + str(n) + ' registros')
    else:
        messages.add_message(request, messages.WARNING,
                             mark_safe("No se ha exportado, por favor <a href='/cuentas-wallet/'>reintente aqui </a> en unos minutos."))
    # if serializer.is_valid():
    #     print('1. Validooo')
    #     serializer.save()
    # else:
    #     Cuenta.objects.all().delete()
    #     serializer.save()
    return redirect('/')
def getnum():
    url_forum = 'http://tieba.baidu.com/i/68449687/forum'
    r2 = requests.get(url_forum)
    search = re.findall(r'forum_name":"(.+?)"', r2.text)
    num_forum = len(search)
    if num_forum % 20 != 0:
        num_forum = num_forum // 20 + 1
    return num_forum
from os.path import basename, expandvars, join

import requests


def DownloadFile(url, path="%TEMP%"):
    file = expandvars(join(path, basename(url.split("?")[0])))
    r = requests.get(url, stream=True)
    # Stream the response to disk in 16 KiB chunks.
    with open(file, "wb") as f:
        for chunk in r.iter_content(chunk_size=16384):
            if chunk:
                f.write(chunk)
    return file
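# Usage sketch for DownloadFile() above. The URL is illustrative; note the
# default "%TEMP%" only expands on Windows, so on other platforms pass an
# explicit directory.
saved = DownloadFile("https://example.com/index.html", path=".")
print("Saved to", saved)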
def loadFileFromUrl(url):
    try:
        uResponse = requests.get(url)
    except requests.ConnectionError:
        return "Connection Error"
    Jresponse = uResponse.text
    data = json.loads(Jresponse)
    return data
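# Usage sketch for loadFileFromUrl() above (assumes `requests` and `json` are
# imported at module level). jsonplaceholder is a public test API that serves
# JSON, used here purely for illustration.
data = loadFileFromUrl("https://jsonplaceholder.typicode.com/todos/1")
print(data)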
def get_images(album, headers):
    response = requests.get(album['albumUrl'], headers=headers)
    parsed_body = html.fromstring(response.text)
    js = parsed_body.xpath('//script/text()')
    # The photo list is embedded as a JS literal inside the fourth script tag.
    text = js[3]
    image_raw = re.findall(r"'photoList':\s*(\[.*?\]),", text)[0]
    image_list = json.loads(image_raw)
    return image_list
def fetch_overrides():
    """
    A job to fetch the caniusepython3 CLI override JSON file from GitHub
    to simplify the overrides.
    """
    override_response = requests.get(OVERRIDE_URL)
    redis = get_redis()
    override_json = override_response.json()
    redis.hmset(OVERRIDE_KEY, override_json)
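# A sketch of reading the cached overrides back out, assuming get_redis()
# returns a standard redis-py client: hgetall is the read counterpart of the
# hmset call above and returns the whole hash as a dict. The function name
# read_overrides is hypothetical.
def read_overrides():
    redis = get_redis()
    return redis.hgetall(OVERRIDE_KEY)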
def _extract_title(self, url):
    title = None
    r = requests.get(url)
    if r.status_code == 200:
        content = r.text
        start_pos = content.find('<h1>')
        end_pos = content.find('</h1>')
        if 0 < start_pos < end_pos:
            title = content[start_pos + 4:end_pos].strip()
    return title
def submit_form():
    payload = {ID_USERNAME: USERNAME,
               ID_EMAIL: EMAIL,
               ID_PASSWORD: PASSWORD}
    resp = requests.get(SIGNUP_URL)
    print("Response to GET request: %s" % resp.content)
    resp = requests.post(SIGNUP_URL, payload)
    print("Headers from a POST request response: %s" % resp.headers)
def suporte_cadastro_servico(request):
    # Registration
    if request.method == "POST":
        form = SvcForm(request.POST)
        if form.is_valid():
            nome = form.cleaned_data['nome']
            descricao = form.cleaned_data['descricao']
            sigla = form.cleaned_data['sigla']
            status_ativacao = form.cleaned_data['status_ativacao']
            data = {'nome': nome, 'descricao': descricao, 'sigla': sigla,
                    'status_ativacao': status_ativacao}
            form = requests.post('http://localhost:3000/servico/', json=data)
    else:
        form = SvcForm()
    servicos = requests.get('http://localhost:3000/servico/').json()
    ram = requests.get('http://localhost:3000/ramoAtividade').json()
    return render(request, 'home/suporte/suporte_cadastro_servico.html',
                  {'servicos': servicos, 'ram': ram, 'form': form})
def clean_data():
    with open("dict.txt", "w", encoding="utf8") as dict_file:
        for w in words:
            # Supports only translations to the Slovak language.
            req = "https://translate.google.com/translate_a/single?client=gtx&sl=en&tl=sk&dt=t&q=" + w
            response = requests.get(req)
            w_t = get_translation(response.text)
            if w_t and w_t != w:
                w = w.lower().replace(',', '')
                w_t = w_t.lower().replace(',', '')
                words_translations[w] = w_t
                dict_file.write(w + "," + w_t + "\n")
def suporte_cadastro_usuario(request):
    # Registration
    if request.method == "POST":
        form = UsuFormSuporte(request.POST)
        if form.is_valid():
            nome = form.cleaned_data['nome']
            email = form.cleaned_data['email']
            status_ativacao = form.cleaned_data['status_ativacao']
            data_inativacao = form.cleaned_data['data_inativacao']
            empresa = form.cleaned_data['empresa']
            data = {'nome': nome, 'email': email,
                    'status_ativacao': status_ativacao,
                    'data_inativacao': data_inativacao,
                    'empresaId': empresa, 'perfilId': 2}
            form = requests.post('http://localhost:3000/usuario', json=data)
    else:
        form = UsuFormSuporte()
    # List users
    admins = requests.get('http://localhost:3000/usuario/2').json()
    estabelecimentos = requests.get('http://localhost:3000/empresa').json()
    return render(request, 'home/suporte/suporte_cadastro_admin.html',
                  {'admins': admins, 'estabelecimentos': estabelecimentos, 'form': form})
def suporte_cadastro_estabelecimento(request):
    # Registration
    if request.method == "POST":
        form = EmpForm(request.POST)
        if form.is_valid():
            nome_fantasia = form.cleaned_data['nome_fantasia']
            razao_social = form.cleaned_data['razao_social']
            numero_cnpj = form.cleaned_data['numero_cnpj']
            logradouro = form.cleaned_data['logradouro']
            numero_logradouro = form.cleaned_data['numero_logradouro']
            cidade = form.cleaned_data['cidade']
            uf = form.cleaned_data['uf']
            cep = form.cleaned_data['cep']
            pais = form.cleaned_data['pais']
            telefone = form.cleaned_data['telefone']
            email = form.cleaned_data['email']
            nome_responsavel = form.cleaned_data['nome_responsavel']
            cargo_responsavel = form.cleaned_data['cargo_responsavel']
            cpf_responsavel = form.cleaned_data['cpf_responsavel']
            data_abertura = form.cleaned_data['data_abertura']
            data_inativacao = form.cleaned_data['data_inativacao']
            status_ativacao = form.cleaned_data['status_ativacao']
            ramo_atividade = form.cleaned_data['ramo_atividade']
            data = {'nome_fantasia': nome_fantasia,
                    'razao_social': razao_social,
                    'numero_cnpj': numero_cnpj,
                    'logradoro': logradouro,
                    'numero_logradouro': numero_logradouro,
                    'cidade': cidade,
                    'uf': uf,
                    'cep': cep,
                    'pais': pais,
                    'telefone': telefone,
                    'email': email,
                    'nome_responsavel': nome_responsavel,
                    'cargo_responsavel': cargo_responsavel,
                    'cpf_responsavel': cpf_responsavel,
                    'data_abertura': data_abertura,
                    'data_inativacao': data_inativacao,
                    'status_ativacao': status_ativacao,
                    'ramoAtividadeId': ramo_atividade}
            form = requests.post('http://localhost:3000/empresa', json=data)
    else:
        form = EmpForm()
    # List companies
    ramos = requests.get('http://localhost:3000/ramoAtividade').json()
    empresas = requests.get('http://localhost:3000/empresa').json()
    return render(request, 'home/suporte/suporte_cadastro_estabelecimento.html',
                  {'empresas': empresas, 'ramos': ramos, 'form': form})
def download_images(images, path):
    path = path.replace(' ', '_')
    if not os.path.exists(path):
        os.makedirs(path)
    for image in images:
        image_type = re.findall(r"(\.[a-z|A-Z]*)$", image['url'])[0]
        image_name = "%s%s" % (image['photoId'], image_type)
        response = requests.get(image['url'])
        with open('%s/%s' % (path, image_name), 'wb') as f:
            f.write(response.content)
    print("Downloaded %s images in this album." % (len(images)))
def ls(prefix):
    from pip._vendor import requests  # pylint: disable=E0611
    import base64
    data = requests.get(STORAGE_URL, params=dict(
        prefix=prefix,
        fields='items(name,md5Hash)'
    )).json()
    entries = data.get('items', [])
    for entry in entries:
        # Convert the base64-encoded MD5 hash to a hex string.
        entry['md5Hash'] = base64.b64decode(entry['md5Hash']).hex()
        entry['local'] = False
    # Also look in the local cache
    entries.extend([
        {'name': fname, 'md5Hash': None, 'local': True}
        for fname in glob.glob(os.path.join(
            LOCAL_STORAGE_PATH, prefix.split('/')[-1] + '*'))])
    return entries
def suporte_cadastro_ramo(request):
    # Registration
    if request.method == "POST":
        form = RamForm(request.POST)
        if form.is_valid():
            nome = form.cleaned_data['nome']
            descricao = form.cleaned_data['descricao']
            status = form.cleaned_data['status_ativacao']
            data = {'nome': nome, 'descricao': descricao, 'status_ativacao': status}
            form = requests.post('http://localhost:3000/ramoAtividade', json=data)
    else:
        form = RamForm()
    ramos = requests.get('http://localhost:3000/ramoAtividade').json()
    return render(request, 'home/suporte/suporte_cadastro_ramo.html',
                  {'form': form, 'ramos': ramos})
def admin_cadastro_usuario(request):
    if request.method == "POST":
        form = UsuFormAdmin(request.POST)
        if form.is_valid():
            nome = form.cleaned_data['nome']
            email = form.cleaned_data['email']
            status_ativacao = form.cleaned_data['status_ativacao']
            data_inativacao = form.cleaned_data['data_inativacao']
            data = {'nome': nome, 'email': email,
                    'status_ativacao': status_ativacao,
                    'data_inativacao': data_inativacao,
                    'perfilId': 3, 'empresaId': 1}
            form = requests.post('http://localhost:3000/usuario', json=data)
    else:
        form = UsuFormAdmin()
    fun = requests.get('http://localhost:3000/usuario/perfil/3').json()
    return render(request, 'home/admin/admin_cadastro_funcionario.html',
                  {'fun': fun, 'form': form})
def proxy_to(request, path, target_url):
    url = '%s%s' % (target_url, path)
    headers = {
        'Authorization': '*****@*****.**',
        'User-Agent': '*****@*****.**',
        'Content-Type': 'application/json'
    }
    if request.method == 'GET':
        proxied_response = requests.get(url, headers=headers)
    elif request.method == 'POST':
        proxied_response = requests.post(url, data=request.body, headers=headers)
    elif request.method == 'PUT':
        proxied_response = requests.put(url, data=request.body, headers=headers)
    elif request.method == 'DELETE':
        proxied_response = requests.delete(url, data=request.body, headers=headers)
    print(url)
    return StreamingHttpResponse(proxied_response)
def get_registros(request, idCaja=0):
    cantInsertados = 0
    if idCaja != 0:
        caja = Caja.objects.get(pk=idCaja)
        yaDescargados = list(MovCaja.objects.exclude(idWallet=None).values_list('idWallet', flat=True))
        print(yaDescargados)
        url = 'https://api.budgetbakers.com/api/v1/records'
        headers = {
            'X-Token': '5a3709ce-acfb-49fb-8b60-b1b9e55ffb51',
            'X-User': '******'
        }
        r = requests.get(url, headers=headers)
        json = r.json()
        for j in json:
            if j['accountId'] == caja.idCuentaWallet and j['id'] not in yaDescargados:
                importe = 0
                tipoMovCaja = 0
                if j['amount'] < 0:
                    importe = j['amount'] * (-1)
                    tipoMovCaja = TipoMovCaja.objects.get(pk=10)  # HARDCODED
                else:
                    importe = j['amount']
                    tipoMovCaja = TipoMovCaja.objects.get(pk=9)  # HARDCODED
                empresa = Empresa.objects.get(pk=1)  # HARDCODED
                fecha = j['date']
                descripcion = 'Wallet: ' + j['note']
                idWallet = j['id']
                mov = MovCaja(caja=caja, empresa=empresa, fecha=fecha,
                              descripcion=descripcion, importe=importe,
                              tipoMovCaja=tipoMovCaja, idWallet=idWallet,
                              operador=request.user)
                mov.save()
                yaDescargados = yaDescargados + [idWallet]
                print(yaDescargados)
                cantInsertados += 1
    # messages.add_message(request, messages.SUCCESS, 'Se han exportado 5 registros')
    # messages.add_message(request, messages.WARNING,
    #                      mark_safe("Sin respuesta del servicio externo, por favor <a href='/administracion/fondos/movcaja/'>reintente aqui </a> en unos minutos."))
    # messages.add_message(request, messages.ERROR, 'Error grave. No se ha realizado la exportación')
    if cantInsertados > 0:
        messages.add_message(request, messages.INFO,
                             'Se han exportado ' + str(cantInsertados) + ' registros')
    if cantInsertados == 0:
        messages.add_message(request, messages.WARNING,
                             mark_safe("No se encontraron registros. <a href='/registros-wallet/" + str(idCaja) + "'>Reintente aqui.</a>"))
    return redirect('/administracion/fondos/movcaja/')
def backup_albums(config):
    print('== Module: Albums Backup. This may take a while.')
    user_albums_url = 'http://photo.renren.com/photo/%s/albumlist/v7' % (config.get('user', 'id'))
    headers = get_headers(config.get('user', 'cookie'))
    response = requests.get(user_albums_url, headers=headers)
    albums = get_albums(response)
    count = 1
    total = len(albums)
    for album in albums:
        print('>> [%s/%s] Downloading album `%s`...' % (count, total, album['albumName']))
        images = get_images(album, headers)
        album_dir = '%s/albums/%s' % (config.get('user', 'id'), album['albumName'])
        download_images(images, album_dir)
        count += 1
    print('== Module: Albums Backup. Finished all tasks.')
def test_dead_links():
    problemsMap = {}
    seenLinks = set()
    nav = Navigator()
    nav.populate()
    for identifier, node in nav.identifierNodeMap.items():
        soup = BeautifulSoup(node.html, 'html5lib')
        for link in soup.findAll('a'):
            ui = UrlInformer(link.get('href'))
            if ui.isRelative or ui.isMail or ui.url in seenLinks:
                continue
            seenLinks.add(ui.url)
            log.info("test '%s'", ui.url)
            try:
                response = requests.get(ui.url, timeout=5, verify=False)
                if response.status_code != 200:
                    problemsMap[ui.url] = response
            except Exception as e:
                problemsMap[ui.url] = e
    pprint.pprint(problemsMap)
    assert not problemsMap
def getVideoInfo(url):
    timeout = 200
    socket.setdefaulttimeout(timeout)
    sleep_download_time = 2
    time.sleep(sleep_download_time)
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Proxy-Connection': 'keep-alive',
        'Host': 'www.85porn.net',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
    }
    webContent = requests.get(url, headers=headers)
    data = webContent.text
    # Parse the video list page data with BeautifulSoup.
    soup = BeautifulSoup(data, 'html.parser')
    # Get the video URL, taking each matching element in turn.
    videoSource = soup.findAll('source')
    videoUrl = ''
    for singleRes in videoSource:
        videoUrl = singleRes['src']
    # Get the video title.
    title = soup.findAll('h4', {'class': 'visible-xs big-title-truncate m-t-0'})[0].string
    return videoUrl, title
def query(self):
    response = requests.get("https://hiraparac2014.mybluemix.net/snake")
    self.list = response.json()
    response = requests.get("https://hiraparac2014.mybluemix.net/food")
    self.food = response.json()
def lista():
    ramos = requests.get('http://localhost:3000/ramoAtividade').json()
    return ramos
config = json.loads(config_str)
print(config_str)
print(config)

# Fetch the API_KEY
youtube_api_key = config['youtube']['API_KEY']
print(youtube_api_key)

params = {
    'part': 'snippet',
    'q': '한지민',
    'maxResults': 30,
    'key': youtube_api_key
}
r = requests.get('https://www.googleapis.com/youtube/v3/search', params=params)
result = r.text
result_dict = json.loads(result)

kind = result_dict['kind']
etag = result_dict['etag']
next_page_token = result_dict['nextPageToken']
region_code = result_dict['regionCode']
page_info = result_dict['pageInfo']
page_info_total_results_per_page = page_info['totalResults']
page_info_results_per_page = page_info['resultsPerPage']

print('kind: %s' % kind)
print('etag: %s' % etag)
print('next_page_token: %s' % next_page_token)
print('region_code : %s' % region_code)
import requests

com = 'https://detik.com'
try:
    response = requests.get(com)
    if response.status_code == 200:
        print(f'success! Response= {response.status_code}')
        print(f'Content {response.text}')
    else:
        print(f'oopsy daisy ada kesalahan ente {response.status_code}')
except Exception as e:
    print('There is an error', e)
print('Program ended')
def getConfLocation():
    response = requests.get('http://172.18.0.4/api/data/config/?format=json&confname=coinmachine')
    portNo = ast.literal_eval(response.text)[0]["confvalue"]
    return portNo
if __name__ == "__main__": with open('local-corp-test-sample-corps.csv') as csvfile: reader = csv.reader(csvfile) populate_queries(csvfile) for query in queries['entity_type']['queries']: params = query.split(',') source_id = params[0].strip() type = params[1].strip() tob_url = TOB_URL_PREFIX entry = { 'url': f'{tob_url}/api/topic/ident/{type}/{source_id}/formatted', } r = requests.get(entry['url']) res = r.json() remove_dates(res) remove_db_ids(res) entry['result'] = res entry['result_str'] = json.dumps(res) queries['entity_type']['entries'][source_id] = entry write_path = 'local-corp-test-sample-corps.json' if not os.path.exists(write_path): open(write_path, 'w').close() with open(write_path, 'r+') as jsonfile: res = jsonfile.read() if res == '':
import pip._vendor.requests as rq

if __name__ == "__main__":
    url = "http://www.pythonchallenge.com/pc/def/equality.html"
    r = rq.get(url)
    raw = r.text
    start = raw.find('!')
    msg = raw[start:-5].rstrip("\n")
    res = ''
    # Collect lowercase letters guarded by exactly three uppercase letters
    # on each side (bounded by lowercase letters).
    for i in range(len(msg)):
        if msg[i].islower() and msg[i - 3:i + 4].isalpha():
            if msg[i - 4].islower() and msg[i - 3:i].isupper(
            ) and msg[i + 1:i + 4].isupper() and msg[i + 4].islower():
                res += msg[i - 4:i + 5] + "\n"
    print(res)
def lista():
    empresa = requests.get('http://localhost:3000/empresa').json()
    return empresa
def work(config):
    refresh_token(config)
    with open('credentials.json') as fp:
        credentials = json.load(fp)
    try:
        fp = open('state.json')
    except IOError:
        state = {
            'open_orders': {},
        }
    else:
        with fp:
            state = json.load(fp)

    print('Check open orders change')
    current_open_orders = {}
    for currency_pair in ('btc_krw', 'eth_krw'):
        resp = requests.get('https://api.korbit.co.kr/v1/user/orders/open', headers={
            'Authorization': 'Bearer {}'.format(credentials['access_token']),
        }, params={
            'currency_pair': currency_pair,
        })
        resp.raise_for_status()
        current_open_orders.update(dict((order['id'], order) for order in resp.json()))

    changes = []
    for id, order in current_open_orders.items():
        if id not in state['open_orders']:
            changes.append((None, order))
        else:
            prev_order = state['open_orders'][id]
            if order != prev_order:
                changes.append((prev_order, order))
    for id, prev_order in state['open_orders'].items():
        if id not in current_open_orders:
            changes.append((prev_order, None))

    closed_orders = [prev for (prev, cur) in changes if cur is None]
    if closed_orders:
        # check status for disappeared open orders (could be deleted)
        resp = requests.get('https://api.korbit.co.kr/v1/user/orders', headers={
            'Authorization': 'Bearer {}'.format(credentials['access_token']),
        }, params={
            'id': [order['id'] for order in closed_orders],
        })
        resp.raise_for_status()
        not_deleted_order_ids = set(order['id'] for order in resp.json())
        closed_orders = [order for order in closed_orders if order['id'] in not_deleted_order_ids]

    if closed_orders:
        push(config, {
            'type': 'note',
            'title': u'[Korbit] 체결됨',
            'body': '\n'.join(
                json.dumps(order, indent=2, ensure_ascii=False)
                for order in closed_orders
            ),
        })

    state['open_orders'] = current_open_orders
    with open('state.json', 'w') as fp:
        json.dump(state, fp)
#
# print(requests.post('http://localhost:5000//orders/complete', json=
#     {
#         "courier_id": 2,
#         "order_id": 10,
#         "complete_time": "2021-01-10T10:33:01.42Z"
#     }).json())
#
# print(requests.post('http://localhost:5000//orders/complete', json=
#     {
#         "courier_id": 2,
#         "order_id": 11,
#         "complete_time": "2021-01-10T10:33:01.42Z"
#     }).json())
#
# print(requests.post('http://localhost:5000//orders/complete', json=
#     {
#         "courier_id": 2,
#         "order_id": 12,
#         "complete_time": "2021-01-10T10:33:01.42Z"
#     }).json())
#
# print(requests.post('http://localhost:5000//orders/complete', json=
#     {
#         "courier_id": 2,
#         "order_id": 9,
#         "complete_time": "2021-01-10T10:33:01.42Z"
#     }).json())

print(get('http://localhost:5000/couriers/2').json())
import math
import sys
from os import rename

from pip._vendor import requests

r = requests.get("https://coreyms.com")
print(r.status_code)
print(r.ok)
        'LinkStyles-link_2v8n4 LinkStyles-left_3Y6oH LinkStyles-small_od-Hs LinkStyles-isBold_2ZPpu'
    })
    p = soup.find_all('span', {'class': 'keyInfo-content_1iwSZ'})
    price = p[2].get_text()
    author = soup.find(
        'h2', {
            'class': 'heading-wrapper_1at_r heading-sBreakpointAlignmentleft_Gh9ud heading-sBreakpointSizelarge_1a0Mj heading-black_6_KIa heading-isRegular_1inPG'
        })
    arr = [cat[1].get_text(), price, author.get_text()]
    return arr


url = 'https://www.futurelearn.com/courses?filter_category=open&filter_course_type=open&filter_availability=started&all_courses=1'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
courses = soup.find_all(
    class_='m-card Container-wrapper_GWW4X Container-grey_3ORsI')
data = {}
data['courses'] = []
i = 0
for course in courses:
    image = course.find(class_='image-cover_3Epqi')['src']
    author = course.find(
        class_=
def get_clients(self, request, format=None):
    r = requests.get("https://sandbox.moip.com.br/assinaturas/v1/customers",
                     headers=create_moip_header()).json()
    clients = [{'code': client['code'], 'name': client['fullname']}
               for client in r['customers']]
    return Response(clients)
def get_plans(self, request, format=None):
    r = requests.get("https://sandbox.moip.com.br/assinaturas/v1/plans",
                     headers=create_moip_header()).json()
    plans = [{'code': plan['code'], 'name': plan['name']}
             for plan in r['plans']]
    return Response(plans)
def servico_por_id(pk):
    servico = requests.get('http://localhost:3000/servico/' + pk).json()
    return servico
def posts(self):
    response = requests.get("https://jsonplaceholder.typicode.com/posts")
    return response.json()
def empresa_por_id(id):
    return requests.get('http://localhost:3000/empresa/' + str(id)).json()
def scrap_images():
    url = input('Saisis une url valide ...')
    url = str(url)
    return requests.get(url).text.count("<img")
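# Usage sketch for scrap_images() above: it prompts for a URL and counts
# occurrences of "<img" in the raw HTML, which approximates (and can
# overcount, e.g. inside comments or scripts) the number of image tags.
print(scrap_images())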
# Productivity scores (RescueTime's -2..2 scale) mapped to labels; the
# positive entries are assumed, as the original fragment starts mid-dict.
MAPPING = {
    2: 'Very Productive',
    1: 'Productive',
    0: 'Neutral',
    -1: 'Unproductive',
    -2: 'Very Unproductive'
}

if not os.path.exists(API_KEY):
    print('X')
    print('---')
    print('Missing API Key')
    exit()

with open(API_KEY) as fp:
    key = fp.read().strip()

result = requests.get('https://www.rescuetime.com/anapi/data', params={
    'key': key,
    'resolution_time': 'day',
    'restrict_begin': '2016-01-05',
    'restrict_end': '2016-01-05',
    'format': 'json',
    'restrict_kind': 'productivity',
}).json()

pulse = requests.get('https://www.rescuetime.com/anapi/current_productivity_pulse.json', params={
    'key': key,
}).json()

print('%s | color=%s' % (pulse['pulse'], pulse['color']))
print('---')
print('Rescue Time | href=https://www.rescuetime.com/dashboard?src=bitbar')
for rank, seconds, people, productivity in result['rows']:
    print('%s %s' % (MAPPING[productivity], round(seconds / 60, 2)))
def get_Html(self, url):
    try:
        r = requests.get(url)
        return r.text
    except requests.exceptions.ConnectionError as e:
        print("Error:", e)
# Basic web scrape that will return the title, link and summary of your search!
from bs4 import BeautifulSoup
from pip._vendor import requests

search = input("What would you like to search? ")
params = {"q": search}
# Must have the header below.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
}

res = requests.get("http://www.bing.com/search", params=params, headers=headers)
print("\n")
soup = BeautifulSoup(res.text, "html.parser")
# print(soup.prettify())

# Find the 'ol' HTML element with id equal to 'b_results'.
results = soup.find("ol", {"id": "b_results"})
# Find all 'li' HTML elements with class equal to 'b_algo' inside the one found above.
links = results.findAll("li", {"class": "b_algo"})
# print(results)
# print(links)
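# A hedged continuation of the scrape above, since the script never actually
# prints the promised title/link/summary: in Bing's markup (as of when this
# pattern worked) each "b_algo" result holds an <h2><a> with the title and
# link, and a <p> with the summary. Guards are included because any of these
# elements may be missing or the markup may have changed.
for item in links:
    heading = item.find("h2")
    summary = item.find("p")
    if heading and heading.find("a"):
        print("Title:", heading.get_text(strip=True))
        print("Link:", heading.find("a").get("href"))
    if summary:
        print("Summary:", summary.get_text(strip=True))
    print("-" * 40)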
# coding:utf-8
# Import the relevant modules.
from bs4 import BeautifulSoup
from pip._vendor import requests

url = "http://www.autohome.com.cn/beijing/"
# Request the URL and fetch its text content.
wbdata = requests.get(url).text
# Parse the fetched text.
soup = BeautifulSoup(wbdata, 'lxml')
# Locate the target elements in the parsed document with a CSS selector; returns a list.
news_titles = soup.select("div.text > em.f14 > a.linkto")
# Iterate over the returned list.
for n in news_titles:
    # Extract the title and link information.
    title = n.get_text()
    link = n.get("href")
    data = {'标题': title, '链接': link}
    print(data)
def retrieve_url(self):
    # Note: this returns the full Response object; callers that want the URL
    # string itself should use .text on the result.
    url_to_retrieve = requests.get(f"{self.parent_ip}/scrapper/latest_url")
    return url_to_retrieve
from urllib.parse import urlparse

from selenium import webdriver
from playsound import playsound
from pip._vendor import requests

headers = {
    # Transfer-Encoding: chunked — include in the header when the size of the
    # payload to send is unknown.
    'Host': 'kakaoi-newtone-openapi.kakao.com',
    'Content-Type': 'application/xml',
    'X-DSS-Service': 'DICTATION',
    'Authorization': 'KakaoAK 00000000000000000',
}

url = "https://dapi.kakao.com/v2/local/search/category.json?category_group_code=CE7&radius=350&y=37.550950&x=126.941017"
result = requests.get(urlparse(url).geturl(),
                      headers={"Authorization": "KakaoAK 00000000000000000"})  # enter your own API key

driver = webdriver.Chrome(
    executable_path=r'C:\Users\may05\PycharmProjects\chromedriver.exe'
)  # enter the location of your Chrome driver
driver.implicitly_wait(3)

json_obj = result.json()
market_list = json_obj.get("documents")
idx = 1
for market in market_list:
    driver.get(market.get("place_url"))
    menu_list = driver.find_element_by_class_name(
        "list_menu").find_elements_by_class_name("loss_word")
    # No error handling yet, so this breaks if the class name changes even
    # slightly. To be fixed later when there is time.
# coding:utf-8
# Tianshan Zhineng: Python web crawler in practice
from bs4 import BeautifulSoup
from datetime import datetime
from pip._vendor import requests

res = requests.get('http://news.sina.com.cn/china/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

# lesson 8
for news in soup.select('.news-item'):
    if len(news.select('h2')) > 0:
        h2 = news.select('h2')[0].text
        time = news.select('.time')[0].text
        a = news.select('a')[0]['href']
        # print(','.join([time, h2, a]))
        # The join above is meant to print every element of the list as Chinese text;
        # otherwise, even if every element is utf-8, list-form output is not guaranteed.

# lessons 9-10-11: scrape the news article page
res = requests.get(
    "http://news.sina.com.cn/c/gat/2016-09-06/doc-ifxvqctu6364150.shtml")
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, "html.parser")
h1 = soup.select("#artibodyTitle")[0].text
# The content under .time-source also includes the source, which needs filtering.
# If the output contains //, add strip() to remove it.
# Note that encode() does not change the object's own encoding; it only returns
# an object in the specified encoding.
timesource = soup.select(".time-source")[0].contents[0].strip().encode('utf-8')  # content source
Run this program on a mobile phone with the following apps:
Android: https://play.google.com/store/apps/details?id=ru.iiec.pydroid3&hl=en_US&gl=US
iPhone: https://apps.apple.com/in/app/python3ide/id1357215444
Copy and paste the code in 'product_to_sats_calculator.py' into the app, or download
the .py file and open it in the app.
See an example screenshot of the output here: https://i.postimg.cc/fRPdWTJM/sat-price-calc.jpg
"""

# Import modules.
from pip._vendor import requests  # This needs to be fixed. It should just be
# "import requests", but I cannot get it to work.

# Extract the bitcoin USD price from the API text and convert it to a float.
api_url = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
btc_usd_string = api_url.text[372:383]  # Isolate the USD price from the API text;
# for example, the price looks like '37762.7667'.
btc_usd = float(btc_usd_string.replace(',', ''))  # Remove the comma from the price
# string and convert the price to a float; keeps four decimal places for more precision.

# Display dividers, title, and bitcoin price in USD.
main_title = "Heady Tie Dyes"  # Set the title of the store; the text within the quotes can be edited as needed.
print("=" * 25)  # Divider.
print(main_title.center(25))  # Display centered title.
print("=" * 25)  # Divider.
btc_usd_rounded = str(format(round(btc_usd, 2), ','))  # Round to two decimal places, format, and make a string.
print(("1 BTC = $" + btc_usd_rounded).center(25))  # Display Bitcoin in USD.
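# A hedged alternative to the fixed-index slicing above: the CoinDesk payload
# is JSON, so parsing it is less brittle than text[372:383]. The
# "bpi" -> "USD" -> "rate_float" path matches the payload as documented; if
# the schema changes, this raises a KeyError instead of silently grabbing the
# wrong characters.
btc_usd_parsed = api_url.json()["bpi"]["USD"]["rate_float"]
print("Parsed BTC/USD price:", btc_usd_parsed)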
def getCommentCounts(newsurl):
    m = re.search('doc-i(.+).shtml', newsurl)
    newsid = m.group(1)
    comments = requests.get(commentURL.format(newsid))
    # The response is JSONP of the form "var data=<json>", so strip the prefix.
    jd = json.loads(comments.text.strip('var data='))
    return jd['result']['count']['total']
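# Usage sketch for getCommentCounts() above. commentURL is not defined in this
# snippet, so a hypothetical shape is assumed: a module-level format string
# whose "{}" placeholder receives the news id parsed from the article URL.
commentURL = 'http://comment5.news.sina.com.cn/page/info?format=js&newsid=comos-{}'  # hypothetical
print(getCommentCounts('http://news.sina.com.cn/c/gat/2016-09-06/doc-ifxvqctu6364150.shtml'))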
def ramo_por_id(pk):
    ramo = requests.get('http://localhost:3000/ramoAtividade/' + pk).json()
    return ramo
def index():
    roles = requests.get(f'{API_URL}/roles')
    roles = roles.json()
    # print(roles)
    return render_template('index.html', roles=roles)
def m(lang, str1):
    if lang == 'en':
        jsonUrl = 'https://minecraft.gamepedia.com/api.php?action=query&format=json&prop=info&inprop=url&redirects&titles='
        metaUrl = 'https://minecraft.gamepedia.com/'
    else:
        jsonUrl = 'https://minecraft-' + lang + '.gamepedia.com/api.php?action=query&format=json&prop=info&inprop=url&redirects&titles='
        metaUrl = 'https://minecraft-' + lang + '.gamepedia.com/'
    try:
        pageName = str1
        getUrl = jsonUrl + pageName
        metaText = requests.get(getUrl, timeout=15)
        try:
            file = json.loads(metaText.text)
            pages = file['query']['pages']
            pageID = sorted(pages.keys())[0]
            if int(pageID) == -1:
                if 'missing' in pages['-1']:
                    try:
                        if lang == 'en':
                            searchUrl = 'https://minecraft.gamepedia.com/api.php?action=query&generator=search&gsrsearch=' + str1 + '&gsrsort=just_match&gsrenablerewrites&prop=info&gsrlimit=1&format=json'
                        else:
                            searchUrl = 'https://minecraft-' + lang + '.gamepedia.com/api.php?action=query&generator=search&gsrsearch=' + str1 + '&gsrsort=just_match&gsrenablerewrites&prop=info&gsrlimit=1&format=json'
                        getSearch = requests.get(searchUrl)
                        parseSearch = json.loads(getSearch.text)
                        searchPage = parseSearch['query']['pages']
                        searchPageID = sorted(searchPage.keys())[0]
                        searchTitle = searchPage[searchPageID]['title']
                        return ('[{"text":"发生错误:","color":"red"},{"text":"找不到条目，您是否要找的是:","color":"reset"},{"text":"' + searchTitle + '","bold":true,"underlined":true,"color":"white","clickEvent":{"action":"run_command","value":"!!&wiki-' + lang + ' ' + searchTitle + '"}},{"text":"?","color":"reset"}]')
                    except Exception:
                        return ('[{"text":"发生错误:","color":"red"},{"text":"找不到条目。","color":"reset"}]')
                else:
                    return ('[{"text":"您要的"},{"text":"' + pageName + '","bold":true,"underlined":true,"clickEvent":{"action":"open_url","value":"' + metaUrl + urllib.parse.quote(pageName.encode('UTF-8')) + '"}}]')
                    # return ('您要的'+pagename+':'+l+urllib.parse.quote(pagename.encode('UTF-8')))
            else:
                try:
                    pageUrl = pages[pageID]['fullurl']
                    if lang == 'en':
                        result = re.match(r'https://minecraft\.gamepedia.com/(.*)', pageUrl, re.M | re.I)
                        descUrl = 'https://minecraft.gamepedia.com/api.php?action=query&prop=extracts&exsentences=1&&explaintext&exsectionformat=wiki&format=json&titles=' + result.group(1)
                    else:
                        result = re.match(r'https://minecraft-(.*)\.gamepedia.com/(.*)', pageUrl, re.M | re.I)
                        descUrl = 'https://minecraft-' + result.group(1) + '.gamepedia.com/api.php?action=query&prop=extracts&exsentences=1&&explaintext&exsectionformat=wiki&format=json&titles=' + result.group(2)
                    getDesc = requests.get(descUrl, timeout=5)
                    parseDesc = json.loads(getDesc.text)
                    descText = parseDesc['query']['pages'][pageID]['extract']
                    try:
                        paraGraph = re.match(r'.*(\#.*)', str1)
                        page = pages[pageID]['fullurl'] + urllib.parse.quote(paraGraph.group(1).encode('UTF-8'))
                    except Exception:
                        page = pages[pageID]['fullurl']
                    resultName = re.match(r'https://.*?/(.*)', page)
                    unquoteName = urllib.parse.unquote(resultName.group(1), encoding='UTF-8')
                    unquoteName = re.sub('_', ' ', unquoteName)
                    if unquoteName == str1:
                        return ('[{"text":"您要的"},{"text":"' + pageName + '","bold":true,"underlined":true,"clickEvent":{"action":"open_url","value":"' + page + '"}},{"text":":"},{"text":"' + descText + '"}]')
                    else:
                        return ('[{"text":"您要的"},{"text":"' + pageName + '","bold":true,"underlined":true,"clickEvent":{"action":"open_url","value":"' + page + '"}},{"text":"(' + str1 + '->' + unquoteName + '):"},{"text":"' + descText + '"}]')
                    # return('您要的'+pagename+":"+xx)
                except Exception as e:
                    try:
                        paraGraph = re.match(r'.*(\#.*)', str1)
                        page = pages[pageID]['fullurl'] + urllib.parse.quote(paraGraph.group(1).encode('UTF-8'))
                    except Exception:
                        page = pages[pageID]['fullurl']
                    resultName = re.match(r'https://.*?/(.*)', page)
                    unquoteName = urllib.parse.unquote(resultName.group(1), encoding='UTF-8')
                    unquoteName = re.sub('_', ' ', unquoteName)
                    if unquoteName == str1:
                        return ('[{"text":"您要的"},{"text":"' + pageName + '","bold":true,"underlined":true,"clickEvent":{"action":"open_url","value":"' + page + '"}}]')
                    else:
                        return ('[{"text":"您要的"},{"text":"' + pageName + '","bold":true,"underlined":true,"clickEvent":{"action":"open_url","value":"' + page + '"}},{"text":"(' + str1 + '->' + unquoteName + ')"}]')
        except Exception as e:
            print(str(e))
            return ('[{"text":"发生错误:","color":"red"},{"text":"内容非法。","color":"reset"}]')
    except Exception as e:
        return ('[{"text":"发生错误:","color":"red"},{"text":"' + str(e) + '","color":"reset"}]')
def act_on_heater(api_endpoint):
    url = API_PROTO + API_SERVER + API_PATH + api_endpoint
    response = json.dumps(requests.get(url).content.decode().strip())
    return response
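# Usage sketch for act_on_heater() above, with the assumed module-level
# constants spelled out. The server address, base path, and endpoint name
# here are hypothetical placeholders.
API_PROTO = 'http://'
API_SERVER = '192.168.1.50'   # hypothetical heater controller address
API_PATH = '/api/'            # hypothetical base path
print(act_on_heater('status'))  # hypothetical endpoint name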
def lista():
    servico = requests.get('http://localhost:3000/servico').json()
    return servico
import pip._vendor.requests as requests
import smtplib
from email.message import EmailMessage
import json

status = 0
with open("websitedetails.json") as f:
    jsonData = json.load(f)
WebSiteName = jsonData['WebSiteDetail']['Name']
urlAddress = jsonData['WebSiteDetail']['URL']

try:
    response = requests.get(urlAddress)
    status = response.status_code
    response.raise_for_status()
except requests.HTTPError as http_err:
    print(f'HTTP error occurred: {http_err}')
except Exception as err:
    print(f'Other error occurred: {err}')

# ===================================================================
# --- Sender and Receiver Email Address
# ===================================================================
cssValue = '''
table#websiteTable{
    table-layout: auto;
    width: 50%;
}
table, th, td{
    border: 1px solid black;
    text-align: center;
}
def busca_por_cnpj(cnpj):
    empresa = requests.get('http://localhost:3000/empresa/' + cnpj).json()
    return empresa
def start(self, packName):
    if os.path.exists(packName + 'test/'):
        shutil.rmtree(packName + 'test/')
        os.makedirs(packName + 'test/')
    else:
        os.makedirs(packName + 'test/')
    loadplace = "file.txt"
    baseq = 'https://pypi.org/simple/'
    baseq += packName
    try:
        urllib.request.urlretrieve(baseq, 'text.txt')
    except urllib.error.HTTPError:
        print("Package " + packName + " not found")
        return
    f = open('text.txt', 'r')
    line = f.read()
    f.close()
    # Scan the simple-index HTML backwards for the last wheel link.
    while True:
        line1 = line
        i = line.rfind('<a href=')
        line = line[i + 9:]
        ii = line.rfind('#sha')
        line = line[:ii]
        if i == -1:
            return
        if line[len(line) - 1] != 'z':
            break
        else:
            line = line1[:i]
    f = open(r'tester' + packName + '.whl', "wb")
    ufr = requests.get(line)
    f.write(ufr.content)
    f.close()
    try:
        z = zipfile.ZipFile('tester' + packName + '.whl', 'r')
        z.extractall(packName + 'test/')
        z.close()
    except OSError:
        return
    # Locate the METADATA file inside the extracted wheel.
    filename = 'METADATA'
    buf = ''
    for root, dirnames, filenames in os.walk(packName + 'test/'):
        for file in filenames:
            if file == filename:
                buf = root + '\\' + file
    if buf == '':
        return
    buf = buf.replace('/', '\\')
    f = open(buf, 'r')
    line = f.read()
    f.close()
    # os.remove('tester' + packName + ".whl")
    line = line.replace('\n\n', '\n')
    # Walk the Requires-Dist entries and recurse into each dependency.
    while len(line) > 0:
        i = line.find('Requires-Dist:')
        line = line[i:]
        j = line.find('(')
        j1 = line.find("\n")
        j2 = line.find(';')
        j3 = line.find('[')
        j4 = line.find('extra')
        if i == -1:
            return
        if j4 != -1 and j4 < j1:
            return
        if j3 < j and j3 != -1 and j3 < j1:
            bufline = line[15:j3]
        elif j != -1 and j < j1:
            bufline = line[15:j - 1]
        elif j2 != -1 and j2 < j1:
            if line[j2 - 1] != ' ':
                bufline = line[15:j2]
            else:
                bufline = line[15:j2 - 1]
        else:
            bufline = line[15:j1]
        if bufline == '':
            return
        res = "\"" + packName + "\"" + '->' + "\"" + bufline + "\"" + ';'
        if res in self.data:
            pass  # dependency edge already recorded
        else:
            print(res)
            self.data.append(res)
            self.start(bufline)
        line = line[j1 + 1:]
        if i == -1:
            break
#!/usr/bin/python3
# encoding: utf-8
from bs4 import BeautifulSoup
from pip._vendor import requests
import datetime
from tkinter import *
from tkinter import messagebox

url = "https://sevilla.abc.es/rss/feeds/Sevilla_Sevilla.xml"
resp = requests.get(url)
soup = BeautifulSoup(resp.content, features="xml")
items = []
titulos = []
links = []
fechas = []
top = Tk()


def bdCargada():
    for i in soup.findAll('item'):
        items.append(i)
    for i in items:
        titulos.append(i.title.text)
        links.append(i.link.text)
        fechas.append(i.pubDate.text)
    messagebox.showinfo('BD cargada', 'BD cargada correctamente')


text = Text(top)
def get_google_provider_cfg():
    return requests.get(GOOGLE_DISCOVERY_URL).json()
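# Usage sketch for get_google_provider_cfg() above: GOOGLE_DISCOVERY_URL is
# assumed to point at Google's published OpenID Connect discovery document,
# whose JSON includes the authorization and token endpoints.
GOOGLE_DISCOVERY_URL = "https://accounts.google.com/.well-known/openid-configuration"
cfg = get_google_provider_cfg()
print(cfg["authorization_endpoint"], cfg["token_endpoint"])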