import requests as rq
from bs4 import BeautifulSoup
from noti import send

# Naver news section headers: politics, economy, society, life/culture,
# world, IT/science — index-aligned with the #ranking_10{n} selectors below.
head_title = ["정치", "경제", "사회", "생활/문화", "세계", "IT/과학"]

url = 'https://news.naver.com/'
req = rq.get(url)
soup = BeautifulSoup(req.text, "html.parser")

# One message per section: header line followed by the ranked article links.
for section_idx in range(6):
    head = head_title[section_idx] + "\n"
    # Ranked article anchors for this section live under #ranking_10{n}.
    ranking_titles = soup.select(f"#ranking_10{section_idx} > ul > li > a")
    # FIX: the original reused `idx` for both the section loop and the
    # enumerate() rank counter, shadowing the outer variable; it also built
    # the body with repeated `+=`. A distinct name + join avoids both.
    body_data = "".join(
        f'●{rank_no}위 {anchor.text}\n{url + anchor["href"]}\n'
        for rank_no, anchor in enumerate(ranking_titles, 1)
    )
    send(head + body_data)
def main():
    """Pick one random stored entry, format it, and send it.

    Relies on module-level helpers defined elsewhere in this file:
    readData() -> list, template(item) -> str, send(str).
    """
    final_data = readData()
    # FIX: the original used random.randint(0, len(final_data)), which is
    # inclusive on both ends and could return len(final_data), raising
    # IndexError. random.choice selects a valid element directly.
    selected_data = template(random.choice(final_data))
    send(selected_data)
}
# NOTE(review): the `}` above closes a `result` dict literal whose opening
# lies before this chunk; `soup`, `template`, and `send` are also defined
# earlier in the file.

# One <tr> per region in the '.num' statistics table; the first row is
# skipped (presumably a header/total row — TODO confirm against the page).
table_rows = soup.select('.num tbody tr')
for row in table_rows[1:]:
    # Region name sits in the row's <th>; the numeric columns in its <td>s.
    city = row.select('th')[0].text
    tds = row.select('td')
    result["city"].append(city)
    result["prev_rise"].append(tds[0].text)   # daily increase in confirmed cases (확진자증감)
    result["confirm"].append(tds[1].text)     # confirmed cases (확진자)
    result["die"].append(tds[2].text)         # deaths (사망자)
    result["rate"].append(tds[3].text)        # incidence rate (발생률)
    result["check"].append(tds[4].text)       # tests performed (검사자수)
    result['count'] += 1

data = template(result)
print(data)

# Timestamp shown on the page, suffixed with '기준' ("as of").
standard = soup.select('.timetable p span')[0].text + ' 기준'

# Legend appended to the outgoing message; column meanings mirror the
# result keys populated above.
helper = '''
Column Description:
- city: 도시
- CheckInc: 확진자증감
- Confirm: 확진자
- Dead: 사망자
- Incidence: 발생률
- Inspec: 검사자수
'''

send(standard + data + helper)
# Fetch the latest 1-minute candle for every configured ticker and append a
# formatted summary line to `msg` (defined earlier in this file).
for coin in COIN_TICKERS:
    # Upbit CDN candle endpoint for this ticker.
    res = rq.get(
        f"https://crix-api-cdn.upbit.com/v1/crix/candles/minutes/1"
        f"?code=CRIX.UPBIT.{coin}&count=0"
    )
    candle = res.json()[0]

    # Values in template order, thousands-separated where numeric.
    fields = (
        '업비트',                                        # exchange name
        coin,                                            # ticker code
        "{:,}".format(candle['openingPrice']),
        "{:,}".format(candle['highPrice']),
        "{:,}".format(candle['lowPrice']),
        "{:,}".format(candle['tradePrice']),
        "{:,}".format(candle['candleAccTradeVolume']),
        "{:,}".format(candle['candleAccTradePrice']),
    )
    msg += ''' 거래소: %s, 코드: %s, 시작가: %s, 고가: %s, 저가: %s, 채결가: %s, 채결량(1Day): %s, 채결금액(1Day): %s ''' % fields
    # NOTE(review): `msg` accumulates across tickers, so each send() repeats
    # all earlier coins; if one combined message per run was intended,
    # send() belongs after the loop — confirm against the original layout.
    send(msg)
    # print(msg)
    # Tail of a function whose `def` (and the `lis`/`result` bindings) lie
    # before this chunk: formats the top-5 ranked <li> items into strings.
    for li in lis[:5]:
        # Each <li> carries rank and source in its first two <span>s.
        content = li.find_all('span')
        rank = content[0].text
        source = content[1].text
        link = li.find('a').get('href')
        # NOTE(review): this replace() chain strips one kind of space, turns
        # newlines into spaces, then re-inserts ':' / '/' separators; the
        # three ' ' arguments may originally have been distinct whitespace
        # characters that this copy has normalized — confirm before editing.
        txt = li.text.strip().replace(" ", "").replace("\n", " ").replace(" ", " :").replace(" ", " /")
        result.append(f'{txt} / {link}')
    # Trailing blank entry (presumably a separator for the joined output —
    # TODO confirm whether this belongs inside the loop in the original).
    result.append("")
    return result


if __name__ == "__main__":
    # Forecast pages per location — keys: Saesol-dong, Ansan (city).
    URL = {'새솔동': 'https://n.weather.naver.com/today/02590140',
           '안산': 'https://n.weather.naver.com/today/02271103'}
    # One weather message per configured location.
    for k, v in URL.items():
        weather_data = today_weather(k, v)
        weather = ("\n".join(str(i) for i in weather_data))
        send(weather)

    english_data = today_english()
    # Join the list for output: "" would run entries together, "\n" puts one
    # entry per line.
    english = ("\n".join(str(i) for i in english_data))
    send(english)

    news_data = today_news()
    news = ("\n\n".join(str(i) for i in news_data))
    send(news)