Example #1
import requests
from bs4 import BeautifulSoup
from note import extract_info  # helper module name assumed, matching the later examples
import csv

file = open("naver_books.csv", mode="w", newline="")
writer = csv.writer(file)
writer.writerow(["title", "img_src", "detail_link", "author", "publisher"])

final_result = []

for i in range(8):  # pages 2 through 9 (note the i+2 offset in the URL below)
    book_html = requests.get(
        f'https://book.naver.com/category/index.nhn?cate_code=100&tab=new_book&list_type=list&sort_type=publishday&page={i+2}'
    )
    book_soup = BeautifulSoup(book_html.text, 'html.parser')
    book_list_box = book_soup.find("ol", {"class": "basic"})
    book_list = book_list_box.find_all("li")

    final_result = final_result + extract_info(book_list)

print(book_list)  # debug: only the last page's list is shown here

for result in final_result:
    row = []
    row.append(result['title'])
    row.append(result['img_src'])
    row.append(result['detail_link'])
    row.append(result['author'])
    row.append(result['publisher'])

    writer.writerow(row)

# print(final_result)

file.close()
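extract_info is imported from a local note module that is not shown here. Below is a minimal sketch of what it might look like for this example, assuming each <li> holds an anchor to the detail page and a cover <img> whose alt text carries the title; the selectors and the empty author/publisher fields are assumptions that need to be matched to the real markup.

# note.py -- hypothetical sketch; tag/attribute choices are assumptions, not the real page structure
def extract_info(book_list):
    results = []
    for book in book_list:
        link_tag = book.find("a")    # first anchor assumed to point at the detail page
        img_tag = book.find("img")   # cover thumbnail assumed to carry the title in its alt text
        results.append({
            "title": img_tag.get("alt", "") if img_tag else "",
            "img_src": img_tag.get("src", "") if img_tag else "",
            "detail_link": link_tag.get("href", "") if link_tag else "",
            "author": "",       # replace with the real author selector
            "publisher": "",    # replace with the real publisher selector
        })
    return results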

Example #2

import requests
from bs4 import BeautifulSoup
from note import extract_info
import csv

file = open("hospital.csv", mode="w", encoding='euc-kr', newline='')
writer = csv.writer(file)
writer.writerow(["city", "district", "title", "number"])

hospital_html = requests.get(
    'https://www.mohw.go.kr/react/popup_200128_3.html')
hospital_html.encoding = 'utf-8'
hospital_soup = BeautifulSoup(hospital_html.text, "html.parser")

# From here, crawl the city/province (시도), district (시군구), screening center name (선별진료소), and phone number (전화번호), then save them to the CSV file!

hospital_list_box = hospital_soup.find("tbody", {"class": "tb_center"})
hospital_list = hospital_list_box.find_all('tr')

print(hospital_list)  # debug: inspect the raw table rows before extracting

final_result = extract_info(hospital_list)

for hospital_row in final_result:
    row = []
    row.append(hospital_row['시도'])
    row.append(hospital_row['시군구'])
    row.append(hospital_row['선별진료소'])
    row.append(hospital_row['전화번호'])
    writer.writerow(row)

print("크롤링 끝!")
Example #3
import requests
from bs4 import BeautifulSoup
from note import extract_info
import csv

file = open("notes.csv", mode="w", newline='')
writer = csv.writer(file)
writer.writerow(["title", "price", "img_src", 'link'])

final_result = []

for i in range(30):
    note_html = requests.get(f'https://search.shopping.naver.com/search/all.nhn?pagingIndex={i+1}&pagingSize=80&query=노트')
    note_soup = BeautifulSoup(note_html.text, "html.parser")
    note_list_box = note_soup.find("ul", {"class" : "goods_list"})
    note_list = note_list_box.find_all('li', {"class" : "_itemSection"})

    final_result = final_result + extract_info(note_list)


for result in final_result:

    row = []
    row.append(result['title'])
    row.append(result['price'])
    row.append(result['img_src'])
    row.append(result['link'])

    writer.writerow(row)

file.close()
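Here too extract_info comes from the note module. A sketch under the assumption that each _itemSection <li> contains a product anchor, a thumbnail <img>, and a price string with thousands separators; the "tit" and "price" class names are guesses and must be checked against the actual list markup.

# note.py -- hypothetical sketch; "tit" and "price" class names are guesses
def extract_info(note_list):
    results = []
    for item in note_list:
        link_tag = item.find("a", {"class": "tit"})        # product title/link (class name assumed)
        price_tag = item.find("span", {"class": "price"})  # price text (class name assumed)
        img_tag = item.find("img")
        results.append({
            "title": link_tag.get_text(strip=True) if link_tag else "",
            "price": price_tag.get_text(strip=True).replace(",", "") if price_tag else "",
            "img_src": img_tag.get("src", "") if img_tag else "",
            "link": link_tag.get("href", "") if link_tag else "",
        })
    return results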