# Imports assumed by all three examples below; `holiday` is taken to be a
# project-local helper module that flags Japanese market holidays.
import csv
from datetime import datetime as dt

import requests
from bs4 import BeautifulSoup

import holiday


def main():
    if holiday.isHoliday(dt.now().strftime('%Y%m%d')):
        return

    # Technical screen: stocks with a sharp increase in trading volume
    url = "https://kabutan.jp/tansaku/?mode=2_0311&market=0&capitalization=-1&stc=v3&stm=1&page="
    # Create the per-day file
    file1 = open(
        './volume_rapid_increase_ranking/volume_rapid_increase_ranking_' +
        dt.now().strftime('%Y%m%d') + '.csv',
        'w',
        encoding="utf-8")
    writer1 = csv.writer(file1, lineterminator='\n')
    # Append to the cumulative file
    file2 = open('./data/volume_rapid_increase_ranking.csv',
                 'a',
                 encoding="utf-8")
    writer2 = csv.writer(file2, lineterminator='\n')

    # Header row for the daily file: date, rank, code, name, market, price,
    # limit-high flag (S高), change vs. previous day, volume, PER, PBR, yield
    writer1.writerow([
        "日付", "順位", "コード", "銘柄名", "市場", "株価", "S高", "前日比", "出来高", "PER", "PBR",
        "利回り"
    ])
    rank = 1
    # Walk ranking pages 1-7
    for i in range(1, 8):
        html_data = requests.get(url + str(i))
        soup = BeautifulSoup(html_data.content, "html.parser")
        table = soup.find_all("table", {"class": "stock_table st_market"})[0]
        tbody = table.find("tbody").find_all("tr")
        for row in tbody:
            td = row.find_all('td')
            th = row.find_all('th')
            # Build the row once and write it to both the daily and the
            # cumulative file
            row_data = [
                dt.now().strftime('%Y/%m/%d'), rank, td[0].get_text(),
                th[0].get_text(), td[1].get_text(), td[4].get_text(),
                td[5].get_text(), td[6].get_text(), td[7].get_text(),
                td[8].get_text(), td[9].get_text(), td[10].get_text()
            ]
            writer1.writerow(row_data)
            writer2.writerow(row_data)
            rank += 1
    file1.close()
    file2.close()
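
All three scrapers fetch pages in a tight loop with no error handling, so a transient network failure or a missing page raises an unhandled exception at the `[0]` index. A minimal, hedged sketch of a more defensive fetch; the `fetch_table` helper, the timeout, and the one-second delay are assumptions, not part of the original scripts:

import time

def fetch_table(url, page, css_class, delay=1.0):
    # Hypothetical helper: fetch one ranking page, fail loudly on HTTP
    # errors, and pause between requests so the site is not hammered.
    resp = requests.get(url + str(page), timeout=10)
    resp.raise_for_status()
    time.sleep(delay)
    soup = BeautifulSoup(resp.content, "html.parser")
    tables = soup.find_all("table", {"class": css_class})
    return tables[0] if tables else None  # None when the page has no table

A caller can break out of the page loop when fetch_table returns None instead of crashing on a short result set.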
Example #2
def main():
    if holiday.isHoliday(dt.now().strftime('%Y%m%d')):
        return

    # Stocks announcing earnings or revising guidance during trading hours
    url1 = "https://kabutan.jp/warning/?mode=4_2&market=0&capitalization=-1&stc=&stm=0&page="
    # Stocks announcing earnings or revising guidance after the close
    url2 = "https://kabutan.jp/warning/?mode=4_3&market=0&capitalization=-1&stc=&stm=0&page="
    file = open('./data/earnings_announcement.csv', 'a', encoding="utf-8")
    writer = csv.writer(file, lineterminator='\n')
    # Pages 1-19 of the intraday list
    for i in range(1, 20):
        html_data = requests.get(url1 + str(i))
        soup = BeautifulSoup(html_data.content, "html.parser")
        table = soup.find_all("table", {"class": "stock_table"})[0]
        tbody = table.find("tbody").find_all("tr")
        for row in tbody:
            th = row.find_all('th')
            td = row.find_all('td')
            writer.writerow([
                td[0].get_text().strip(), th[0].get_text().strip(),
                td[1].get_text().strip(), td[3].get_text().strip(),
                td[4].get_text().strip(), td[6].get_text().strip(),
                td[7].get_text().strip(), td[8].get_text().strip(),
                td[9].get_text().strip(),
                dt.now().strftime('%Y/%m/%d')
            ])
    # Pages 1-59 of the after-close list
    for i in range(1, 60):
        html_data = requests.get(url2 + str(i))
        soup = BeautifulSoup(html_data.content, "html.parser")
        table = soup.find_all("table", {"class": "stock_table"})[0]
        tbody = table.find("tbody").find_all("tr")
        for row in tbody:
            th = row.find_all('th')
            td = row.find_all('td')
            writer.writerow([
                td[0].get_text().strip(), th[0].get_text().strip(),
                td[1].get_text().strip(), td[3].get_text().strip(),
                td[4].get_text().strip(), td[6].get_text().strip(),
                td[7].get_text().strip(), td[8].get_text().strip(),
                td[9].get_text().strip(),
                dt.now().strftime('%Y/%m/%d')
            ])
    file.close()
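
The two page loops above are identical except for the URL and the page count. A sketch that folds them into a single parameterized pass; the (url, stop) pairs simply mirror the original range(1, 20) and range(1, 60) bounds:

    for url, stop in [(url1, 20), (url2, 60)]:
        for i in range(1, stop):
            html_data = requests.get(url + str(i))
            soup = BeautifulSoup(html_data.content, "html.parser")
            table = soup.find_all("table", {"class": "stock_table"})[0]
            for row in table.find("tbody").find_all("tr"):
                th = row.find_all('th')
                td = row.find_all('td')
                writer.writerow([
                    td[0].get_text().strip(), th[0].get_text().strip(),
                    td[1].get_text().strip(), td[3].get_text().strip(),
                    td[4].get_text().strip(), td[6].get_text().strip(),
                    td[7].get_text().strip(), td[8].get_text().strip(),
                    td[9].get_text().strip(),
                    dt.now().strftime('%Y/%m/%d')
                ])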
Example #3
def main():
    if holiday.isHoliday(dt.now().strftime('%Y%m%d')):
        return

    html_data = requests.get(
        'https://info.finance.yahoo.co.jp/ranking/?kd=27&mk=1&tm=d&vl=a')
    soup = BeautifulSoup(html_data.content, "html.parser")
    table = soup.find_all("table", {"class": "rankingTable"})[0]
    tbody = table.find("tbody").find_all("tr")

    # Create the per-day file
    file1 = open('./limit_high/limit_high_' + dt.now().strftime('%Y%m%d') +
                 '.csv',
                 'w',
                 encoding="utf-8")
    writer1 = csv.writer(file1, lineterminator='\n')
    # Append to the cumulative file
    file2 = open('./data/limit_high.csv', 'a', encoding="utf-8")
    writer2 = csv.writer(file2, lineterminator='\n')

    # ヘッダー
    for index, row in enumerate(thead):
        cell = row.findAll(['th'])
        writer1.writerow(
            ["日付", "コード", "市場", "名称", "取引値", "前日比(%)", "前日比", "高値"])
    for row in tbody:
        cell = row.find_all('td')
        # Build the row once and write it to both files
        row_data = [
            dt.now().strftime('%Y/%m/%d'), cell[0].get_text(),
            cell[1].get_text(), cell[2].get_text(), cell[4].get_text(),
            cell[5].get_text(), cell[6].get_text(), cell[7].get_text()
        ]
        writer1.writerow(row_data)
        writer2.writerow(row_data)
    file1.close()
    file2.close()
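
Each example opens its CSV files by hand and only closes them on the happy path, so an exception mid-scrape leaks the handles. A sketch of the same daily-plus-cumulative pattern using context managers, with the paths from the limit-high example above:

    daily_path = './limit_high/limit_high_' + dt.now().strftime('%Y%m%d') + '.csv'
    with open(daily_path, 'w', encoding='utf-8') as f1, \
            open('./data/limit_high.csv', 'a', encoding='utf-8') as f2:
        writer1 = csv.writer(f1, lineterminator='\n')
        writer2 = csv.writer(f2, lineterminator='\n')
        # ...write the header and rows exactly as above; both files are
        # closed automatically even if the scrape raises.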