Example #1
            try:
                data_df = self.database.get_data(
                    self.database_name,
                    self.collection_name,
                    query={"Date": {
                        "$regex": _date
                    }})
            except Exception:
                # the query for this date failed; skip it
                continue
            if data_df is None:
                continue
            data_df_drop_duplicate = data_df.drop_duplicates(["Url"])
            # drop_duplicates keeps the first document per Url; delete
            # every _id that was dropped, i.e. the later duplicates
            for _id in list(
                    set(data_df["_id"]) - set(data_df_drop_duplicate["_id"])):
                collection.delete_one({'_id': _id})
                self.delete_num += 1
            # logging.info("{} finished ... ".format(_date))
        logging.info(
            "DB:{} - COL:{} covered {} dates, deleted {} duplicates ... "
            .format(self.database_name, self.collection_name,
                    str(len(date_list)), self.delete_num))


if __name__ == "__main__":
    from Killua.deduplication import Deduplication
    from Kite import config

    Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK).run()
    Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
    Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_JRJ).run()
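
For reference, the fragment above is the tail of Deduplication.run, so _date, date_list and collection are bound earlier in the method. The same Url-based technique, reduced to a self-contained sketch with pymongo and pandas (the database and collection names are taken from Example #3; everything else is illustrative, not the project's actual implementation):

# Minimal sketch: keep the first document per Url, delete the rest.
import pandas as pd
from pymongo import MongoClient

def dedup_by_url(collection):
    df = pd.DataFrame(list(collection.find({}, {"_id": 1, "Url": 1})))
    if df.empty:
        return 0
    kept = df.drop_duplicates(["Url"])  # first occurrence wins
    doomed = set(df["_id"]) - set(kept["_id"])
    for _id in doomed:
        collection.delete_one({"_id": _id})
    return len(doomed)

if __name__ == "__main__":
    client = MongoClient("localhost", 27017)
    print("deleted", dedup_by_url(client["finnewshunter"]["cnstock"]), "duplicates")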
Example #2
# """
# if __name__ == "__main__":
#     nbd_spyder = NbdSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_NBD)
#     nbd_spyder.get_historical_news(start_page=684)
#
#     Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
#     DeNull(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
"""
Example-2:
Crawl real-time news data
"""
if __name__ == '__main__':
    from Kite.database import Database
    from Kite import config

    from Leorio.tokenization import Tokenization

    from Killua.denull import DeNull
    from Killua.deduplication import Deduplication

    import threading

    # If there is no historical data, crawl from scratch; if historical data
    # already exists, resume from the most recent stored timestamp.
    # E.g. if the newest news item in the history is dated "2020-12-09 20:37:10",
    # crawling resumes from that time.
    nbd_spyder = NbdSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_NBD)
    nbd_spyder.get_historical_news()

    Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()
    DeNull(config.DATABASE_NAME, config.COLLECTION_NAME_NBD).run()

    nbd_spyder.get_realtime_news()
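
The resume behaviour described above needs the newest timestamp already stored. A minimal sketch of that lookup, assuming a pymongo collection whose Date field is a "YYYY-MM-DD HH:MM:SS" string as in Example #1 (the helper name latest_news_date is hypothetical, not part of NbdSpyder):

# Sketch: find the newest stored Date to decide where crawling resumes.
from datetime import datetime
from pymongo import MongoClient

def latest_news_date(collection):
    # lexicographic order equals chronological order for this date format
    doc = collection.find_one(sort=[("Date", -1)])
    if doc is None:
        return None
    return datetime.strptime(doc["Date"], "%Y-%m-%d %H:%M:%S")

if __name__ == "__main__":
    col = MongoClient("localhost", 27017)["finnewshunter"]["nbd"]
    start = latest_news_date(col)
    print("crawl from scratch" if start is None else "resume from {}".format(start))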
Example #3
# 1. Crawl historical news (commented out after the initial run)
# nbd_spyder = NbdSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_NBD)
# nbd_spyder.get_historical_news(684)

# 2. Extract the stocks mentioned in each news article and store their
#    stock codes in a new column of the collection
from Leorio.tokenization import Tokenization

tokenization = Tokenization(import_module="jieba",
                            user_dict="./Leorio/financedict.txt")
tokenization.update_news_database_rows(config.DATABASE_NAME, "cnstock")
# tokenization.update_news_database_rows(config.DATABASE_NAME, "nbd")
# tokenization.update_news_database_rows(config.DATABASE_NAME, "jrj")

# 3. Deduplicate the historical data
from Killua.deduplication import Deduplication

Deduplication("finnewshunter", "cnstock").run()
# Deduplication("finnewshunter", "nbd").run()
# Deduplication("finnewshunter", "jrj").run()  # 暂时只有jrj需要去重

# 4. Drop rows containing null values from the historical data
from Killua.denull import DeNull

# DeNull("finnewshunter", "cnstock").run()
# DeNull("finnewshunter", "nbd").run()
# DeNull("finnewshunter", "jrj").run()

# 5. Build a new database: for each stock, collect every news article that
#    mentions it and label each one "positive", "negative" or "neutral"
from Killua.buildstocknewsdb import GenStockNewsDB
# gen_stock_news_db = GenStockNewsDB()
# gen_stock_news_db.get_all_news_about_specific_stock("finnewshunter", "cnstock")
# gen_stock_news_db.get_all_news_about_specific_stock("finnewshunter", "nbd")
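
Step 5 implies each article row carries the stock codes extracted in step 2. A sketch of that per-stock lookup, where the column name "RelatedStockCodes" and its space-separated string format are assumptions for illustration only (the real schema lives in GenStockNewsDB):

# Sketch: fetch every news document whose extracted code list mentions one stock.
from pymongo import MongoClient

def news_for_stock(collection, stock_code):
    # \b anchors keep "600519" from matching inside a longer code
    pattern = r"\b{}\b".format(stock_code)
    return collection.find({"RelatedStockCodes": {"$regex": pattern}})

if __name__ == "__main__":
    col = MongoClient("localhost", 27017)["finnewshunter"]["cnstock"]
    for doc in news_for_stock(col, "600519"):
        print(doc.get("Date"), doc.get("Title"))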