Example #1
File: __init__.py Project: puppylpg/oddish
    import datetime

    from src.crawl import item_crawler
    from src.util import suggestion
    from src.util.logger import log

    start = datetime.datetime.now()
    log.info("Start Time: {}".format(start))

    table = item_crawler.crawl()

    if (table is not None) and len(table) > 0:
        suggestion.suggest(table)
    else:
        log.error(
            'No correct csgo items remain. Please check if conditions are too strict.'
        )

    if args.output is not None:
        database = [x.to_dict() for x in table]
        with open(args.output, "w", encoding='utf-8') as f:
            f.write(json.dumps(database))

    end = datetime.datetime.now()
    log.info("END: {}. TIME USED: {}.".format(end, end - start))
else:
    from PyQt5 import QtWidgets
    from src.ui.oddish import oddish

    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
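(This fragment is cut out of the middle of __init__.py: args, json, and sys are defined earlier in the file, and the trailing else: switches from the CLI crawl to the PyQt5 UI.)

Below is a minimal sketch of the kind of wrapper the fragment assumes. The flag names (--gui, --output) and the argparse layout are illustrative assumptions, not necessarily the project's actual CLI:

import argparse
import json
import sys

# hypothetical CLI wrapper; the real __init__.py defines its own arguments
parser = argparse.ArgumentParser(description='oddish crawler')
parser.add_argument('--gui', action='store_true',
                    help='launch the PyQt5 UI instead of running the CLI crawl')
parser.add_argument('--output',
                    help='optional path for a JSON dump of the crawled table')
args = parser.parse_args()

if not args.gui:
    pass  # CLI branch: the crawl / suggest / dump code shown above
else:
    pass  # GUI branch: the QtWidgets code shown above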
Example #2
async def crawl_goods_by_price_section(category=None):
    root_url = goods_section_root_url(category)
    log.info('GET: {}'.format(root_url))

    root_json = get_json_dict(root_url, config.BUFF_COOKIE)
    category_items = []

    tasks = []
    timeout = aiohttp.ClientTimeout(total=30 * 60)
    if config.PROXY:
        # use socks
        connector = ProxyConnector.from_url(config.PROXY, limit=5)
    else:
        connector = aiohttp.TCPConnector(limit=5)

    if 'data' not in root_json:
        log.error('Error happens:')
        log.error(root_json)
        if 'error' in root_json:
            log.error('Error field: ' + root_json['error'])
        log.error(
            'Please paste correct buff cookie to config, current cookie:' +
            str(config.BUFF_COOKIE))
        return None

    if ('total_page' not in root_json['data']) or ('total_count'
                                                   not in root_json['data']):
        log.error(
            "No specific page and count info for root page. Please check buff data structure."
        )
        # bail out here, otherwise the lookups below would raise KeyError
        return None

    total_page = root_json['data']['total_page']
    total_count = root_json['data']['total_count']

    # buff has a page_size parameter: 20 items per page by default, 80 at most.
    # Using 80 cuts the number of requests to buff to about a quarter. For now this is hard-coded rather than configurable.
    use_max_page_size = True
    max_page_size = 80
    default_page_size = 20

    # recompute the page count for the 80-items-per-page setting
    if use_max_page_size:
        total_page = math.ceil(total_count / max_page_size)

    log.info('Totally {} items of {} pages to crawl.'.format(
        total_count, total_page))
    async with aiohttp.ClientSession(cookies=config.STEAM_COOKIE,
                                     headers=get_headers(),
                                     connector=connector,
                                     timeout=timeout) as session:
        # get each page
        for page_num in range(1, total_page + 1):
            log.info('Page {} / {}'.format(page_num, total_page))
            page_url = goods_section_page_url(
                category,
                page_num,
                page_size=max_page_size
                if use_max_page_size else default_page_size)
            page_json = get_json_dict(page_url, config.BUFF_COOKIE)
            if (page_json is not None) and ('data' in page_json) and (
                    'items' in page_json['data']):
                # items on this page
                items_json = page_json['data']['items']
                for item in items_json:
                    # get item
                    csgo_item = collect_item(item)
                    if csgo_item is not None:
                        category_items.append(csgo_item)
                        try:
                            tasks.append(
                                async_crawl_item_history_price(
                                    len(category_items), category_items[-1],
                                    session))
                        except Exception as e:
                            log.error(traceback.format_exc())

                stamp = time.time()
                try:
                    await asyncio.gather(*tasks)
                except Exception as e:
                    log.error(traceback.format_exc())
                tasks = []
                if not exist(page_url):
                    await timer.async_sleep_awhile(0, time.time() - stamp)
            else:
                log.warn(
                    "No specific data for page {}. Skip this page.".format(
                        page_url))
    return category_items
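The coroutine above needs an event loop to run. A minimal sketch of driving it on its own, assuming the function is importable from a module such as src.crawl.item_crawler and that 'knife' is a valid buff category (both the import path and the category value are assumptions for illustration):

import asyncio

from src.crawl.item_crawler import crawl_goods_by_price_section  # assumed import path

# crawl one category and report how many items came back
items = asyncio.run(crawl_goods_by_price_section(category='knife'))
if items:
    print('crawled {} items'.format(len(items)))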
Example #3
def crawl_goods_by_price_section(category=None):
    root_url = goods_section_root_url(category)
    log.info('GET: {}'.format(root_url))

    root_json = get_json_dict(root_url, buff_cookies)

    category_items = []

    if root_json is not None:
        if 'data' not in root_json:
            log.error('Error happens:')
            log.error(root_json)
            if 'error' in root_json:
                log.error('Error field: ' + root_json['error'])
            log.error(
                'Please paste correct buff cookie to config, current cookie:' +
                BUFF_COOKIE)
            exit(1)

        if ('total_page'
                not in root_json['data']) or ('total_count'
                                              not in root_json['data']):
            log.error(
                "No specific page and count info for root page. Please check buff data structure."
            )
            # bail out here, otherwise the lookups below would raise KeyError
            exit(1)

        total_page = root_json['data']['total_page']
        total_count = root_json['data']['total_count']

        # buff has a page_size parameter: 20 items per page by default, 80 at most.
        # Using 80 cuts the number of requests to buff to about a quarter. For now this is hard-coded rather than configurable.
        use_max_page_size = True
        max_page_size = 80
        default_page_size = 20

        # recompute the page count for the 80-items-per-page setting
        if use_max_page_size:
            total_page = math.ceil(total_count / max_page_size)

        log.info('Totally {} items of {} pages to crawl.'.format(
            total_count, total_page))
        # get each page
        for page_num in range(1, total_page + 1):
            log.info('Page {} / {}'.format(page_num, total_page))
            page_url = goods_section_page_url(
                category,
                page_num,
                page_size=max_page_size
                if use_max_page_size else default_page_size)
            page_json = get_json_dict(page_url, buff_cookies)
            if (page_json is not None) and ('data' in page_json) and (
                    'items' in page_json['data']):
                # items on this page
                items_json = page_json['data']['items']
                for item in items_json:
                    # get item
                    csgo_item = collect_item(item)
                    if csgo_item is not None:
                        category_items.append(csgo_item)
            else:
                log.warn(
                    "No specific data for page {}. Skip this page.".format(
                        page_url))

    return category_items
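The page_size trick in both versions is plain ceiling division. For a hypothetical category of 1000 items (the count is made up for illustration), the default page size of 20 takes 50 page requests, while the maximum page size of 80 needs only 13, which is the "about a quarter" reduction the comment refers to:

import math

total_count = 1000                    # example item count, not taken from buff
print(math.ceil(total_count / 20))    # 50 requests with the default page size
print(math.ceil(total_count / 80))    # 13 requests with the maximum page size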
Example #4
import datetime

from src.crawl import item_crawler
from src.util import suggestion
from src.util.logger import log

if __name__ == '__main__':
    start = datetime.datetime.now()
    log.info("开始时间: {}".format(start))
    #log 是 程序/log目录下的记录日志对象,对应config中的NORMLAL_LOGGER

    table = item_crawler.crawl()
    if table is not None:
        # suggestion
        suggestion.suggest(table)
        # make suggestions based on the table; results are saved in the suggest folder
    else:
        log.error('No items matched the conditions. Are the filter parameters set correctly?')

    # end
    end = datetime.datetime.now()
    log.info("结束时间: {}. 共用时: {}.".format(end, end - start))