Example #1
import traceback

import requests
from requests.exceptions import Timeout

# exist, fetch, log, timer, headers, proxies and RETRY_TIMES are module-level
# helpers and globals defined elsewhere in the project.

def get_json_dict_raw(url, cookies, proxy=False, times=1):
    # Serve the raw response text from the local cache when possible.
    if exist(url):
        return fetch(url)

    # Give up once the retry budget is exhausted.
    if times > RETRY_TIMES:
        log.error(
            'Timeout for {} beyond the maximum({}) retry times. SKIP!'.format(
                url, RETRY_TIMES))
        return None

    # Throttle before hitting the network again.
    timer.sleep_awhile()
    try:
        if proxy and proxies != {}:
            return requests.get(url,
                                headers=headers,
                                cookies=cookies,
                                timeout=5,
                                proxies=proxies).text
        return requests.get(url, headers=headers, cookies=cookies,
                            timeout=5).text
    except Timeout:
        log.warn("timeout for {}. Try again.".format(url))
    except Exception as e:
        log.error("unknown error for {}. Try again. Error string: {}".format(
            url, e))
        log.error(traceback.format_exc())

    # Retry recursively with an incremented attempt counter.
    data = get_json_dict_raw(url, cookies, proxy, times + 1)
    return data
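All of these examples lean on a small URL cache via exist, fetch and store, which live elsewhere in the project. Below is a minimal file-based sketch of what such helpers could look like, assuming one cache file per URL keyed by an MD5 hash; the directory name and hashing scheme are assumptions, not the project's actual code.

import hashlib
import os

CACHE_DIR = 'cache'  # assumed cache location

def _cache_path(url):
    # Map a URL to a stable file name inside the cache directory.
    return os.path.join(CACHE_DIR, hashlib.md5(url.encode('utf-8')).hexdigest())

def exist(url):
    return os.path.isfile(_cache_path(url))

def fetch(url):
    with open(_cache_path(url), encoding='utf-8') as f:
        return f.read()

def store(url, data):
    os.makedirs(CACHE_DIR, exist_ok=True)
    with open(_cache_path(url), 'w', encoding='utf-8') as f:
        f.write(data)

With a cache like this in place, get_json_dict_raw only hits the network for URLs it has not seen before, and the recursive times counter bounds how often a failing URL is retried.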
Example #2
import json

def get_json_dict(url, cookies={}, proxy=False, times=1, is_steam_request=0):
    # Serve the parsed JSON from the local cache when possible.
    if exist(url):
        return json.loads(fetch(url))
    # This variant's get_json_dict_raw also accepts an is_steam_request flag.
    json_data = get_json_dict_raw(url, cookies, proxy, times, is_steam_request)

    if json_data is None:
        return None
    else:
        # The cache cannot store None, so only successful fetches are cached.
        store(url, json_data)
        return json.loads(json_data)
Example #3
import json

def get_json_dict(url, cookies, proxy=False, times=1):
    if exist(url):
        return json.loads(fetch(url))
    json_data = get_json_dict_raw(url, cookies, proxy, times)
    # get_json_dict_raw returns None once retries are exhausted; guard it,
    # since json.loads(None) would raise a TypeError.
    if json_data is None:
        return None
    store(url, json_data)
    return json.loads(json_data)
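For context, a call site for either variant could look like the following; the endpoint URL and cookie value are illustrative placeholders, not the project's actual values.

cookies = {'session': 'PASTE_BUFF_SESSION_HERE'}  # placeholder cookie
data = get_json_dict('https://buff.163.com/api/market/goods?game=csgo',
                     cookies)
if data is not None:
    # Inspect the response status field (assumed to exist in buff responses).
    print(data.get('code'))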
Example #4
import asyncio
import math
import time
import traceback

import aiohttp
from aiohttp_socks import ProxyConnector

async def crawl_goods_by_price_section(category=None):
    root_url = goods_section_root_url(category)
    log.info('GET: {}'.format(root_url))

    root_json = get_json_dict(root_url, config.BUFF_COOKIE)
    category_items = []

    tasks = []
    timeout = aiohttp.ClientTimeout(total=30 * 60)
    if config.PROXY:
        # Route traffic through a SOCKS proxy.
        connector = ProxyConnector.from_url(config.PROXY, limit=5)
    else:
        connector = aiohttp.TCPConnector(limit=5)

    if root_json is None or 'data' not in root_json:
        log.error('Error occurred:')
        log.error(root_json)
        if root_json and 'error' in root_json:
            log.error('Error field: ' + root_json['error'])
        log.error(
            'Please paste a correct buff cookie into config, current cookie: ' +
            str(config.BUFF_COOKIE))
        return None

    if ('total_page' not in root_json['data']) or ('total_count'
                                                   not in root_json['data']):
        log.error(
            "No page or count info on the root page. Please check buff's data structure."
        )
        return None

    total_page = root_json['data']['total_page']
    total_count = root_json['data']['total_count']

    # buff supports a page_size parameter: the default is 20 items per page,
    # and the maximum is 80. Using 80 cuts the number of requests to buff to a
    # quarter. For now this is hard-coded rather than exposed as a config option.
    use_max_page_size = True
    max_page_size = 80
    default_page_size = 20

    # Recompute the page count for the larger page size.
    if use_max_page_size:
        total_page = math.ceil(total_count / max_page_size)

    log.info('{} items across {} pages to crawl in total.'.format(
        total_count, total_page))
    async with aiohttp.ClientSession(cookies=config.STEAM_COOKIE,
                                     headers=get_headers(),
                                     connector=connector,
                                     timeout=timeout) as session:
        # get each page
        for page_num in range(1, total_page + 1):
            log.info('Page {} / {}'.format(page_num, total_page))
            page_url = goods_section_page_url(
                category,
                page_num,
                page_size=max_page_size
                if use_max_page_size else default_page_size)
            page_json = get_json_dict(page_url, config.BUFF_COOKIE)
            if (page_json is not None) and ('data' in page_json) and (
                    'items' in page_json['data']):
                # items on this page
                items_json = page_json['data']['items']
                for item in items_json:
                    # get item
                    csgo_item = collect_item(item)
                    if csgo_item is not None:
                        category_items.append(csgo_item)
                        try:
                            tasks.append(
                                async_crawl_item_history_price(
                                    len(category_items), category_items[-1],
                                    session))
                        except Exception:
                            log.error(traceback.format_exc())

                stamp = time.time()
                try:
                    await asyncio.gather(*tasks)
                except Exception:
                    log.error(traceback.format_exc())
                tasks = []
                # Only sleep when the page was actually fetched remotely;
                # cache hits need no throttling.
                if not exist(page_url):
                    await timer.async_sleep_awhile(0, time.time() - stamp)
            else:
                log.warn(
                    "No specific data for page {}. Skip this page.".format(
                        page_url))
    return category_items
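Since crawl_goods_by_price_section is a coroutine, it has to be driven by an event loop. A minimal entry point might look like this; the 'knife' category name is just an assumed example, so substitute whatever categories the project actually defines.

if __name__ == '__main__':
    # Run the crawler for one assumed category and report the item count.
    items = asyncio.run(crawl_goods_by_price_section('knife'))
    if items:
        log.info('Crawled {} items.'.format(len(items)))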