def parse_binary_search(self, response, minimum=None, maximum=None):
    offset = int(get_parameter_value(response.request.url, 'offset'))
    first_offset = response.request.meta['first']

    if minimum and maximum:
        self.logger.info(f'Starting binary search for {first_offset:,} within [{minimum:,}, {maximum:,}]')
    elif self.is_http_success(response):
        minimum = response.request.meta['minimum']
        maximum = offset
    else:
        minimum = offset + 1
        maximum = response.request.meta['maximum']

    # If the search succeeded, parse the response as usual. We use a threshold, because getting the exact
    # millisecond requires 27 requests.
    if minimum + THRESHOLD >= maximum:
        self.logger.info(f'New offset found after {first_offset:,} at {maximum:,}!')
        if offset == maximum:
            # If the last request used the offset, we can reuse its response.
            yield from self.parse(response)
        else:
            url = replace_parameters(response.request.url, offset=maximum)
            yield self._build_request(url, self.parse, {})
    else:
        url = replace_parameters(response.request.url, offset=(minimum + maximum) // 2)
        yield self._build_request(url, self.parse_binary_search, {'minimum': minimum, 'maximum': maximum, 'first': first_offset})
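
# A minimal offline sketch of the convergence logic above, assuming a hypothetical
# `errors(offset)` predicate in place of the HTTP success/failure signal. It shows
# why THRESHOLD exists: with a threshold of 0, narrowing a full day of milliseconds
# takes about ceil(log2(86_400_000)) = 27 probes, matching the comment above.
def binary_search_sketch(errors, minimum, maximum, threshold):
    probes = 0
    while minimum + threshold < maximum:
        middle = (minimum + maximum) // 2
        probes += 1
        if errors(middle):
            minimum = middle + 1  # the answer must be later than a failing probe
        else:
            maximum = middle  # a succeeding probe is a candidate answer
    return maximum, probes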
def parse_list(self, response, **kwargs):
    hrefs = response.xpath('//a/@href').getall()
    for href in hrefs:
        if '.json' in href:
            # The href looks like:
            # http://www.google.com/url?q=http%3A%2F%2F116.202.173.47%3A8080%2Fmd_covid_2020-11-06.json&sa=D&sntz=1
            url = get_parameter_value(href, 'q')
            if not url:
                url = href
            yield self.build_request(url, formatter=components(-1))
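
# A minimal sketch of the behavior `parse_list` relies on, assuming the helper reads
# a single query-string parameter with `urllib.parse` and returns None when it is
# absent (the real `get_parameter_value` may differ; this is illustrative only).
from urllib.parse import parse_qs, urlsplit

def get_parameter_value_sketch(url, key):
    values = parse_qs(urlsplit(url).query).get(key)
    return values[0] if values else None

# For the Google-redirect href in the comment above, the `q` parameter decodes to:
# http://116.202.173.47:8080/md_covid_2020-11-06.json
# Direct links carry no `q` parameter, which is why `parse_list` falls back to the
# raw href.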
def parse(self, response):
    # If the request was successful, parse the response as usual.
    if self.is_http_success(response):
        yield self.build_file_from_response(response, data_type=self.data_type)
        # Use `dont_filter` in case the search for a successful timestamp used the same offset. Use `dont_retry`
        # since errors are expected.
        yield self.next_link(response, dont_filter=True, meta={'dont_retry': True})
    # Otherwise, parse the response as usual, then (1) pick a date range and (2) do a binary search within it.
    # This approach assumes that, if two offsets error, then intervening offsets error, too.
    else:
        yield self.build_file_error_from_response(response)
        # If the error occurs on the first request, we have no starting offset.
        if get_parameter_value(response.request.url, 'offset'):
            yield from self.parse_date_range(response)
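
# For context, two Scrapy built-ins are relied on above: `dont_filter=True` tells
# the scheduler's duplicate filter to accept a URL the search may already have
# requested, and `meta={'dont_retry': True}` tells the stock RetryMiddleware to
# hand error responses straight back to this callback. A minimal sketch of the
# check the middleware applies:
def should_skip_retry_sketch(request):
    return request.meta.get('dont_retry', False)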
def parse_date_range(self, response):
    offset = int(get_parameter_value(response.request.url, 'offset'))
    # Scrapy uses `datetime.datetime.utcnow()`, so we don't need to worry about time zones.
    start_time = int(self.crawler.stats.get_value('start_time').timestamp() * 1000)
    # We use the first offset to calculate the new offset, and in log messages.
    first_offset = response.request.meta.get('first', offset)
    # The exponent for the exponential search.
    exponent = response.request.meta.get('exponent', -1) + 1

    # If this offset succeeded, do a binary search from the previous offset to this offset.
    if self.is_http_success(response):
        yield from self.parse_binary_search(response, response.request.meta['prev'], offset)
    # If this offset failed and reached a limit, stop.
    elif offset >= start_time or exponent > EXPONENT_LIMIT:
        self.logger.info(f'No offset found after {first_offset:,} within {2 ** EXPONENT_LIMIT} days.')
        yield self.build_file_error_from_response(response)
    # Otherwise, continue.
    else:
        new_offset = min(first_offset + MILLISECONDS_PER_DAY * 2 ** exponent, start_time)
        url = replace_parameters(response.request.url, offset=new_offset)
        yield self._build_request(url, self.parse_date_range, {'prev': offset, 'exponent': exponent, 'first': first_offset})
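
# A minimal sketch of the probe schedule produced by `parse_date_range`, assuming
# MILLISECONDS_PER_DAY is 86_400_000 (1000 * 60 * 60 * 24): offsets grow as
# first + 1, 2, 4, ... days, capped at the crawl's start time, after which
# `parse_binary_search` narrows the interval between the last two probes.
def exponential_offsets_sketch(first_offset, start_time, exponent_limit,
                               milliseconds_per_day=86_400_000):
    for exponent in range(exponent_limit + 1):
        offset = min(first_offset + milliseconds_per_day * 2 ** exponent, start_time)
        yield offset
        if offset >= start_time:
            break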
def test_get_parameter_value(url, expected):
    assert get_parameter_value(url, 'page') == expected