class _SitemapWebClient(AbstractWebClient):
    # Some webservers might be generating huge sitemaps on the fly, which is why the timeout is rather big.
    __HTTP_REQUEST_TIMEOUT = 60

    __slots__ = [
        '__ua',
    ]

    def __init__(self):
        self.__ua = UserAgent()
        self.__ua.set_timeout(self.__HTTP_REQUEST_TIMEOUT)

    def set_max_response_data_length(self, max_response_data_length: int) -> None:
        self.__ua.set_max_size(max_response_data_length)

    def get(self, url: str) -> AbstractWebClientResponse:
        ua_response = self.__ua.get(url)

        if ua_response.is_success():
            return _SitemapWebClientResponse(ua_response=ua_response)
        else:
            return WebClientErrorResponse(
                message=ua_response.status_line(),
                retryable=ua_response.code() in RETRYABLE_HTTP_STATUS_CODES,
            )
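
# A minimal usage sketch, not from the original code: a sitemap parser would
# drive the client through the AbstractWebClient interface shown above. The
# URL and the 10 MB response cap are illustrative assumptions.
web_client = _SitemapWebClient()
web_client.set_max_response_data_length(10 * 1024 * 1024)  # assumed cap, in bytes

response = web_client.get('https://example.com/sitemap.xml')
if isinstance(response, WebClientErrorResponse):
    log.warning("Sitemap fetch failed")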
def _get_content_from_api(self, query: str, start_date: datetime, end_date: datetime) -> str:
    """Fetch the posts data from the CH API and return the HTTP response content."""
    ch_monitor_id = int(query)

    log.debug("crimson_hexagon_twitter.fetch_posts")

    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    config = TopicsMineConfig()
    api_key = config.crimson_hexagon_api_key()

    start_arg = start_date.strftime('%Y-%m-%d')
    end_arg = (end_date + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    url = (
        "https://api.crimsonhexagon.com/api/monitor/posts?auth=%s&id=%d&start=%s&end=%s&extendLimit=true" %
        (api_key, ch_monitor_id, start_arg, end_arg))

    log.debug("crimson hexagon url: " + url)

    response = ua.get(url)

    if not response.is_success():
        raise McPostsCHTwitterDataException("error fetching posts: " + response.decoded_content())

    return response.decoded_content()
def _get_user_agent() -> UserAgent:
    """Get a properly configured user agent."""
    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])
    return ua
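
# Hedged usage sketch: the helper above centralizes the UserAgent settings the
# fetchers in this section repeat (100 MB cap, 90 s timeout, and a timing
# schedule that is assumed to be the retry back-off in seconds). The URL below
# is illustrative.
ua = _get_user_agent()
response = ua.get('https://example.com/api/data')
if response.is_success():
    body = response.decoded_content()
else:
    log.warning("fetch failed: " + response.status_line())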
def fetch_posts(self, query: dict, start_date: datetime, end_date: datetime) -> list:
    """Fetch tweets from archive.org that match the given query over the given date range."""
    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    end_date = end_date + datetime.timedelta(days=1)

    start_arg = start_date.strftime('%Y-%m-%d')
    end_arg = end_date.strftime('%Y-%m-%d')

    enc_query = urlencode({'q': query, 'date_from': start_arg, 'date_to': end_arg})

    url = "https://searchtweets.archivelab.org/export?" + enc_query

    log.debug("archive.org url: " + url)

    response = ua.get(url)

    if not response.is_success():
        raise McPostsArchiveTwitterDataException("error fetching posts: " + response.decoded_content())

    decoded_content = response.decoded_content()

    # sometimes we get null characters, which choke the csv module
    decoded_content = decoded_content.replace('\x00', '')

    meta_tweets = []
    lines = decoded_content.splitlines()[1:]

    fields = 'user_name user_screen_name lang text timestamp_ms url'.split(' ')

    for row in csv.reader(lines, delimiter="\t"):
        meta_tweet = {}
        for i, field in enumerate(fields):
            meta_tweet[field] = row[i] if i < len(row) else ''

        if 'url' not in meta_tweet or meta_tweet['url'] == '':
            log.warning("meta_tweet '%s' does not have a url" % str(row))
            continue

        meta_tweet['tweet_id'] = get_tweet_id_from_url(meta_tweet['url'])

        meta_tweets.append(meta_tweet)

    add_tweets_to_meta_tweets(meta_tweets)

    return meta_tweets
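
# Worked example with hypothetical data: one TSV row from the export maps
# positionally onto the field list used above.
row = ['Jane', 'jdoe', 'en', 'hello world', '1514764800000', 'https://twitter.com/jdoe/status/1234']
fields = 'user_name user_screen_name lang text timestamp_ms url'.split(' ')
meta_tweet = {field: (row[i] if i < len(row) else '') for i, field in enumerate(fields)}
# meta_tweet['url'] is 'https://twitter.com/jdoe/status/1234';
# get_tweet_id_from_url() then extracts the numeric tweet ID from it.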
def fetch_meta_tweets_from_ch(query: str, day: datetime.datetime) -> list:
    """Fetch a day of tweets from Crimson Hexagon."""
    ch_monitor_id = int(query)

    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    config = mediawords.util.config.get_config()

    if 'crimson_hexagon' not in config or 'key' not in config['crimson_hexagon']:
        raise McFetchTopicTweetsConfigException("no key in mediawords.yml at //crimson_hexagon/key.")

    key = config['crimson_hexagon']['key']

    next_day = day + datetime.timedelta(days=1)

    day_arg = day.strftime('%Y-%m-%d')
    next_day_arg = next_day.strftime('%Y-%m-%d')

    url = (
        "https://api.crimsonhexagon.com/api/monitor/posts?auth=%s&id=%d&start=%s&end=%s&extendLimit=true" %
        (key, ch_monitor_id, day_arg, next_day_arg))

    log.debug("crimson hexagon url: " + url)

    response = ua.get(url)

    if not response.is_success():
        raise McFetchTopicTweetsDataException("error fetching posts: " + response.decoded_content())

    decoded_content = response.decoded_content()

    data = dict(mediawords.util.parse_json.decode_json(decoded_content))

    if 'status' not in data or data['status'] != 'success':
        raise McFetchTopicTweetsDataException("Unknown response status: " + str(data))

    meta_tweets = data['posts']

    for mt in meta_tweets:
        mt['tweet_id'] = get_tweet_id_from_url(mt['url'])

    return meta_tweets
def fetch_posts(ch_monitor_id: int, day: datetime.datetime) -> dict:
    """Implement fetch_posts on the CH API using the config data from mediawords.yml."""
    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    config = mediawords.util.config.get_config()

    if 'crimson_hexagon' not in config or 'key' not in config['crimson_hexagon']:
        raise McFetchTopicTweetsConfigException("no key in mediawords.yml at //crimson_hexagon/key.")

    key = config['crimson_hexagon']['key']

    next_day = day + datetime.timedelta(days=1)

    day_arg = day.strftime('%Y-%m-%d')
    next_day_arg = next_day.strftime('%Y-%m-%d')

    url = (
        "https://api.crimsonhexagon.com/api/monitor/posts?auth=%s&id=%d&start=%s&end=%s&extendLimit=true" %
        (key, ch_monitor_id, day_arg, next_day_arg))

    log.debug("crimson hexagon url: " + url)

    response = ua.get(url)

    if not response.is_success():
        raise McFetchTopicTweetsDataException("error fetching posts: " + response.decoded_content())

    decoded_content = response.decoded_content()

    data = dict(mediawords.util.parse_json.decode_json(decoded_content))

    if 'status' not in data or data['status'] != 'success':
        raise McFetchTopicTweetsDataException("Unknown response status: " + str(data))

    return data
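
# Sketch of a call site (the monitor ID and day are illustrative): the
# returned dict is the decoded CH JSON, with the 'status' field already
# validated above and the tweets available under 'posts'.
data = fetch_posts(ch_monitor_id=12345, day=datetime.datetime(2018, 1, 1))
for post in data['posts']:
    log.debug(post['url'])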
def __annotate_text(self, text: str) -> Union[dict, list]:
    """Fetch JSON annotation for text, decode it into a dictionary / list."""
    text = decode_object_from_bytes_if_needed(text)

    if text is None:
        fatal_error("Text is None.")

    if len(text) == 0:
        # Annotators accept empty strings, but that might happen with some stories, so we're just die()ing here
        raise McJSONAnnotationFetcherException("Text is empty.")

    log.info("Annotating %d characters of text..." % len(text))

    # Trim the text because that's what the annotator will do, and if the text is empty, we want to fail early
    # without making a request to the annotator at all
    text = text.strip()

    if self.__TEXT_LENGTH_LIMIT > 0:
        text_length = len(text)
        if text_length > self.__TEXT_LENGTH_LIMIT:
            log.warning("Text length (%d) has exceeded the request text length limit (%d) so I will truncate it." % (
                text_length,
                self.__TEXT_LENGTH_LIMIT,
            ))
            text = text[:self.__TEXT_LENGTH_LIMIT]

    # Make a request
    ua = UserAgent()
    ua.set_timing([1, 2, 4, 8])
    ua.set_timeout(self.__HTTP_TIMEOUT)
    ua.set_max_size(None)

    request = None
    try:
        request = self._request_for_text(text=text)
        if request is None:
            raise McJSONAnnotationFetcherException("Returned request is None.")
    except Exception as ex:
        # Assume that this is some sort of a programming error too
        fatal_error("Unable to create annotator request for text '%s': %s" % (
            text,
            str(ex),
        ))

    # Wait for the service's HTTP port to become open as the service might be
    # still starting up somewhere
    uri = furl(request.url())
    hostname = str(uri.host)
    port = int(uri.port)
    assert hostname, "URL hostname is not set for URL %s" % request.url()
    assert port, "API URL port is not set for URL %s" % request.url()

    if not wait_for_tcp_port_to_open(
            port=port,
            hostname=hostname,
            retries=self.__ANNOTATOR_SERVICE_TIMEOUT,
    ):
        # Instead of throwing an exception, just crash the whole application
        # because there's no point in continuing to run it whatsoever.
        fatal_error("Annotator service at %s didn't come up in %d seconds, exiting..." % (
            request.url(),
            self.__ANNOTATOR_SERVICE_TIMEOUT,
        ))

    log.debug("Sending request to %s..." % request.url())
    response = ua.request(request)
    log.debug("Response received.")

    # Force UTF-8 encoding on the response because the server might not always
    # return the correct "Content-Type"
    results_string = response.decoded_utf8_content()

    if not response.is_success():
        # Error; determine whether we should be blamed for making a malformed
        # request, or whether it is an extraction error
        log.warning("Request failed: %s" % response.decoded_content())

        if response.code() == HTTPStatus.REQUEST_TIMEOUT.value:
            # Raise on request timeouts without retrying anything because those usually mean that we posted
            # something funky to the annotator service and it got stuck
            raise McJSONAnnotationFetcherException(
                "The request timed out, giving up; text length: %d; text: %s" % (
                    len(text),
                    text,
                ))

        if response.error_is_client_side():
            # Error was generated by the user agent client code; likely didn't reach the server at all
            # (timeout, unresponsive host, etc.)
            fatal_error("User agent error: %s: %s" % (
                response.status_line(),
                results_string,
            ))
        else:
            # Error was generated by the server
            http_status_code = response.code()

            if http_status_code == HTTPStatus.METHOD_NOT_ALLOWED.value \
                    or http_status_code == HTTPStatus.BAD_REQUEST.value:
                # Not POST, empty POST
                fatal_error('%s: %s' % (
                    response.status_line(),
                    results_string,
                ))
            elif http_status_code == HTTPStatus.INTERNAL_SERVER_ERROR.value:
                # Processing error -- raise so that the error gets caught and logged into a database
                raise McJSONAnnotationFetcherException(
                    'Annotator service was unable to process the download: %s' % results_string)
            else:
                # Shut down the extractor on unconfigured responses
                fatal_error('Unknown HTTP response: %s: %s' % (
                    response.status_line(),
                    results_string,
                ))

    if results_string is None or len(results_string) == 0:
        raise McJSONAnnotationFetcherException("Annotator returned nothing for text: %s" % text)

    log.debug("Parsing response's JSON...")
    results = None
    try:
        results = decode_json(results_string)
        if results is None:
            raise McJSONAnnotationFetcherException("Returned JSON is None.")
    except Exception as ex:
        # If the JSON is invalid, it's probably something broken with the remote service, so that's why we do
        # fatal_error() here
        fatal_error("Unable to parse JSON response: %s\nJSON string: %s" % (
            str(ex),
            results_string,
        ))
    log.debug("Done parsing response's JSON.")

    response_is_valid = False
    try:
        response_is_valid = self._fetched_annotation_is_valid(results)
    except Exception as ex:
        fatal_error("Unable to determine whether response is valid: %s\nJSON string: %s" % (str(ex), results_string))
    if not response_is_valid:
        fatal_error("Annotator response is invalid for JSON string: %s" % results_string)

    log.info("Done annotating %d characters of text." % len(text))

    return results
def solr_request(path: str,
                 params: SolrParams = None,
                 content: Union[str, SolrParams] = None,
                 content_type: Optional[str] = None,
                 config: Optional[CommonConfig] = None) -> str:
    """
    Send a request to Solr.

    :param path: Solr path to call, e.g. 'select'.
    :param params: Query parameters to add to the path.
    :param content: String or dictionary content to send via POST request.
    :param content_type: Content-Type for the POST content.
    :param config: (testing) Configuration object.
    :return: Raw response content on success, raise exception on error.
    """
    path = decode_object_from_bytes_if_needed(path)
    params = decode_object_from_bytes_if_needed(params)
    content = decode_object_from_bytes_if_needed(content)
    content_type = decode_object_from_bytes_if_needed(content_type)

    if not path:
        raise McSolrRequestInvalidParamsException("Path is unset.")

    if params:
        if not isinstance(params, dict):
            raise McSolrRequestInvalidParamsException(f"Params is not a dictionary: {params}")

    if content:
        if not isinstance(content, (str, dict)):
            raise McSolrRequestInvalidParamsException(f"Content is neither a string nor a dictionary: {content}")

    if not config:
        config = CommonConfig()

    solr_url = config.solr_url()

    if not params:
        params = {}

    abs_uri = furl(f"{solr_url}/mediacloud/{path}")
    abs_uri = abs_uri.set(params)
    abs_url = str(abs_uri)

    ua = UserAgent()
    ua.set_timeout(__QUERY_HTTP_TIMEOUT)
    ua.set_max_size(None)

    # Remediate CVE-2017-12629
    q_param = str(params.get('q', ''))
    if 'xmlparser' in q_param.lower():
        raise McSolrRequestQueryErrorException("XML queries are not supported.")

    # Solr might still be starting up so wait for it to expose the collections list
    __wait_for_solr_to_start(config=config)

    if content:
        if not content_type:
            fallback_content_type = 'text/plain; charset=utf-8'
            log.warning(f"Content-Type is not set; falling back to '{fallback_content_type}'")
            content_type = fallback_content_type

        if isinstance(content, dict):
            content = urlencode(content, doseq=True)

        content_encoded = content.encode('utf-8', errors='replace')

        request = Request(method='POST', url=abs_url)
        request.set_header(name='Content-Type', value=content_type)
        request.set_header(name='Content-Length', value=str(len(content_encoded)))
        request.set_content(content_encoded)
    else:
        request = Request(method='GET', url=abs_url)

    log.debug(f"Sending Solr request: {request}")
    response = ua.request(request)

    if not response.is_success():
        error_message = __solr_error_message_from_response(response=response)
        raise McSolrRequestQueryErrorException(f"Error fetching Solr response: {error_message}")

    return response.decoded_content()
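
# Illustrative call, not from the original module: a GET against the 'select'
# handler (the path named in the docstring), with the query parameters
# appended to the URL by furl. The query itself is an assumption.
results_json = solr_request(path='select', params={'q': 'text:example', 'rows': 10})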
def __annotate_text(self, text: str) -> Union[dict, list]:
    """Fetch JSON annotation for text, decode it into a dictionary / list."""
    text = decode_object_from_bytes_if_needed(text)

    if text is None:
        fatal_error("Text is None.")

    if len(text) == 0:
        # Annotators accept empty strings, but that might happen with some stories, so we're just die()ing here
        raise McJSONAnnotationFetcherException("Text is empty.")

    log.info(f"Annotating {len(text)} characters of text...")

    # Trim the text because that's what the annotator will do, and if the text is empty, we want to fail early
    # without making a request to the annotator at all
    text = text.strip()

    if self.__TEXT_LENGTH_LIMIT > 0:
        text_length = len(text)
        if text_length > self.__TEXT_LENGTH_LIMIT:
            log.warning(
                f"Text length ({text_length}) has exceeded the request text length limit "
                f"({self.__TEXT_LENGTH_LIMIT}) so I will truncate it.")
            text = text[:self.__TEXT_LENGTH_LIMIT]

    # Make a request
    ua = UserAgent()
    ua.set_timing([1, 2, 4, 8])
    ua.set_timeout(self.__HTTP_TIMEOUT)
    ua.set_max_size(None)

    request = None
    try:
        request = self._request_for_text(text=text)
        if request is None:
            raise McJSONAnnotationFetcherException("Returned request is None.")
    except Exception as ex:
        # Assume that this is some sort of a programming error too
        fatal_error(f"Unable to create annotator request for text '{text}': {ex}")

    # Wait for the service's HTTP port to become open as the service might be
    # still starting up somewhere
    uri = furl(request.url())
    hostname = str(uri.host)
    port = int(uri.port)
    assert hostname, f"URL hostname is not set for URL {request.url()}"
    assert port, f"API URL port is not set for URL {request.url()}"

    if not wait_for_tcp_port_to_open(
            port=port,
            hostname=hostname,
            retries=self.__ANNOTATOR_SERVICE_TIMEOUT,
    ):
        # Instead of throwing an exception, just crash the whole application
        # because there's no point in continuing to run it whatsoever.
        fatal_error(
            f"Annotator service at {request.url()} didn't come up in {self.__ANNOTATOR_SERVICE_TIMEOUT} seconds, "
            f"exiting...")

    log.debug(f"Sending request to {request.url()}...")

    # Try requesting a few times because sometimes it throws a connection error, e.g.:
    #
    #   WARNING mediawords.util.web.user_agent: Client-side error while processing request <PreparedRequest [POST]>:
    #   ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    #   WARNING mediawords.annotator.fetcher: Request failed: ('Connection aborted.', ConnectionResetError(104,
    #   'Connection reset by peer'))
    #   ERROR mediawords.util.process: User agent error: 400 Client-side error: ('Connection aborted.',
    #   ConnectionResetError(104, 'Connection reset by peer'))
    response = None
    retries = 60
    sleep_between_retries = 1
    for retry in range(1, retries + 1):

        if retry > 1:
            log.warning(f"Retrying ({retry} / {retries})...")

        response = ua.request(request)

        if response.is_success():
            break
        else:
            if response.error_is_client_side():
                log.error(f"Request failed on the client side: {response.decoded_content()}")
                time.sleep(sleep_between_retries)
            else:
                break

    log.debug("Response received.")

    # Force UTF-8 encoding on the response because the server might not always
    # return the correct "Content-Type"
    results_string = response.decoded_utf8_content()

    if not response.is_success():
        # Error; determine whether we should be blamed for making a malformed
        # request, or whether it is an extraction error
        log.warning(f"Request failed: {response.decoded_content()}")

        if response.code() == HTTPStatus.REQUEST_TIMEOUT.value:
            # Raise on request timeouts without retrying anything because those usually mean that we posted
            # something funky to the annotator service and it got stuck
            raise McJSONAnnotationFetcherException(
                f"The request timed out, giving up; text length: {len(text)}; text: {text}")

        if response.error_is_client_side():
            # Error was generated by the user agent client code; likely didn't reach the server at all
            # (timeout, unresponsive host, etc.)
            fatal_error(f"User agent error: {response.status_line()}: {results_string}")
        else:
            # Error was generated by the server
            http_status_code = response.code()

            if http_status_code == HTTPStatus.METHOD_NOT_ALLOWED.value \
                    or http_status_code == HTTPStatus.BAD_REQUEST.value:
                # Not POST, empty POST
                fatal_error(f'{response.status_line()}: {results_string}')
            elif http_status_code == HTTPStatus.INTERNAL_SERVER_ERROR.value:
                # Processing error -- raise so that the error gets caught and logged into a database
                raise McJSONAnnotationFetcherException(
                    f'Annotator service was unable to process the download: {results_string}')
            else:
                # Shut down the extractor on unconfigured responses
                fatal_error(f'Unknown HTTP response: {response.status_line()}: {results_string}')

    if results_string is None or len(results_string) == 0:
        raise McJSONAnnotationFetcherException(f"Annotator returned nothing for text: {text}")

    log.debug("Parsing response's JSON...")
    results = None
    try:
        results = decode_json(results_string)
        if results is None:
            raise McJSONAnnotationFetcherException("Returned JSON is None.")
    except Exception as ex:
        # If the JSON is invalid, it's probably something broken with the remote service, so that's why we do
        # fatal_error() here
        fatal_error(f"Unable to parse JSON response: {ex}\nJSON string: {results_string}")
    log.debug("Done parsing response's JSON.")

    response_is_valid = False
    try:
        response_is_valid = self._fetched_annotation_is_valid(results)
    except Exception as ex:
        fatal_error(f"Unable to determine whether response is valid: {ex}\nJSON string: {results_string}")
    if not response_is_valid:
        fatal_error(f"Annotator response is invalid for JSON string: {results_string}")

    log.info(f"Done annotating {len(text)} characters of text.")

    return results
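
# The retry policy above, condensed into a standalone sketch (the helper name
# is hypothetical): only client-side failures are retried with a fixed sleep;
# a success or a server-side error stops the loop.
def request_with_retries(ua: UserAgent, request, retries: int = 60, sleep_between_retries: int = 1):
    response = None
    for retry in range(1, retries + 1):
        if retry > 1:
            log.warning(f"Retrying ({retry} / {retries})...")
        response = ua.request(request)
        # Short-circuit: error_is_client_side() is only consulted on failures.
        if response.is_success() or not response.error_is_client_side():
            break
        time.sleep(sleep_between_retries)
    return response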
def sitemap_useragent() -> UserAgent:
    """Return a UserAgent configured for fetching potentially huge sitemaps."""
    ua = UserAgent()
    ua.set_max_size(__MAX_SITEMAP_SIZE)
    return ua
def fetch_posts(self, query: str, start_date: datetime, end_date: datetime) -> list:
    """Fetch tweets from Crimson Hexagon that match the given query over the given date range."""
    ch_monitor_id = int(query)

    log.debug("crimson_hexagon_twitter.fetch_posts")

    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    config = TopicsMineConfig()
    api_key = config.crimson_hexagon_api_key()

    end_date = end_date + datetime.timedelta(days=1)

    start_arg = start_date.strftime('%Y-%m-%d')
    end_arg = end_date.strftime('%Y-%m-%d')

    url = (
        "https://api.crimsonhexagon.com/api/monitor/posts?auth=%s&id=%d&start=%s&end=%s&extendLimit=true" %
        (api_key, ch_monitor_id, start_arg, end_arg))

    log.debug("crimson hexagon url: " + url)

    response = ua.get(url)

    if not response.is_success():
        raise McPostsCHTwitterDataException("error fetching posts: " + response.decoded_content())

    decoded_content = response.decoded_content()

    data = dict(decode_json(decoded_content))

    if 'status' not in data or data['status'] != 'success':
        raise McPostsCHTwitterDataException("Unknown response status: " + str(data))

    meta_tweets = data['posts']

    for mt in meta_tweets:
        mt['tweet_id'] = get_tweet_id_from_url(mt['url'])

    add_tweets_to_meta_tweets(meta_tweets)

    posts = []
    for mt in meta_tweets:
        log.warning("mt: %d" % mt['tweet_id'])
        if 'tweet' in mt:
            post = {
                'post_id': mt['tweet_id'],
                'data': mt,
                'content': mt['tweet']['text'],
                'publish_date': mt['tweet']['created_at'],
                'author': mt['tweet']['user']['screen_name'],
                'channel': mt['tweet']['user']['screen_name'],
                'url': mt['url'],
            }
            posts.append(post)

    return posts
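
# Shape of one post produced above (values are hypothetical):
# {
#     'post_id': 1234,                                   # numeric tweet ID
#     'data': {...},                                     # the full meta_tweet, including 'tweet'
#     'content': 'hello world',                          # tweet text
#     'publish_date': 'Mon Jan 01 00:00:00 +0000 2018',  # Twitter created_at format
#     'author': 'jdoe',
#     'channel': 'jdoe',
#     'url': 'https://twitter.com/jdoe/status/1234',
# }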