def listen_with_handlers(logger, amqp, handlers):
    """Consume requests from the AMQP queue named by AMQP_QUEUE and dispatch
    them to the appropriate handler, publishing results to per-request
    response queues.

    Arguments:
    - logger: database-backed logger exposing print(level, fmt, *args),
      exception(...), and a committable `connection`.
    - amqp: open AMQP (pika-style) connection used to create a channel.
    - handlers: iterable of handler objects, each exposing `name`,
      `requires_delay`, and `handle(reddit, auth, args) -> (status, info)`.

    Runs indefinitely. Requests to reddit are spaced by at least
    MIN_TIME_BETWEEN_REQUESTS_S seconds, with exponential backoff after
    failed requests and respect for any explicit ratelimit reported via
    reddit's x-ratelimit-* response headers.
    """
    handlers_by_name = {handler.name: handler for handler in handlers}
    queue = os.environ['AMQP_QUEUE']
    # response_queues maps queue name -> {'version': ..., 'last_seen_at': ...}
    response_queues = {}
    last_processed_at = None
    min_td_btwn_reqs = timedelta(
        seconds=float(os.environ['MIN_TIME_BETWEEN_REQUESTS_S']))
    failed_requests_counter = 0
    explicit_ratelimit_until = None  # unix timestamp, or None when no ratelimit

    def reddit_request_callback(endpoint_name, resp):
        """Track per-request outcomes so delay_for_reddit can back off.

        Successes decay the failure counter; failures grow it. When reddit's
        ratelimit headers report (almost) no remaining budget, records an
        explicit timestamp before which requests should not be issued.
        """
        nonlocal failed_requests_counter
        nonlocal explicit_ratelimit_until

        if 200 <= resp.status_code <= 299:
            failed_requests_counter = max(0, failed_requests_counter - 1)
        else:
            failed_requests_counter += 1

        if 'x-ratelimit-reset' in resp.headers and 'x-ratelimit-remaining' in resp.headers:
            reset = float(resp.headers['x-ratelimit-reset'])
            remaining = float(resp.headers['x-ratelimit-remaining'])
            logger.print(Level.TRACE,
                         ('Received ratelimit headers from endpoint {}; ' +
                          'x-ratelimit-reset: {}, x-ratelimit-remaining: {}'),
                         endpoint_name, reset, remaining)
            if remaining <= 1:
                logger.print(
                    Level.WARN,
                    ('Setting explicit ratelimit from ratelimit headers ' +
                     'in response to {}! reset={}, remaining={}'),
                    endpoint_name, reset, remaining)
                explicit_ratelimit_until = time.time() + reset

    def delay_for_reddit():
        """Sleep as needed to respect minimum spacing, exponential backoff
        after failures, and any explicit ratelimit reported by reddit."""
        nonlocal explicit_ratelimit_until

        if last_processed_at is None:
            return

        # Backoff: 10s * 2^failures, capped at 30 minutes.
        target_delay = (min_td_btwn_reqs
                        if failed_requests_counter == 0 else timedelta(
                            seconds=min((10 * (2**failed_requests_counter)),
                                        1800)))
        if explicit_ratelimit_until is not None:
            seconds_until_reset = explicit_ratelimit_until - time.time()
            if seconds_until_reset <= 0:
                # Ratelimit window already elapsed; forget it.
                explicit_ratelimit_until = None
            else:
                # BUGFIX: the original passed a timedelta and a float to
                # max(), which raises TypeError. Compare in seconds instead.
                target_delay = timedelta(
                    seconds=max(target_delay.total_seconds(),
                                seconds_until_reset + 1))

        delay_so_far = datetime.now() - last_processed_at
        if delay_so_far < target_delay:
            req_sleep_time = target_delay - delay_so_far
            time.sleep(req_sleep_time.total_seconds())

    time_btwn_clean = timedelta(hours=1)
    remember_td = timedelta(days=1)
    last_cleaned_respqueues = datetime.now()
    reddit = Reddit()
    reddit.request_callback = reddit_request_callback
    auth = None
    # Re-authenticate when the token is within this margin of expiring.
    min_time_to_expiry = timedelta(minutes=1)
    channel = amqp.channel()
    channel.queue_declare(queue)
    for method_frame, properties, body_bytes in channel.consume(
            queue, inactivity_timeout=600):
        # Periodically forget response queues we haven't seen in a while.
        if (datetime.now() - last_cleaned_respqueues) > time_btwn_clean:
            last_cleaned_respqueues = datetime.now()
            for k in list(response_queues.keys()):
                val = response_queues[k]
                time_since_seen = datetime.now() - val['last_seen_at']
                if time_since_seen > remember_td:
                    logger.print(
                        Level.DEBUG,
                        'Forgetting about response queue {} - last saw it {} ago',
                        k, time_since_seen)
                    del response_queues[k]
            logger.connection.commit()

        if method_frame is None:
            # consume() yielded due to inactivity_timeout, not a message.
            logger.print(Level.TRACE, 'No messages in the last 10 minutes')
            logger.connection.commit()
            continue

        body_str = body_bytes.decode('utf-8')
        try:
            body = json.loads(body_str)
        except json.JSONDecodeError as exc:
            logger.exception(
                Level.WARN,
                ('Received non-json packet! ' +
                 'Error info: doc={}, msg={}, pos={}, lineno={}, colno={}'),
                exc.doc, exc.msg, exc.pos, exc.lineno, exc.colno)
            logger.connection.commit()
            channel.basic_nack(method_frame.delivery_tag, requeue=False)
            continue

        if _detect_structure_errors_with_logging(logger, body_str, body):
            logger.connection.commit()
            channel.basic_nack(method_frame.delivery_tag, requeue=False)
            continue

        # Track the requesting queue and enforce per-queue versioning:
        # stale-versioned messages are dropped unless they opt out via
        # ignore_version.
        resp_info = response_queues.get(body['response_queue'])
        if resp_info is None:
            logger.print(Level.DEBUG,
                         'New response queue {} detected at version {}',
                         body['response_queue'], body['version_utc_seconds'])
            # Queues prefixed 'void' never receive replies, so don't declare.
            if not body['response_queue'].startswith('void'):
                channel.queue_declare(body['response_queue'])
            resp_info = {'version': body['version_utc_seconds']}
            response_queues[body['response_queue']] = resp_info
        elif not body.get('ignore_version') and body[
                'version_utc_seconds'] < resp_info['version']:
            logger.print(
                Level.DEBUG,
                'Ignoring message to response queue {} with type {}; '
                'specified version={} is below current version={}',
                body['response_queue'], body['type'],
                body['version_utc_seconds'], resp_info['version'])
            logger.connection.commit()
            channel.basic_nack(method_frame.delivery_tag, requeue=False)
            continue
        elif body['version_utc_seconds'] > resp_info['version']:
            logger.print(
                Level.DEBUG,
                'Detected newer version for response queue {}, was {} and is now {}',
                body['response_queue'], resp_info['version'],
                body['version_utc_seconds'])
            resp_info['version'] = body['version_utc_seconds']
            logger.connection.commit()
        resp_info['last_seen_at'] = datetime.now()

        if body['type'] not in handlers_by_name:
            logger.print(
                Level.WARN,
                'Received request to response queue {} with an unknown type {}',
                body['response_queue'], body['type'])
            logger.connection.commit()
            channel.basic_nack(method_frame.delivery_tag, requeue=False)
            continue

        logger.print(
            Level.TRACE,
            'Processing request to response queue {} with type {} ({})',
            body['response_queue'], body['type'], body['uuid'])
        logger.connection.commit()

        # Refresh auth if missing or close to expiry.
        if auth is None or auth.expires_at < (datetime.now() +
                                              min_time_to_expiry):
            logger.print(Level.TRACE,
                         'Reauthenticating with reddit (expires at {})',
                         auth.expires_at if auth is not None else 'None')
            logger.connection.commit()
            delay_for_reddit()
            auth = _auth(reddit, logger)
            last_processed_at = datetime.now()
            if auth is None:
                logger.print(
                    Level.WARN,
                    'Failed to authenticate with reddit! Will nack, requeue=True'
                )
                logger.connection.commit()
                channel.basic_nack(method_frame.delivery_tag, requeue=True)
                continue

        handler = handlers_by_name[body['type']]
        if handler.requires_delay:
            delay_for_reddit()
        try:
            status, info = handler.handle(reddit, auth, body['args'])
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Narrowed to Exception; the
            # failure is still logged and reported via the 'failure' path.
            logger.exception(
                Level.WARN,
                'An exception occurred while processing request to response '
                'queue {} with type {}: body={}', body['response_queue'],
                body['type'], body)
            status = 'failure'
            info = None
        if handler.requires_delay:
            last_processed_at = datetime.now()

        handle_style = _get_handle_style(body.get('style'), status)
        logger.print(
            getattr(Level, handle_style['log_level']),
            'Got status {} to response type {} for queue {} ({}) - handling with operation {}',
            status, body['type'], body['response_queue'], body['uuid'],
            handle_style['operation'])
        logger.connection.commit()

        if status == 401:
            logger.print(
                Level.INFO,
                'Due to 401 status code, purging cached authorization information. '
                'It should not have expired until {}', auth.expires_at)
            logger.connection.commit()
            auth = None

        # Deliver the outcome according to the resolved handle style.
        if body['response_queue'].startswith('void'):
            channel.basic_ack(method_frame.delivery_tag)
        elif handle_style['operation'] == 'copy':
            channel.basic_publish(
                '', body['response_queue'],
                json.dumps({
                    'uuid': body['uuid'],
                    'type': 'copy',
                    'status': status,
                    'info': info
                }))
            channel.basic_ack(method_frame.delivery_tag)
        elif handle_style['operation'] == 'retry':
            new_bod = body.copy()
            new_bod['ignore_version'] = handle_style.get(
                'ignore_version', False)
            channel.basic_publish('', queue, json.dumps(new_bod))
            channel.basic_nack(method_frame.delivery_tag, requeue=False)
        elif handle_style['operation'] == 'success':
            channel.basic_publish(
                '', body['response_queue'],
                json.dumps({
                    'uuid': body['uuid'],
                    'type': 'success'
                }))
            channel.basic_ack(method_frame.delivery_tag)
        else:
            if handle_style['operation'] != 'failure':
                logger.print(
                    Level.WARN,
                    'Unknown handle style {} to status {} to response queue {} for type {}'
                    ' - treating as failure', handle_style['operation'],
                    status, body['response_queue'], body['type'])
            channel.basic_publish(
                '', body['response_queue'],
                json.dumps({
                    'uuid': body['uuid'],
                    'type': 'failure'
                }))
            channel.basic_nack(method_frame.delivery_tag, requeue=False)