    # NOTE(review): these two statements are the tail of a function whose `def`
    # line is above this chunk (it raises on HTTP error, then returns the
    # "lines" field of the JSON body) — confirm against the full file.
    response.raise_for_status()
    return response.json()["lines"]


if __name__ == "__main__":
    # Command-line entry point: fetch the named list via an authorized session
    # and write its lines to a file (or STDOUT).
    parser = argparse.ArgumentParser()
    # Registers the shared credentials-file argument; helper is defined in the
    # chronicle_auth module (presumably --credentials_file) — TODO confirm.
    chronicle_auth.add_argument_credentials_file(parser)
    parser.add_argument(
        "-n", "--name", type=str, required=True,
        help="unique name for the list")
    parser.add_argument(
        "-f",
        "--list_file",
        type=argparse.FileType("w"),
        required=True,
        # This argument is opened for WRITING (FileType("w")); "-" maps to
        # STDOUT.
        # File example:
        #   python3 -m lists.get_list <other args> -f <path>
        # STDOUT example:
        #   python3 -m lists.get_list <other args> -f -
        help="path of a file to write the list content to, or - for STDOUT")
    args = parser.parse_args()
    # Build an authorized HTTP session from the supplied credentials file.
    session = chronicle_auth.init_session(
        chronicle_auth.init_credentials(args.credentials_file))
    list_lines = get_list(session, args.name)
    # One list entry per line, with a trailing newline.
    args.list_file.write("\n".join(list_lines) + "\n")
def stream_detection_alerts_in_retry_loop(
    credentials_file: str,
    process_detection_batch_callback: Callable[[DetectionBatch], None],
    initial_continuation_time: Optional[datetime.datetime] = None,
):
    """Runs stream_detection_alerts forever, reconnecting after disconnects.

    Keeps track of the most recent continuation time across connections so a
    reconnect resumes where the previous stream left off. Retries use
    exponential backoff (2**failures seconds) and give up after too many
    consecutive failures, or immediately if the server rejects the request
    as invalid (HTTP 400).

    Args:
        credentials_file: Path to credentials file, used to build an
            authorized session for each connection attempt.
        process_detection_batch_callback: Callback invoked on every detection
            batch received from the stream.
        initial_continuation_time: Continuation time for the very first
            connection (default = server picks the connection time); later
            connections reuse the time returned by earlier ones.

    Raises:
        RuntimeError: Too many consecutive failures, or the connection was
            refused due to invalid arguments.
    """
    continuation_time = datetime_converter.strftime(initial_continuation_time)

    # Exponential backoff with a hard cap on consecutive failures; for
    # simplicity every error type is retried the same way.
    failure_cap = 7
    failure_streak = 0
    while True:
        if failure_streak > failure_cap:
            raise RuntimeError("exiting retry loop. consecutively failed " +
                               f"{failure_streak} times without success")

        if failure_streak:
            backoff_secs = 2**failure_streak
            _LOGGER_.info("sleeping %d seconds before retrying", backoff_secs)
            time.sleep(backoff_secs)

        request_body = ({
            "continuationTime": continuation_time
        } if continuation_time else {})

        # A stream can stay open for hours, so build a fresh authorized
        # session on every pass to avoid mid-connection session expiration.
        session = chronicle_auth.init_session(
            chronicle_auth.init_credentials(credentials_file))

        # Blocks until the stream disconnects for any reason.
        (response_code, disconnection_reason,
         most_recent_continuation_time) = stream_detection_alerts(
             session, request_body, process_detection_batch_callback)

        if most_recent_continuation_time:
            # Progress was made: reset the backoff and remember where to
            # resume on the next connection.
            failure_streak = 0
            continuation_time = most_recent_continuation_time
        else:
            _LOGGER_.info(disconnection_reason if disconnection_reason else
                          "connection unexpectedly closed")

            # An HTTP 400 refusal means the request itself is bad; retrying
            # with the same arguments cannot succeed.
            if response_code == 400:
                raise RuntimeError("exiting retry loop. connection refused " +
                                   f"due to invalid arguments {request_body}")

            failure_streak += 1