    enable_ingestion: Whether to enable GCP log ingestion or not.

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
      (response.status_code >= 400).
  """
  # Resource name of the organization's GCP settings to PATCH.
  # NOTE(review): the same organization_id is used for both the organizations/
  # and gcpAssociations/ path segments — confirm the association segment should
  # not be a distinct GCP organization ID.
  name = f"organizations/{organization_id}/gcpAssociations/{organization_id}/gcpSettings"
  url = f"{SERVICE_MANAGEMENT_API_BASE_URL}/v1/{name}"
  # Default to inactive; flip to active only when ingestion was requested.
  body = {
      "log_flow": "LOG_FLOW_INACTIVE",
  }
  if enable_ingestion:
    body["log_flow"] = "LOG_FLOW_ACTIVE"

  response = http_session.request("PATCH", url, json=body)
  # Print the server's error payload before raising, to aid debugging.
  if response.status_code >= 400:
    print(response.text)
  response.raise_for_status()


if __name__ == "__main__":
  cli = initialize_command_line_args()
  if not cli:
    sys.exit(1)  # A sanity check failed.
  session = chronicle_auth.initialize_http_session(
      cli.credentials_file, scopes=AUTHORIZATION_SCOPES)
  update_gcp_settings(session, cli.organization_id, cli.ingestion)
# "ruleExecution": { # "ruleId": "ru_<UUID>", # "versionId": "ru_<UUID>@v_<seconds>_<nanoseconds>", # "windowEndTime": "yyyy-mm-ddThh:mm:ssZ", # "windowStartTime": "yyyy-mm-ddThh:mm:ssZ" # }, # "text": "<error message>" # } if response.status_code >= 400: print(response.text) response.raise_for_status() return response.json() if __name__ == "__main__": parser = argparse.ArgumentParser() chronicle_auth.add_argument_credentials_file(parser) regions.add_argument_region(parser) parser.add_argument("-ei", "--error_id", type=str, required=True, help="error ID (for Detect errors: 'ed_<UUID>')") args = parser.parse_args() CHRONICLE_API_BASE_URL = regions.url(CHRONICLE_API_BASE_URL, args.region) session = chronicle_auth.initialize_http_session(args.credentials_file) error = get_error(session, args.error_id) print(json.dumps(error, indent=2))
def stream_detection_alerts_in_retry_loop(
    credentials_file: str,
    process_detection_batch_callback: Callable[[DetectionBatch], None],
    initial_continuation_time: Optional[datetime.datetime] = None,
):
  """Runs stream_detection_alerts repeatedly, reconnecting after failures.

  Keeps a persistent continuation time across connections so that no alerts
  are missed between reconnects, and applies exponential backoff between
  consecutive failed attempts.

  Args:
    credentials_file: Path to credentials file, used to make an authorized
      session for HTTP requests.
    process_detection_batch_callback: A callback functions that operates on
      a single detection batch. (e.g. to integrate with other platforms)
    initial_continuation_time: A continuation time to be used in the initial
      stream_detection_alerts connection (default = server will set this to
      the time of connection). Subsequent stream_detection_alerts connections
      will use continuation times from past connections.

  Raises:
    RuntimeError: Hit retry limit after multiple consecutive failures
      without success.
  """
  continuation_time = datetime_converter.strftime(initial_continuation_time)

  # Exponential backoff with a hard cap on consecutive failures; for
  # simplicity, every kind of error is retried the same way.
  max_failures = 7
  failure_streak = 0
  while failure_streak <= max_failures:
    if failure_streak:
      backoff_seconds = 2**failure_streak
      _LOGGER_.info("sleeping %d seconds before retrying", backoff_seconds)
      time.sleep(backoff_seconds)

    if continuation_time:
      request_body = {"continuationTime": continuation_time}
    else:
      request_body = {}

    # Connections may last hours. Make a new authorized session every retry
    # loop to avoid session expiration.
    session = chronicle_auth.initialize_http_session(credentials_file)

    # Blocks until the stream disconnects.
    response_code, disconnection_reason, most_recent_continuation_time = stream_detection_alerts(
        session, request_body, process_detection_batch_callback)

    if most_recent_continuation_time:
      # Progress was made: reset the failure streak and remember where to
      # resume on the next connection.
      failure_streak = 0
      continuation_time = most_recent_continuation_time
      continue

    _LOGGER_.info(disconnection_reason
                  if disconnection_reason else "connection unexpectedly closed")

    # Do not retry if the disconnection was due to invalid arguments.
    # We assume a disconnection was due to invalid arguments if the connection
    # was refused with HTTP status code 400.
    if response_code == 400:
      raise RuntimeError("exiting retry loop. connection refused " +
                         f"due to invalid arguments {request_body}")

    failure_streak += 1

  # Loop exit means failure_streak just exceeded the cap.
  raise RuntimeError("exiting retry loop. consecutively failed " +
                     f"{failure_streak} times without success")