def list_structured_query_events(
        http_session: requests.AuthorizedSession,
        raw_query: str,
        start_time: datetime.datetime,
        end_time: datetime.datetime,
        page_size: Optional[int] = 10000) -> Mapping[str, Sequence[Any]]:
    """Lists up to 10,000 UDM events that match the query.

  If you receive the maximum number of results, there might still be more
  matching events within the specified time range. You might want to narrow
  the time range and issue the call again to make sure you see all the
  available results.

  You can use the API call ListStructuredQueryEvents to search for UDM events
  by UDM field.

  Args:
    http_session: Authorized session for HTTP requests.
    raw_query: Query for searching UDM events.
    start_time: Inclusive beginning of the time range of events to return, with
      any timezone (even a timezone-unaware datetime object, i.e. local time).
    end_time: The exclusive end of the time range of events to return, with any
      timezone (even a timezone-unaware datetime object, i.e. local time).
    page_size: Maximum number of events to return, up to 10,000 (default =
      10,000).

  Returns:
    {
      "results": [
        {
          "event": "..." <-- UDM event
          "eventLogToken": "..."
        },
        ...More events...
      ],
      "moreDataAvailable": true
      "runtimeErrors": [
        {
          "errorText": "..."
        },
        ...More errors...
      ]
    }

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
    (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v1/events/liststructuredqueryevents"
    params = {
        "raw_query": raw_query,
        "start_time": datetime_converter.strftime(start_time),
        "end_time": datetime_converter.strftime(end_time),
        "page_size": page_size
    }
    response = http_session.request("GET", url, params=params)

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    return response.json()
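
# A minimal usage sketch, not part of the original sample: queries the last
# hour of UDM events. It assumes the chronicle_auth helpers used by the
# streaming sample below; the credentials path and query are hypothetical.
def _example_list_structured_query_events():
    session = chronicle_auth.init_session(
        chronicle_auth.init_credentials("path/to/credentials.json"))
    end_time = datetime.datetime.now(datetime.timezone.utc)
    start_time = end_time - datetime.timedelta(hours=1)
    events = list_structured_query_events(
        session, 'metadata.event_type = "NETWORK_DNS"', start_time, end_time)
    for result in events.get("results", []):
        print(result["event"])
    if events.get("moreDataAvailable"):
        print("More data available - narrow the time range and query again.")
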
def list_asset_events(
    http_session: requests.AuthorizedSession,
    indicator: str,
    asset: str,
    start_time: datetime.datetime,
    end_time: datetime.datetime,
    ref_time: datetime.datetime,
    page_size: Optional[int] = 0
) -> Tuple[Sequence[Mapping[str, Any]], bool, str]:
    """Lists up to 10,000 UDM events that reference an asset in a time range.

  If there are more matching events than what was returned (according to the
  second element in the tuple returned by this function), you should split this
  call into multiple shorter time ranges to ensure you have visibility into all
  the available events.

  Args:
    http_session: Authorized session for HTTP requests.
    indicator: Type of asset indicator - either "hostname", "asset_ip_address",
      "mac", or "product_id".
    asset: Asset indicator value - a specific hostname, IP address, MAC address,
      or event ID from the logging product that generated the event.
    start_time: Inclusive beginning of the time range of events to return, with
      any timezone (even a timezone-unaware datetime object, i.e. local time).
    end_time: The exclusive end of the time range of events to return, with any
      timezone (even a timezone-unaware datetime object, i.e. local time).
    ref_time: Reference time, used to disambiguate asset indicators, which may
      refer to different assets at different points in time, with any timezone
      (even a timezone-unaware datetime object, i.e. local time).
    page_size: Maximum number of events to return, up to 10,000 (a value of 0
      or None means the server-side default of 10,000).

  Returns:
    Tuple with 3 elements: (1) a list of all the UDM events (within the defined
    page) as Python dictionaries, (2) a Boolean value indicating whether there
    are matching events that were not returned, i.e. whether the matching event
    count exceeded the page size, and (3) a URL to see all these asset events in
    the Chronicle web UI.

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
    (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v1/asset/listevents"
    params = {
        "asset." + indicator: asset,
        "start_time": datetime_converter.strftime(start_time),
        "end_time": datetime_converter.strftime(end_time),
        "reference_time": datetime_converter.strftime(ref_time),
        "page_size": page_size,
    }
    response = http_session.request("GET", url, params=params)

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    response_json = response.json()
    return (response_json.get("events", []),
            response_json.get("moreDataAvailable", False),
            response_json["uri"][0])
def run_retrohunt(http_session: requests.AuthorizedSession, version_id: str,
                  start_time: datetime.datetime,
                  end_time: datetime.datetime) -> Mapping[str, Any]:
    """Run a retrohunt.

  Args:
    http_session: Authorized session for HTTP requests.
    version_id: Unique ID of the detection rule to run a retrohunt for
      ("ru_<UUID>" or "ru_<UUID>@v_<seconds>_<nanoseconds>"). If a version
      suffix isn't specified we use the rule's latest version.
    start_time: The start time of the time range the retrohunt will process.
    end_time: The end time of the time range the retrohunt will process.

  Returns:
    New retrohunt that was started for the given rule.

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
      (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v2/detect/rules/{version_id}:runRetrohunt"
    body = {
        "start_time": datetime_converter.strftime(start_time),
        "end_time": datetime_converter.strftime(end_time),
    }

    response = http_session.request("POST", url, json=body)
    # Expected server response:
    # {
    #   "retrohuntId": "oh_<UUID>",
    #   "ruleId": "ru_<UUID>",
    #   "versionId": "ru_<UUID>@v_<seconds>_<nanoseconds>",
    #   "eventStartTime": "yyyy-mm-ddThh:mm:ss.ssssssZ",
    #   "eventEndTime": "yyyy-mm-ddThh:mm:ss.ssssssZ",
    #   "retrohuntStartTime": "yyyy-mm-ddThh:mm:ss.ssssssZ",
    #   "retrohuntEndTime": "yyyy-mm-ddThh:mm:ss.ssssssZ", <- only if completed.
    #   "state": "RUNNING"/"DONE"/"CANCELLED",
    #   "progressPercentage": "<value from 0.00 to 100.00>"
    # }

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    return response.json()
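
# A usage sketch (assumed): starts a retrohunt over the past week. The rule
# version ID is a placeholder; substitute a real "ru_<UUID>" value.
def _example_run_retrohunt():
    session = chronicle_auth.init_session(
        chronicle_auth.init_credentials("path/to/credentials.json"))
    end_time = datetime.datetime.now(datetime.timezone.utc)
    start_time = end_time - datetime.timedelta(weeks=1)
    retrohunt = run_retrohunt(session, "ru_<UUID>", start_time, end_time)
    print(retrohunt["retrohuntId"], retrohunt["state"])
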
def test_rule(
    http_session: google_requests.AuthorizedSession,
    rule_content: str,
    event_start_time: datetime.datetime,
    event_end_time: datetime.datetime,
    max_results: int = 0):
  """Calls stream_test_rule once to test rule.

  Args:
    http_session: Authorized session for HTTP requests.
    rule_content: Content of a detection rule, used to evaluate logs.
    event_start_time: Start time of the time range of logs to test rule over.
    event_end_time: End time of the time range of logs to test rule over
      (max allowed time range duration is 2 weeks).
    max_results: Maximum number of detections to return.
      Must be nonnegative and is capped at a server-side limit of 10,000.
      Optional - if not specified, a server-side default of 1,000 is used.

  Raises:
    RuntimeError: Streaming connection was unexpectedly closed or aborted.
  """
  req_data = {
      "rule.rule_text": rule_content,
      "start_time": datetime_converter.strftime(event_start_time),
      "end_time": datetime_converter.strftime(event_end_time),
      "max_results": max_results
  }

  dets, errs, disconnection_reason = stream_test_rule(http_session, req_data)

  # Print out the total number of detections/rule execution errors
  # that were successfully found from testing the rule, up to the point
  # of disconnection.
  print(f"Got {len(dets)} detections and {len(errs)} rule execution errors")

  if disconnection_reason:
    raise RuntimeError(f"Connection failed: {disconnection_reason}. Retry "
                       "testing the rule.")
def list_iocs(http_session: requests.AuthorizedSession,
              start_time: datetime.datetime,
              page_size: Optional[int] = 10000) -> Mapping[str, Any]:
    """Lists up to 10,000 Indications of Compromise (IoCs) after the start time.

  If you receive the maximum number of results, there might still be more
  matching IoCs within the specified time range. You might want to narrow the
  time range and issue the call again to make sure you see all the available
  results.

  You can use the API call ListArtifactAssets to drill down into the assets
  associated with this IoC (hostnames, IP and MAC addresses).

  Args:
    http_session: Authorized session for HTTP requests.
    start_time: Inclusive beginning of the time range of IoCs to return, with
      any timezone (even a timezone-unaware datetime object, i.e. local time).
    page_size: Maximum number of IoCs to return, up to 10,000 (default =
      10,000).

  Returns:
    {
      "response": {
        "matches": [
          {
            "artifact": {
              "domainName": "..." <-- Or destination IP address, or file hashes
            },
            "sources": [
              {
                "source": "..."
                "confidenceScore": {
                  "normalizedConfidenceScore": "..." <-- e.g. low/medium/high
                  "intRawConfidenceScore": 0,
                },
                "rawSeverity": "...",
                "category": "...",
              }
            ],
            "iocIngestTime": "yyyy-mm-ddThh:mm:ssZ",
            "firstSeenTime": "yyyy-mm-ddThh:mm:ss.ssssssZ",
            "lastSeenTime": "yyyy-mm-ddThh:mm:ss.ssssssZ",
            "uri": [
              "https://customer.backstory.chronicle.security/..."
            ]
          },
          ...More matches...
        ]
      }
    }

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
    (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v1/ioc/listiocs"
    params = {
        "start_time": datetime_converter.strftime(start_time),
        "page_size": page_size
    }
    response = http_session.request("GET", url, params=params)

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    return response.json()
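
# A usage sketch (assumed): lists IoCs from the past 30 days and prints each
# artifact together with its number of sources.
def _example_list_iocs():
    session = chronicle_auth.init_session(
        chronicle_auth.init_credentials("path/to/credentials.json"))
    start_time = datetime.datetime.now(
        datetime.timezone.utc) - datetime.timedelta(days=30)
    iocs = list_iocs(session, start_time)
    for match in iocs.get("response", {}).get("matches", []):
        print(match["artifact"], len(match.get("sources", [])))
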
def stream_detection_alerts_in_retry_loop(
    credentials_file: str,
    process_detection_batch_callback: Callable[[DetectionBatch], None],
    initial_continuation_time: Optional[datetime.datetime] = None,
):
    """Calls stream_detection_alerts and manages state for reconnections.


  Args:
    credentials_file: Path to credentials file, used to make an authorized
      session for HTTP requests.
    process_detection_batch_callback: A callback function that operates on a
      single detection batch (e.g. to integrate with other platforms).
    initial_continuation_time: A continuation time to be used in the initial
      stream_detection_alerts connection (default = server will set this to the
      time of connection). Subsequent stream_detection_alerts connections will
      use continuation times from past connections.

  Raises:
    RuntimeError: Hit retry limit after multiple consecutive failures
      without success.
  """
    continuation_time = datetime_converter.strftime(initial_continuation_time)

    # Our retry loop uses exponential backoff with a retry limit.
    # For simplicity, we retry for all types of errors.
    max_consecutive_failures = 7
    consecutive_failures = 0
    while True:
        if consecutive_failures > max_consecutive_failures:
            raise RuntimeError("exiting retry loop. consecutively failed " +
                               f"{consecutive_failures} times without success")

        if consecutive_failures:
            sleep_duration = 2**consecutive_failures
            _LOGGER_.info("sleeping %d seconds before retrying",
                          sleep_duration)
            time.sleep(sleep_duration)

        req_data = {} if not continuation_time else {
            "continuationTime": continuation_time
        }

        # Connections may last hours. Make a new authorized session on every
        # iteration of the retry loop to avoid session expiration.
        session = chronicle_auth.init_session(
            chronicle_auth.init_credentials(credentials_file))

        # This function runs until disconnection.
        response_code, disconnection_reason, most_recent_continuation_time = stream_detection_alerts(
            session, req_data, process_detection_batch_callback)

        if most_recent_continuation_time:
            consecutive_failures = 0
            continuation_time = most_recent_continuation_time
        else:
            _LOGGER_.info(disconnection_reason if disconnection_reason else
                          "connection unexpectedly closed")

            # Do not retry if the disconnection was due to invalid arguments.
            # We assume a disconnection was due to invalid arguments if the connection
            # was refused with HTTP status code 400.
            if response_code == 400:
                raise RuntimeError("exiting retry loop. connection refused " +
                                   f"due to invalid arguments {req_data}")

            consecutive_failures += 1
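
# A usage sketch (assumed): wires a trivial callback into the retry loop.
# Note that this call blocks until the retry limit is hit or the arguments
# are rejected; the callback only prints each batch, so no assumptions are
# made about the internal structure of DetectionBatch.
def _example_stream_detection_alerts_in_retry_loop():
    def print_batch(batch: DetectionBatch):
        print(f"Got a detection batch: {batch}")

    stream_detection_alerts_in_retry_loop("path/to/credentials.json",
                                          print_batch)
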
def list_detections(
        http_session: requests.AuthorizedSession,
        version_id: str,
        page_size: int = 0,
        page_token: str = "",
        detection_start_time: Optional[datetime.datetime] = None,
        detection_end_time: Optional[datetime.datetime] = None,
        alert_state: str = "") -> Tuple[Sequence[Mapping[str, Any]], str]:
    """Retrieves all the detections of the specified version_id.

  Args:
    http_session: Authorized session for HTTP requests.
    version_id: Unique ID of the detection rule to list detections for.
      Valid version ID formats:
        Detections for a specific rule version:
        "ru_<UUID>@v_<seconds>_<nanoseconds>"
        Detections for the latest version of a rule: "ru_<UUID>"
        Detections across all versions of a rule: "ru_<UUID>@-"
        Detections across all rules and all versions: "-"
    page_size: Maximum number of detections in the response.
      Must be non-negative, and is capped at a server-side limit of 1000.
      Optional - we use a server-side default of 100 if the size is 0 or a
      None value.
    page_token: Base64-encoded string token to retrieve a specific page of
      results. Optional - we retrieve the first page if the token is an empty
      string or a None value.
    detection_start_time: The time to start listing detections from, inclusive
      (default = no min detection_start_time).
    detection_end_time: The time to end listing detections to, exclusive
      (default = no max detection_end_time).
    alert_state: A string that filters which detections are returned by their
      AlertState (i.e. 'ALERTING', 'NOT_ALERTING') (default = no filter on
      alert_state).

  Returns:
    All the detections (within the defined page) ordered by descending
    detection_time, as well as a Base64 token for getting the detections of the
    next page (an empty token string means the currently retrieved page is the
    last one).

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
      (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v2/detect/rules/{version_id}/detections"
    params_list = [
        ("page_size", page_size),
        ("page_token", page_token),
        ("detection_start_time",
         datetime_converter.strftime(detection_start_time)),
        ("detection_end_time",
         datetime_converter.strftime(detection_end_time)),
        ("alert_state", alert_state),
    ]
    params = {k: v for k, v in params_list if v}

    response = http_session.request("GET", url, params=params)
    # Expected server response:
    # {
    #   "detections": [
    #     {
    #       "id": "de_<UUID>",
    #       "type": "RULE_DETECTION",
    #       "createdTime": "yyyy-mm-ddThh:mm:ssZ",
    #       "detectionTime": "yyyy-mm-ddThh:mm:ssZ",
    #       "timeWindow": {
    #         "startTime": "yyyy-mm-ddThh:mm:ssZ",
    #         "endTime": "yyyy-mm-ddThh:mm:ssZ",
    #       }
    #       "collectionElements": [
    #         {
    #           "label": "e1",
    #           "references": [
    #             {
    #               "event": <UDM keys and values / sub-dictionaries>...
    #             },
    #             ...
    #           ],
    #         },
    #         {
    #           "label": "e2",
    #           ...
    #         },
    #         ...
    #       ],
    #       "detection": [
    #         {
    #           "ruleId": "ru_<UUID>",
    #           "ruleName": "<rule_name>",
    #           "ruleVersion": "ru_<UUID>@v_<seconds>_<nanoseconds>",
    #           "urlBackToProduct": "<URL>",
    #           "alertState": "ALERTING"/"NOT_ALERTING",
    #           "ruleType": "SINGLE_EVENT"/"MULTI_EVENT",
    #           "detectionFields": [
    #             {
    #               "key": "<field name>",
    #               "value": "<field value>"
    #             }
    #           ]
    #         },
    #       ],
    #     },
    #     ...
    #   ],
    #   "nextPageToken": "<next_page_token>"
    # }

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    response_json = response.json()
    return (response_json.get("detections", []),
            response_json.get("nextPageToken", ""))
def list_errors(
    http_session: requests.AuthorizedSession,
    error_category: str = "",
    error_start_time: Optional[datetime.datetime] = None,
    error_end_time: Optional[datetime.datetime] = None,
    version_id: str = "",
    page_size: int = 0,
    page_token: str = "") -> Tuple[Sequence[Mapping[str, Any]], str]:
  """Lists errors.

  Args:
    http_session: Authorized session for HTTP requests.
    error_category: A string that filters which errors are returned by their
      ErrorCategory (i.e. 'RULES_EXECUTION_ERROR')
      (default = no filter on error category).
    error_start_time: The time to start listing errors from, inclusive
      (default = no min error_start_time).
    error_end_time: The time to end listing errors to, exclusive (default = no
      max error_end_time).
    version_id: Unique ID of the detection rule to retrieve errors for
      ("ru_<UUID>" or "ru_<UUID>@v_<seconds>_<nanoseconds>"). If a version
      suffix isn't specified, we list errors for all versions of that rule.
    page_size: Maximum number of errors to return.
      Must be non-negative, and is capped at a server-side limit of 1000
      (default = server-side default of 100).
    page_token: Page token from a previous ListErrors call used for pagination.
      If not specified, the first page is returned.

  Returns:
    List of errors and a page token for the next page of errors, if there are
    any.

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
      (response.status_code >= 400).
  """
  url = f"{CHRONICLE_API_BASE_URL}/v2/health/errors"
  params_list = [("category", error_category),
                 ("start_time", datetime_converter.strftime(error_start_time)),
                 ("end_time", datetime_converter.strftime(error_end_time)),
                 ("rule_filter.version_id", version_id),
                 ("page_size", page_size), ("page_token", page_token)]
  params = {k: v for k, v in params_list if v}

  response = http_session.request("GET", url, params=params)
  # Expected server response:
  # {
  #   "errors": [
  #     {
  #         'category': '<category>',
  #         'errorId': 'ed_<UUID>',
  #         'errorTime': 'yyyy-mm-ddThh:mm:ssZ',
  #         'ruleExecution': {
  #           'ruleId': 'ru_<UUID>',
  #           'versionId': 'ru_<UUID>@v_<seconds>_<nanoseconds>',
  #           'windowEndTime': 'yyyy-mm-ddThh:mm:ssZ',
  #           'windowStartTime': 'yyyy-mm-ddThh:mm:ssZ'
  #         },
  #         'text': '<error_message>'
  #     },
  #     ...
  #   ],
  #   "nextPageToken": "<next_page_token>"
  # }

  if response.status_code >= 400:
    print(response.text)
  response.raise_for_status()
  response_json = response.json()
  return (response_json.get("errors", []),
          response_json.get("nextPageToken", ""))
def list_alerts(
        http_session: requests.AuthorizedSession,
        start_time: datetime.datetime,
        end_time: datetime.datetime,
        page_size: Optional[int] = 100000) -> Mapping[str, Sequence[Any]]:
    """Lists up to 100,000 asset- and user-based alerts in the given time range.

  If you receive the maximum number of results, there might still be more
  matching alerts within the specified time range. You might want to narrow
  the time range and issue the call again to make sure you see all the
  available results.

  Args:
    http_session: Authorized session for HTTP requests.
    start_time: The inclusive beginning of the time range of alerts to return,
      with any timezone (even a timezone-unaware datetime object, i.e. local
      time).
    end_time: The exclusive end of the time range of alerts to return, with any
      timezone (even a timezone-unaware datetime object, i.e. local time).
    page_size: Maximum number of alerts to return, up to 100,000 (default =
      100,000).

  Returns:
    {
      "alerts": [
        ...One or more asset alerts (if zero, no "alerts" field at all)...
      ],
      "userAlerts": [
        ...One or more user alerts (if zero, no "userAlerts" field at all)...
      ]
    }

  Asset alert structure:
    {
      "asset": {
        "hostname": "..." <-- Or IP address, MAC address, product ID
      },
      "alertInfos": [
        ...One or more alert infos...
      ]
    }

  User alert structure:
    {
      "user": {
        "email": "..." <-- Or user name, Windows SID, employee ID, LDAP ID
      },
      "alertInfos": [
        ...One or more alert infos...
      ]
    }

  Alert info structure:
    {
      "name": "...",
      "sourceProduct": "...",
      "timestamp": "yyyy-mm-ddThh:mm:ssZ",
      "rawLog": "...", <-- Base64 encoded
      "uri": [
        "https://customer.backstory.chronicle.security/..."
      ],
      "udmEvent": {
        ...
      }
    }

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
    (response.status_code >= 400).
  """
    url = f"{CHRONICLE_API_BASE_URL}/v1/alert/listalerts"
    params = {
        "start_time": datetime_converter.strftime(start_time),
        "end_time": datetime_converter.strftime(end_time),
        "page_size": page_size
    }
    response = http_session.request("GET", url, params=params)

    if response.status_code >= 400:
        print(response.text)
    response.raise_for_status()
    return response.json()
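
# A usage sketch (assumed): lists one day of alerts and counts the asset- and
# user-based entries. The "alerts" and "userAlerts" fields are absent when
# empty, hence the .get() defaults.
def _example_list_alerts():
    session = chronicle_auth.init_session(
        chronicle_auth.init_credentials("path/to/credentials.json"))
    end_time = datetime.datetime.now(datetime.timezone.utc)
    alerts = list_alerts(session, end_time - datetime.timedelta(days=1),
                         end_time)
    print(f"{len(alerts.get('alerts', []))} asset alerts, "
          f"{len(alerts.get('userAlerts', []))} user alerts")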