def _checkModelsStatus(modelsJson, url, emailParams):
  """
  For all models, checks if the model has an error status.
  If model was OK, but is now in error, its db flag is set and an email is
  sent.
  If model was in error and is still in error, no email is sent.
  If model was in error, but is now OK, its db flag is cleared.

  :param modelsJson: A JSON containing descriptions of the models.
  """
  g_logger.debug("Checking models' status")
  modelsInError = ""
  for model in modelsJson:
    uid = model["uid"]
    if model["status"] == MetricStatus.ERROR:
      if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
        addErrorFlag(schema.modelsMonitorErrorFlags, uid)
        modelsInError += str(model) + "\n\n"
    else:
      removeErrorFlag(schema.modelsMonitorErrorFlags, uid)

  if modelsInError != "":
    g_logger.info("Found models entering error status")
    issue = _getIssueString("Model(s) entering error status.\n", modelsInError)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issue,
                                          params=emailParams)
  else:
    g_logger.info("Looking good -- no models were found in error status =)")
def _reportMetrics(monitoredResource, metrics, emailParams):
  """
  Sends email notification of specified out-of-order metrics. Avoids sending
  duplicate notifications using monitorsdb.

  :param monitoredResource: Path of the database containing metric_data table
  :type monitoredResource: string
  :param metrics: A list of out-of-order metric rows
  :type metrics: list
  :param emailParams: Parameters for sending email
  :type emailParams: dict
  """
  if len(metrics) > 0:
    message = ("The following rows of metric_data table were out of order:\n"
              "UID \tcount(rowid) \tmin(rowid) \tmax(rowid) \tmin(timestamp) "
              "\tmax(timestamp) \tmetric name\n")
    for row in metrics:
      message += str(row) + "\n"
    g_logger.info(message)

    if not monitorUtils.containsErrorFlag(schema.metricOrderMonitorErrorFlags,
                                          _FLAG_METRIC_ORDER):
      monitorUtils.addErrorFlag(schema.metricOrderMonitorErrorFlags,
                                _FLAG_METRIC_ORDER, _FLAG_METRIC_ORDER)
      g_logger.info("Check FAILS -- metric order error found. Sending an "
                    "error report.")
      error_reporting.sendMonitorErrorEmail(
        monitorName=_MONITOR_NAME,
        resourceName=monitoredResource,
        message=message,
        params=emailParams)
    else:
      g_logger.info("Check FAILS -- metric order error found. Error "
                    "flag exists; error report suppressed.")

  else:
    g_logger.info("Check PASSES -- all metrics were found to be in order =)")
    monitorUtils.removeErrorFlag(schema.metricOrderMonitorErrorFlags,
                                 _FLAG_METRIC_ORDER)
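
The rows passed to _reportMetrics are expected to match the header printed in the notification (UID, count(rowid), min(rowid), max(rowid), min(timestamp), max(timestamp), metric name). A sketch of one way such rows could be collected is shown below; the sqlite3 backend, the metric/metric_data schema, and the self-join query are assumptions for illustration, not the monitor's actual query.

import sqlite3

# Hypothetical query: for each metric UID, summarize pairs of rows whose
# rowid order disagrees with their timestamp order. Table and column names
# are assumed from the notification header above.
_OUT_OF_ORDER_QUERY = """
    SELECT a.uid, count(a.rowid), min(a.rowid), max(a.rowid),
           min(a.timestamp), max(a.timestamp), m.name
    FROM metric_data a
    JOIN metric_data b
      ON a.uid = b.uid AND a.rowid < b.rowid AND a.timestamp > b.timestamp
    JOIN metric m ON m.uid = a.uid
    GROUP BY a.uid
"""

def _collectOutOfOrderRows(dbPath):
  """Return out-of-order metric_data rows from the sqlite db at dbPath."""
  with sqlite3.connect(dbPath) as conn:
    return conn.execute(_OUT_OF_ORDER_QUERY).fetchall()

# Usage sketch: feed the collected rows to _reportMetrics.
# _reportMetrics(dbPath, _collectOutOfOrderRows(dbPath), emailParams)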
def _connectAndCheckModels(modelsUrl, apiKey, requestTimeout, emailParams):
  """
  Check the Taurus models for error status.

  :param modelsUrl
  :param apiKey
  :param requestTimeout
  :return A detected issue message or None
  :rtype string
  """
  try:
    g_logger.debug("Connecting to Taurus models")
    response = requests.get(modelsUrl, auth=(apiKey, ""),
                            timeout=requestTimeout, verify=False)
    removeErrorFlag(schema.modelsMonitorErrorFlags, Flags.REQUESTS_EXCEPTION)
  except requests.exceptions.RequestException:
    g_logger.exception("RequestException calling: %s with apiKey %s and "
                       "timeout: %s", modelsUrl, apiKey, requestTimeout)
    issue = traceback.format_exc() + "\n"
    _reportIssue(Flags.REQUESTS_EXCEPTION, modelsUrl, issue, emailParams)
    return

  statusCode = response.status_code
  if statusCode == 200:
    removeErrorFlag(schema.modelsMonitorErrorFlags, Flags.HTTP_STATUS_CODE)
  else:
    g_logger.error("Received abnormal HTTP status code: %s", statusCode)
    issue = _getIssueString("Received abnormal HTTP status code", statusCode)
    _reportIssue(Flags.HTTP_STATUS_CODE, modelsUrl, issue, emailParams)
    return

  try:
    responseJson = response.json()
    removeErrorFlag(schema.modelsMonitorErrorFlags, Flags.RESPONSE_JSON)
  except ValueError:
    g_logger.error("ValueError encountered loading JSON. Response text: %s",
                   response.text)
    issue = "ValueError encountered loading JSON."
    _reportIssue(Flags.RESPONSE_JSON, modelsUrl, issue, emailParams)
    return

  _checkModelsStatus(responseJson, modelsUrl, emailParams)
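
A sketch of how a monitor loop might drive _connectAndCheckModels is shown below; the URL, API key, polling interval, and emailParams values are hypothetical and would come from the monitor's configuration in practice.

import time

def _runModelsMonitor():
  # Hypothetical driver loop; real values come from the monitor's config.
  modelsUrl = "https://taurus.example.com/_models"    # assumed endpoint
  apiKey = "REPLACE_WITH_API_KEY"                     # placeholder
  requestTimeout = 30                                 # seconds (assumed)
  emailParams = dict(senderAddress="monitor@example.com",
                     recipients=["ops@example.com"])  # assumed keys

  while True:
    _connectAndCheckModels(modelsUrl, apiKey, requestTimeout, emailParams)
    time.sleep(300)  # poll every 5 minutes (assumed interval)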