def _checkModelsStatus(modelsJson, url, emailParams):
  """ For all models, checks if the model has an error status.

  If a model was OK but is now in error, its db flag is set and an email is
  sent. If a model was in error and is still in error, no email is sent. If a
  model was in error but is now OK, its db flag is cleared.

  :param modelsJson: A JSON list of model descriptions; each entry must expose
    "uid" and "status"
  :param url: Request URL; used as the resource name in error reports
  :param emailParams: Parameters for sending email
  """
  g_logger.debug("Checking models' status")

  modelsInError = ""
  for model in modelsJson:
    uid = model["uid"]
    if model["status"] == MetricStatus.ERROR:
      if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
        addErrorFlag(schema.modelsMonitorErrorFlags, uid)
        modelsInError += str(model) + "\n\n"
    else:
      removeErrorFlag(schema.modelsMonitorErrorFlags, uid)

  if modelsInError != "":
    g_logger.info("Found models entering error status")
    issue = _getIssueString("Model(s) entering error status.\n", modelsInError)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issue,
                                          params=emailParams)
  else:
    g_logger.info("Looking good -- no models were found in error status =)")
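

# A minimal usage sketch, assuming modelsJson is a list of dicts that expose at
# least the "uid" and "status" fields read by _checkModelsStatus above. The
# sample payload, URL, and emailParams keys are hypothetical placeholders, not
# the real monitor configuration.
def _demoCheckModelsStatus():
  sampleModelsJson = [
      {"uid": "a1b2c3", "status": MetricStatus.ACTIVE},  # assumed non-error status
      {"uid": "d4e5f6", "status": MetricStatus.ERROR},   # flags and emails once
  ]
  sampleEmailParams = {"senderAddress": "monitor@example.com",  # assumed keys
                       "recipients": ["ops@example.com"]}
  _checkModelsStatus(sampleModelsJson,
                     url="https://models.example.com/_models",
                     emailParams=sampleEmailParams)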


def _reportIssue(uid, url, issueMessage, emailParams):
  """ Reports an issue, unless a database flag for it is already present.

  :param uid: Unique issue ID
  :param url: Request URL; used as the resource name in error reports
  :param issueMessage: Issue details
  :param emailParams: Parameters for sending email
  """
  if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
    addErrorFlag(schema.modelsMonitorErrorFlags, uid)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issueMessage,
                                          params=emailParams)
  else:
    g_logger.info("Asked to report issue %s, however db flag for issue "
                  "exists.", uid)
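

# A hedged sketch of how _reportIssue is typically wired into a monitor's
# request path: a stable, hand-picked uid keys the db flag so duplicate emails
# are suppressed until the flag is cleared. The helper name, uid value, and
# requests-based call below are illustrative assumptions (the import would
# normally sit at the top of the module).
import requests


def _checkApiReachable(url, emailParams):
  """ Report (at most once) that the monitored API cannot be reached. """
  try:
    response = requests.get(url)
    response.raise_for_status()
  except requests.RequestException as exc:
    _reportIssue(uid="api_unreachable",  # hypothetical flag key
                 url=url,
                 issueMessage="Request to %s failed: %r" % (url, exc),
                 emailParams=emailParams)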


def _reportMetrics(monitoredResource, metrics, emailParams):
  """ Sends email notification of specified out-of-order metrics. Avoids
  sending duplicate notifications using monitorsdb.

  :param monitoredResource: Path of the database containing metric_data table
  :type monitoredResource: string
  :param metrics: A list of out-of-order metric rows
  :type metrics: list
  :param emailParams: Parameters for sending email
  :type emailParams: dict
  """
  if len(metrics) > 0:
    message = ("The following rows of metric_data table were out of order:\n"
               "UID \tcount(rowid) \tmin(rowid) \tmax(rowid) \tmin(timestamp) "
               "\tmax(timestamp) \tmetric name\n")
    for row in metrics:
      message += str(row) + "\n"
    g_logger.info(message)

    if not monitorUtils.containsErrorFlag(schema.metricOrderMonitorErrorFlags,
                                          _FLAG_METRIC_ORDER):
      monitorUtils.addErrorFlag(schema.metricOrderMonitorErrorFlags,
                                _FLAG_METRIC_ORDER, _FLAG_METRIC_ORDER)
      g_logger.info("Check FAILS -- metric order error found. Sending an "
                    "error report.")
      error_reporting.sendMonitorErrorEmail(
          monitorName=_MONITOR_NAME,
          resourceName=monitoredResource,
          message=message,
          params=emailParams)
    else:
      g_logger.info("Check FAILS -- metric order error found. Error "
                    "flag exists; error report suppressed.")
  else:
    g_logger.info("Check PASSES -- all metrics were found to be in order =)")
    monitorUtils.removeErrorFlag(schema.metricOrderMonitorErrorFlags,
                                 _FLAG_METRIC_ORDER)
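

# A hedged sketch of how the out-of-order rows handed to _reportMetrics might
# be computed; the resulting tuples can be passed straight to _reportMetrics.
# The metric_data/metric table and column names are assumptions inferred from
# the report header above, not the monitor's actual query, and timestamps are
# assumed to compare correctly as stored (e.g. ISO-8601 strings). The import
# would normally sit at the top of the module.
import sqlite3


def _findOutOfOrderMetrics(dbPath):
  """ Return one summary tuple per metric whose timestamp ever decreases as
  rowid increases, shaped to match the header used by _reportMetrics.
  """
  badRows = {}  # uid -> list of (rowid, timestamp) rows that arrived late
  with sqlite3.connect(dbPath) as conn:
    cursor = conn.execute(
        "SELECT uid, rowid, timestamp FROM metric_data ORDER BY uid, rowid")
    lastTimestamp = {}
    for uid, rowid, timestamp in cursor:
      if uid in lastTimestamp and timestamp < lastTimestamp[uid]:
        badRows.setdefault(uid, []).append((rowid, timestamp))
      lastTimestamp[uid] = timestamp
    names = {}
    for uid in badRows:
      row = conn.execute("SELECT name FROM metric WHERE uid = ?",
                         (uid,)).fetchone()
      names[uid] = row[0] if row else ""
  return [(uid,
           len(rows),
           min(r[0] for r in rows),
           max(r[0] for r in rows),
           min(r[1] for r in rows),
           max(r[1] for r in rows),
           names[uid])
          for uid, rows in badRows.items()]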