def _checkModelsStatus(modelsJson, url, emailParams):
  """
  For all models, checks if the model has an error status.
  If model was OK, but is now in error, its db flag is set and an email is
  sent.
  If model was in error and is still in error, no email is sent.
  If model was in error, but is now OK, its db flag is cleared.

  :param modelsJson: A JSON list of model descriptions; each entry must contain
    at least "uid" and "status" fields.
  :param url: Request URL of the models API, used as the resource name in
    error emails.
  :param emailParams: Parameters for sending email.
  """
  g_logger.debug("Checking models' status")
  modelsInError = ""
  for model in modelsJson:
    uid = model["uid"]
    if model["status"] == MetricStatus.ERROR:
      if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
        addErrorFlag(schema.modelsMonitorErrorFlags, uid)
        modelsInError += str(model) + "\n\n"
    else:
      removeErrorFlag(schema.modelsMonitorErrorFlags, uid)

  if modelsInError != "":
    g_logger.info("Found models entering error status")
    issue = _getIssueString("Model(s) entering error status.\n", modelsInError)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issue,
                                          params=emailParams)
  else:
    g_logger.info("Looking good -- no models were found in error status =)")
def _reportDatabaseIssue(uid, url, issueMessage, emailParams):
    """
  Reports a database issue only if flag is not present in local file.
  :param uid: Unique issue ID
  :param url: request URL
  :param issueMessage: Issue details
  """
    with open(_DB_ERROR_FLAG_FILE, "rb") as fp:
        try:
            flagDict = json.load(fp)
        except ValueError:
            g_logger.exception("Failed to load JSON from db error flag file")
            raise

        if uid not in flagDict:
            flagDict[uid] = uid
            error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                                  resourceName=url,
                                                  message=issueMessage,
                                                  params=emailParams)
        else:
            g_logger.info(
                "Suppressing the urge to report issue %s because a local "
                "file flag for this issue exists.", uid)

    with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump(flagDict, fp)
def _checkModelsStatus(modelsJson, url, emailParams):
    """
  For all models, checks if the model has an error status.
  If model was OK, but is now in error, its db flag is set and an email is
  sent.
  If model was in error and is still in error, no email is sent.
  If model was in error, but is now OK, its db flag is cleared.

  :param modelsJson: A JSON containing descriptions of the models.
  """
    g_logger.debug("Checking models' status")
    modelsInError = ""
    for model in modelsJson:
        uid = model["uid"]
        if model["status"] == MetricStatus.ERROR:
            if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
                addErrorFlag(schema.modelsMonitorErrorFlags, uid)
                modelsInError += str(model) + "\n\n"
        else:
            removeErrorFlag(schema.modelsMonitorErrorFlags, uid)

    if modelsInError != "":
        g_logger.info("Found models entering error status")
        issue = _getIssueString("Model(s) entering error status.\n",
                                modelsInError)
        error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                              resourceName=url,
                                              message=issue,
                                              params=emailParams)
    else:
        g_logger.info(
            "Looking good -- no models were found in error status =)")
def _reportDatabaseIssue(uid, url, issueMessage, emailParams):
  """
  Reports a database issue only if flag is not present in local file.
  :param uid: Unique issue ID
  :param url: request URL
  :param issueMessage: Issue details
  """
  with open(_DB_ERROR_FLAG_FILE, "rb") as fp:
    try:
      flagDict = json.load(fp)
    except ValueError:
      g_logger.exception("Failed to load JSON from db error flag file")
      raise

    if uid not in flagDict:
      flagDict[uid] = uid
      error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                            resourceName=url,
                                            message=issueMessage,
                                            params=emailParams)
    else:
      g_logger.info("Suppressing the urge to report issue %s because a local "
                    "file flag for this issue exists.", uid)

  with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
    json.dump(flagDict, fp)
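
main() below also calls a _clearDatabaseIssue helper that is not included in these snippets. A minimal sketch of what it might look like, assuming it simply drops the issue UID from the same local JSON flag file:

def _clearDatabaseIssue(uid):
  """ Sketch only: remove the local-file flag for a previously reported issue
  so that the next occurrence is reported again.
  """
  with open(_DB_ERROR_FLAG_FILE, "rb") as fp:
    flagDict = json.load(fp)

  if flagDict.pop(uid, None) is not None:
    g_logger.info("Clearing database-issue flag %s", uid)
    with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
      json.dump(flagDict, fp)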
def main():
  """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
  try:
    args = _getArgs()
    logging_support.LoggingSupport.initLogging(loggingLevel=args.loggingLevel,
                                               logToFile=True)

    confDir = os.path.dirname(args.monitorConfPath)
    confFileName = os.path.basename(args.monitorConfPath)
    config = Config(confFileName, confDir)

    modelsUrl = config.get("S1", "MODELS_MONITOR_TAURUS_MODELS_URL")
    apiKey = config.get("S1", "MODELS_MONITOR_TAURUS_API_KEY")

    emailParams = dict(
        senderAddress=config.get("S1", "MODELS_MONITOR_EMAIL_SENDER_ADDRESS"),
        recipients=config.get("S1", "MODELS_MONITOR_EMAIL_RECIPIENTS"),
        awsRegion=config.get("S1", "MODELS_MONITOR_EMAIL_AWS_REGION"),
        sesEndpoint=config.get("S1", "MODELS_MONITOR_EMAIL_SES_ENDPOINT"),
        awsAccessKeyId=None,
        awsSecretAccessKey=None)

    dbConf = os.getenv("TAURUS_MONITORS_DB_CONFIG_PATH",
                       "Couldn't read TAURUS_MONITORS_DB_CONFIG_PATH")
    g_logger.info("TAURUS_MONITORS_DB_CONFIG_PATH: %s", dbConf)
    g_logger.info("DB CONF DIR: %s", CONF_DIR)

    if args.testEmail:
      g_logger.info("Sending an email for test purposes.")
      error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                            resourceName=modelsUrl,
                                            message="Test issue",
                                            isTest=True,
                                            params=emailParams)

    # Create a db error flag file if it doesn't already exist
    if not os.path.isfile(_DB_ERROR_FLAG_FILE):
      g_logger.debug("Making DB error flag file")
      with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump({}, fp)

    _connectAndCheckModels(modelsUrl, apiKey, args.requestTimeout, emailParams)
    _clearDatabaseIssue("sqlalchemy.exc.OperationalError")

  except OperationalError:
    g_logger.critical("Failed due to sqlalchemy.exc.OperationalError")
    issue = _getIssueString("sqlalchemy.exc.OperationalError",
                            traceback.format_exc())
    _reportDatabaseIssue("sqlalchemy.exc.OperationalError", modelsUrl, issue,
                         emailParams)
  except Exception:
    # Unexpected Exceptions are reported every time.
    g_logger.critical("%s failed due to unexpected Exception. \n", __name__)
    g_logger.critical("Traceback:\n", exc_info=True)
    issue = _getIssueString("Unexpected Exception", traceback.format_exc())
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=modelsUrl,
                                          message=issue,
                                          params=emailParams)
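
The _getIssueString helper used above is not part of these excerpts; from its call sites it only needs to join a one-line summary with the detailed text. A hypothetical reconstruction:

def _getIssueString(summary, details):
  """ Assumed behavior: combine a short summary line with the full details
  (e.g., a traceback or model dump) into a single email body.
  """
  return "%s\n%s" % (summary, details)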
Example #6
def checkAll(self):
    """ Run all previously-registered checks and send an email upon failure
    """
    for check in self._checks:
        try:
            check(self.server)
        except Exception as err:
            error_reporting.sendMonitorErrorEmail(
                monitorName=__name__ + ":" + check.__name__,
                resourceName=repr(self.server),
                message=traceback.format_exc(),
                params=self.emailParams)
 def checkAll(self):
   """ Run all previously-registered checks and send an email upon failure
   """
   for check in self._checks:
     try:
       check(self.server)
     except Exception as err:
       error_reporting.sendMonitorErrorEmail(
         monitorName=__name__ + ":" + check.__name__,
         resourceName=repr(self.server),
         message=traceback.format_exc(),
         params=self.emailParams)
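
checkAll() assumes self._checks is a list of callables that each accept the server object; how checks get registered is not shown. A minimal, assumed pattern (class and method names here are hypothetical):

class ServerHealthMonitor(object):
  def __init__(self, server, emailParams):
    self.server = server
    self.emailParams = emailParams
    self._checks = []

  def registerCheck(self, checkFn):
    """ Register a callable taking the server object; checkAll() above runs
    them in order.
    """
    self._checks.append(checkFn)
    return checkFn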
Example #8
    def dispatchNotification(self, checkFn, excType, excValue, excTraceback):
        """  Send notification.

        :param function checkFn: The check function that raised an exception
        :param type excType: Exception type
        :param exception excValue: Exception value
        :param traceback excTraceback: Exception traceback

        Required by MonitorDispatcher abc protocol.
        """
        error_reporting.sendMonitorErrorEmail(
            monitorName=__name__ + ":" + checkFn.__name__,
            resourceName=repr(self),
            message=self.formatTraceback(excType, excValue, excTraceback),
            subjectPrefix=self.subjectPrefix,
            params=self.emailParams)
def _reportIssue(uid, url, issueMessage, emailParams):
  """
  Reports an issue if no database flag is present.
  :param uid: Unique issue ID
  :param url: request URL
  :param issueMessage: Issue details
  """
  if not containsErrorFlag(schema.modelsMonitorErrorFlags, uid):
    addErrorFlag(schema.modelsMonitorErrorFlags, uid)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issueMessage,
                                          params=emailParams)
  else:
    g_logger.info("Asked to report issue %s, however db flag for issue "
                  "exists.", uid)
Example #10
def _reportIssue(uid, url, issueMessage, emailParams):
  """
  Reports an issue if no database flag is present.
  :param uid: Unique issue ID
  :param url: request URL
  :param issueMessage: Issue details
  """
  if not _containsIssueFlag(uid):
    _addIssueFlag(uid, uid)
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=url,
                                          message=issueMessage,
                                          params=emailParams)
  else:
    g_logger.info("Asked to report issue %s, however db flag for issue "
                  "exists.", uid)
Example #11
  def dispatchNotification(self, checkFn, excType, excValue, excTraceback):
    """  Send notification.

    :param function checkFn: The check function that raised an exception
    :param type excType: Exception type
    :param exception excValue: Exception value
    :param traceback excTraceback: Exception traceback

    Required by MonitorDispatcher abc protocol.
    """
    error_reporting.sendMonitorErrorEmail(
      monitorName=__name__ + ":" + checkFn.__name__,
      resourceName=repr(self),
      message=self.formatTraceback(excType, excValue, excTraceback),
      subjectPrefix=self.subjectPrefix,
      params=self.emailParams
    )
Example #12
  def dispatchNotification(self, checkFn, excType, excValue, excTraceback):
    """  Send notification.

    :param function checkFn: The check function that raised an exception
    :param type excType: Exception type
    :param exception excValue: Exception value
    :param traceback excTraceback: Exception traceback

    Required by MonitorDispatcher abc protocol.
    """
    dispatchKwargs = dict(
      monitorName=__name__ + ":" + checkFn.__name__,
      resourceName=repr(self),
      message=self.formatTraceback(excType, excValue, excTraceback),
      subjectPrefix="Model Latency Monitor",
      params=self.emailParams)

    g_logger.info("Dispatching notification: %r", dispatchKwargs)
    error_reporting.sendMonitorErrorEmail(**dispatchKwargs)
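
dispatchNotification() is declared as part of a MonitorDispatcher protocol whose check loop is not included in these examples. One way such a loop could hand the exception details over (an assumption, not the library's verbatim code):

import sys

def checkAll(self):
  """ Assumed dispatcher loop: run registered checks and forward failures. """
  for checkFn in self._checks:
    try:
      checkFn(self)
    except Exception:
      excType, excValue, excTraceback = sys.exc_info()
      self.dispatchNotification(checkFn, excType, excValue, excTraceback)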
Example #13
    def dispatchNotification(self, checkFn, excType, excValue, excTraceback):
        """  Send notification.

        :param function checkFn: The check function that raised an exception
        :param type excType: Exception type
        :param exception excValue: Exception value
        :param traceback excTraceback: Exception traceback

        Required by MonitorDispatcher abc protocol.
        """
        dispatchKwargs = dict(monitorName=__name__ + ":" + checkFn.__name__,
                              resourceName=repr(self),
                              message=self.formatTraceback(
                                  excType, excValue, excTraceback),
                              subjectPrefix="Model Latency Monitor",
                              params=self.emailParams)

        g_logger.info("Dispatching notification: %r", dispatchKwargs)
        error_reporting.sendMonitorErrorEmail(**dispatchKwargs)
Example #14
def _reportMetrics(monitoredResource, metrics, emailParams):
    """
    Sends email notification of specified out-of-order metrics. Avoids sending
    duplicate notifications using monitorsdb.

    :param monitoredResource: Path of the database containing metric_data table
    :type monitoredResource: string
    :param metrics: A list of out-of-order metric rows
    :type metrics: list
    :param emailParams: Parameters for sending email
    :type emailParams: dict
    """
    if len(metrics) > 0:
        message = (
            "The following rows of metric_data table were out of order:\n"
            "UID \tcount(rowid) \tmin(rowid) \tmax(rowid) \tmin(timestamp) "
            "\tmax(timestamp) \tmetric name\n")
        for row in metrics:
            message += str(row) + "\n"
        g_logger.info(message)

        if not monitorUtils.containsErrorFlag(
                schema.metricOrderMonitorErrorFlags, _FLAG_METRIC_ORDER):
            monitorUtils.addErrorFlag(schema.metricOrderMonitorErrorFlags,
                                      _FLAG_METRIC_ORDER, _FLAG_METRIC_ORDER)
            g_logger.info(
                "Check FAILS -- metric order error found. Sending an "
                "error report.")
            error_reporting.sendMonitorErrorEmail(
                monitorName=_MONITOR_NAME,
                resourceName=monitoredResource,
                message=message,
                params=emailParams)
        else:
            g_logger.info("Check FAILS -- metric order error found. Error "
                          "flag exists; error report suppressed.")

    else:
        g_logger.info(
            "Check PASSES -- all metrics were found to be in order =)")
        monitorUtils.removeErrorFlag(schema.metricOrderMonitorErrorFlags,
                                     _FLAG_METRIC_ORDER)
def _reportMetrics(monitoredResource, metrics, emailParams):
  """
  Sends email notification of specified out-of-order metrics. Avoids sending
  duplicate notifications using monitorsdb.

  :param monitoredResource: Path of the database containing metric_data table
  :type monitoredResource: string
  :param metrics: A list of out-of-order metric rows
  :type metrics: list
  :param emailParams: Parameters for sending email
  :type emailParams: dict
  """
  if len(metrics) > 0:
    message = ("The following rows of metric_data table were out of order:\n"
              "UID \tcount(rowid) \tmin(rowid) \tmax(rowid) \tmin(timestamp) "
              "\tmax(timestamp) \tmetric name\n")
    for row in metrics:
      message += str(row) + "\n"
    g_logger.info(message)

    if not monitorUtils.containsErrorFlag(schema.metricOrderMonitorErrorFlags,
                                          _FLAG_METRIC_ORDER):
      monitorUtils.addErrorFlag(schema.metricOrderMonitorErrorFlags,
                                _FLAG_METRIC_ORDER, _FLAG_METRIC_ORDER)
      g_logger.info("Check FAILS -- metric order error found. Sending an "
                    "error report.")
      error_reporting.sendMonitorErrorEmail(
        monitorName=_MONITOR_NAME,
        resourceName=monitoredResource,
        message=message,
        params=emailParams)
    else:
      g_logger.info("Check FAILS -- metric order error found. Error "
                    "flag exists; error report suppressed.")

  else:
    g_logger.info("Check PASSES -- all metrics were found to be in order =)")
    monitorUtils.removeErrorFlag(schema.metricOrderMonitorErrorFlags,
                                 _FLAG_METRIC_ORDER)
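
Neither _SQL_QUERY nor _getOutOfOrderMetrics appears in these excerpts. Judging from the message header built above, the query returns one row per affected metric (UID, rowid and timestamp aggregates, metric name), so the helper only needs to execute it. A sketch under that assumption:

def _getOutOfOrderMetrics(connection, sqlQuery):
  """ Sketch: run the out-of-order detection query on an open SQLAlchemy
  connection and return all matching rows (an empty list means the check
  passes).
  """
  return connection.execute(sqlQuery).fetchall()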
Example #16
def _reportDatabaseIssue(issueUID, monitoredResource, issueMessage,
                         emailParams):
    """
    Reports a database issue only if flag is not present in local file.

    :param issueUID: Unique issue ID
    :type issueUID: string
    :param monitoredResource: Description of resource being monitored
    :type monitoredResource: string
    :param issueMessage: Issue details
    :type issueMessage: string
    :param emailParams: Parameters for sending email
    :type emailParams: dict
    """
    with open(_DB_ERROR_FLAG_FILE, "rb") as fp:
        try:
            flagDict = json.load(fp)
        except ValueError:
            g_logger.exception("Failed to load JSON from db error flag file")
            raise

        if issueUID not in flagDict:
            g_logger.info(
                "Reporting database connection issue: %s and adding flag "
                "to local flag file.", issueUID)
            flagDict[issueUID] = issueUID
            error_reporting.sendMonitorErrorEmail(
                monitorName=_MONITOR_NAME,
                resourceName=monitoredResource,
                message=issueMessage,
                params=emailParams)
        else:
            g_logger.info(
                "Suppressing the urge to report issue %s because a local "
                "file flag for this issue exists.", issueUID)

    with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump(flagDict, fp)
def _reportDatabaseIssue(issueUID, monitoredResource, issueMessage,
                         emailParams):
  """
  Reports a database issue only if flag is not present in local file.

  :param issueUID: Unique issue ID
  :type issueUID: string
  :param monitoredResource: Description of resource being monitored
  :type monitoredResource: string
  :param issueMessage: Issue details
  :type issueMessage: string
  :param emailParams: Parameters for sending email
  :type emailParams: dict
  """
  with open(_DB_ERROR_FLAG_FILE, "rb") as fp:
    try:
      flagDict = json.load(fp)
    except ValueError:
      g_logger.exception("Failed to load JSON from db error flag file")
      raise

    if issueUID not in flagDict:
      g_logger.info("Reporting database connection issue: %s and adding flag "
                    "to local flag file.", issueUID)
      flagDict[issueUID] = issueUID
      error_reporting.sendMonitorErrorEmail(
          monitorName=_MONITOR_NAME,
          resourceName=monitoredResource,
          message=issueMessage,
          params=emailParams)
    else:
      g_logger.info("Suppressing the urge to report issue %s because a local "
                    "file flag for this issue exists.", issueUID)

  with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
    json.dump(flagDict, fp)
Example #18
def main():
    """
    NOTE: main also serves as entry point for "console script" generated by setup
    """
    try:
        args = _getArgs()
        logging_support.LoggingSupport.initLogging(
            loggingLevel=args.loggingLevel, logToFile=True)

        confDir = os.path.dirname(args.monitorConfPath)
        confFileName = os.path.basename(args.monitorConfPath)
        config = Config(confFileName, confDir)

        monitoredResource = config.get("S1", "MONITORED_RESOURCE")
        monitoredResourceNoPwd = (
            monitoredResource.split(":")[0] + ":" +
            monitoredResource.split(":")[1] + ":***@" +
            monitoredResource.split(":")[2].split("@")[1])

        emailParams = dict(senderAddress=config.get("S1",
                                                    "EMAIL_SENDER_ADDRESS"),
                           recipients=config.get("S1", "EMAIL_RECIPIENTS"),
                           awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
                           sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
                           awsAccessKeyId=None,
                           awsSecretAccessKey=None)

        if args.testEmail:
            g_logger.info("Sending an email for test purposes.")
            error_reporting.sendMonitorErrorEmail(
                monitorName=_MONITOR_NAME,
                resourceName=monitoredResourceNoPwd,
                message="Test issue",
                isTest=True,
                params=emailParams)

        # Create a db error flag file if one doesn't already exist
        if not os.path.isfile(_DB_ERROR_FLAG_FILE):
            g_logger.debug("Creating the database error flag file.")
            with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
                json.dump({}, fp)

        # Perform the primary check of metric_data table order
        g_logger.debug("Connecting to resource: %s", monitoredResourceNoPwd)
        engine = sqlalchemy.create_engine(monitoredResource)
        connection = engine.connect()
        metrics = _getOutOfOrderMetrics(connection, _SQL_QUERY)
        _reportMetrics(monitoredResourceNoPwd, metrics, emailParams)

        # If previous method does not throw exception, then we come here and clear
        # the database issue flag
        _clearDatabaseIssue(_FLAG_DATABASE_ISSUE)

    except OperationalError:
        # If database connection fails, report issue
        g_logger.critical("Failed due to " + _FLAG_DATABASE_ISSUE)
        _reportDatabaseIssue(_FLAG_DATABASE_ISSUE, monitoredResourceNoPwd,
                             traceback.format_exc(), emailParams)
    except Exception:
        # If any unexpected exception occurs, try to send an email with traceback
        g_logger.critical("%s failed due to unexpected Exception. \n",
                          traceback.format_exc())
        error_reporting.sendMonitorErrorEmail(
            monitorName=_MONITOR_NAME,
            resourceName=monitoredResourceNoPwd,
            message=traceback.format_exc(),
            params=emailParams)
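
To make the string surgery on MONITORED_RESOURCE above concrete, here is what it does to a made-up SQLAlchemy-style DSN (the value is invented for illustration):

monitoredResource = "mysql://taurus:secretpass@127.0.0.1/taurus"  # fake DSN
monitoredResourceNoPwd = (monitoredResource.split(":")[0] + ":" +
                          monitoredResource.split(":")[1] + ":***@" +
                          monitoredResource.split(":")[2].split("@")[1])
# monitoredResourceNoPwd == "mysql://taurus:***@127.0.0.1/taurus"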
def main():
  """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
  try:
    args = _getArgs()
    logging_support.LoggingSupport.initLogging(loggingLevel=args.loggingLevel,
                                               console=args.loggingConsole,
                                               logToFile=True)

    confDir = os.path.dirname(args.monitorConfPath)
    confFileName = os.path.basename(args.monitorConfPath)
    config = Config(confFileName, confDir)

    monitoredResource = config.get("S1", "MONITORED_RESOURCE")
    monitoredResourceNoPwd = (monitoredResource.split(":")[0] + ":" +
                              monitoredResource.split(":")[1] + ":***@" +
                              monitoredResource.split(":")[2].split("@")[1])

    emailParams = dict(senderAddress=config.get("S1", "EMAIL_SENDER_ADDRESS"),
                       recipients=config.get("S1", "EMAIL_RECIPIENTS"),
                       awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
                       sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
                       awsAccessKeyId=None,
                       awsSecretAccessKey=None)

    if args.testEmail:
      g_logger.info("Sending an email for test purposes.")
      error_reporting.sendMonitorErrorEmail(
          monitorName=_MONITOR_NAME,
          resourceName=monitoredResourceNoPwd,
          message="Test issue",
          isTest=True,
          params=emailParams)

    # Create a db error flag file if one doesn't already exist
    if not os.path.isfile(_DB_ERROR_FLAG_FILE):
      g_logger.debug("Creating the database error flag file.")
      with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump({}, fp)

    # Perform the primary check of metric_data table order
    g_logger.debug("Connecting to resource: %s", monitoredResourceNoPwd)
    engine = sqlalchemy.create_engine(monitoredResource)
    connection = engine.connect()
    metrics = _getOutOfOrderMetrics(connection, _SQL_QUERY)
    _reportMetrics(monitoredResourceNoPwd, metrics, emailParams)

    # If previous method does not throw exception, then we come here and clear
    # the database issue flag
    _clearDatabaseIssue(_FLAG_DATABASE_ISSUE)

  except OperationalError:
    # If database connection fails, report issue
    g_logger.critical("Failed due to " + _FLAG_DATABASE_ISSUE)
    _reportDatabaseIssue(_FLAG_DATABASE_ISSUE,
                         monitoredResourceNoPwd,
                         traceback.format_exc(),
                         emailParams)
  except Exception:
    # If any unexpected exception occurs, try to send an email with traceback
    g_logger.critical("%s failed due to unexpected Exception. \n",
                      traceback.format_exc())
    error_reporting.sendMonitorErrorEmail(
        monitorName=_MONITOR_NAME,
        resourceName=monitoredResourceNoPwd,
        message=traceback.format_exc(),
        params=emailParams)
def main():
    """
    NOTE: main also serves as entry point for "console script" generated by setup
    """
    try:
        args = _getArgs()
        logging_support.LoggingSupport.initLogging(
            loggingLevel=args.loggingLevel, logToFile=True)

        confDir = os.path.dirname(args.monitorConfPath)
        confFileName = os.path.basename(args.monitorConfPath)
        config = Config(confFileName, confDir)

        modelsUrl = config.get("S1", "TAURUS_MODELS_URL")
        apiKey = config.get("S1", "TAURUS_API_KEY")

        emailParams = dict(
            senderAddress=config.get("S1", "EMAIL_SENDER_ADDRESS"),
            recipients=config.get("S1", "EMAIL_RECIPIENTS"),
            awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
            sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
            awsAccessKeyId=config.get("S1", "EMAIL_SES_AWS_ACCESS_KEY_ID"),
            awsSecretAccessKey=config.get("S1",
                                          "EMAIL_SES_AWS_SECRET_ACCESS_KEY"))

        dbConf = os.getenv("TAURUS_MONITORS_DB_CONFIG_PATH",
                           "Couldn't read TAURUS_MONITORS_DB_CONFIG_PATH")
        g_logger.info("TAURUS_MONITORS_DB_CONFIG_PATH: %s", dbConf)
        g_logger.info("DB CONF DIR: %s", CONF_DIR)

        if args.testEmail:
            g_logger.info("Sending an email for test purposes.")
            error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                                  resourceName=modelsUrl,
                                                  message="Test issue",
                                                  isTest=True,
                                                  params=emailParams)

        # Create a db error flag file if it doesn't already exist
        if not os.path.isfile(_DB_ERROR_FLAG_FILE):
            g_logger.debug("Making DB error flag file")
            with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
                json.dump({}, fp)

        _connectAndCheckModels(modelsUrl, apiKey, args.requestTimeout,
                               emailParams)
        _clearDatabaseIssue("sqlalchemy.exc.OperationalError")

    except OperationalError:
        g_logger.critical("Failed due to sqlalchemy.exc.OperationalError")
        issue = _getIssueString("sqlalchemy.exc.OperationalError",
                                traceback.format_exc())
        _reportDatabaseIssue("sqlalchemy.exc.OperationalError", modelsUrl,
                             issue, emailParams)
    except Exception:
        # Unexpected Exceptions are reported every time.
        g_logger.critical("%s failed due to unexpected Exception. \n",
                          __name__)
        g_logger.critical("Traceback:\n", exc_info=True)
        issue = _getIssueString("Unexpected Exception", traceback.format_exc())
        error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                              resourceName=modelsUrl,
                                              message=issue,
                                              params=emailParams)
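
Finally, _connectAndCheckModels is referenced by both models-monitor variants of main() but is not part of these excerpts. A minimal sketch, assuming the requests library and that the API key is sent as the HTTP basic-auth user name (an assumption; the real transport details are not shown here):

import requests

def _connectAndCheckModels(modelsUrl, apiKey, requestTimeout, emailParams):
  """ Sketch: fetch the models JSON and run the status check defined above. """
  response = requests.get(modelsUrl, auth=(apiKey, ""),
                          timeout=requestTimeout, verify=False)
  response.raise_for_status()
  _checkModelsStatus(response.json(), modelsUrl, emailParams)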