def runServer(host="0.0.0.0", port=None, protocol=Protocol.PLAIN,
              transport=Transport.TCP):
  Protocol.current = protocol
  if port is None:
    port = Protocol.getDefaultPort(protocol)

  LOGGER.info("Starting with host=%s, port=%s, protocol=%s, transport=%s",
              host, port, protocol, transport)

  if transport == Transport.UDP:
    server = ThreadedUDPServer((host, port), UDPHandler)
  elif transport == Transport.TCP:
    server = ThreadedTCPServer((host, port), TCPHandler)
  else:
    raise ValueError("Unknown transport %r" % transport)

  config = Config("application.conf",
                  os.environ["APPLICATION_CONFIG_PATH"])

  global gQueueName
  gQueueName = config.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (config.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))

  # Serve until there is an interrupt
  server.serve_forever()
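
A hedged usage sketch (the host value and the UDP choice are illustrative,
not from the source):

  # Listen for plaintext metrics over UDP on the protocol's default port
  runServer(host="127.0.0.1", protocol=Protocol.PLAIN, transport=Transport.UDP)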
Code example #3
def main():
  logging_support.LoggingSupport().initTool()

  try:
    options = _parseArgs()

    host = options["host"]
    user = options["user"]
    password = options["password"]

    overrideConfig = Config(config.CONFIG_NAME, config.baseConfigDir,
                            mode=Config.MODE_OVERRIDE_ONLY)

    if not overrideConfig.has_section("repository"):
      overrideConfig.add_section("repository")
    overrideConfig.set("repository", "host", host)
    overrideConfig.set("repository", "user", user)
    overrideConfig.set("repository", "passwd", password)
    overrideConfig.save()

    g_log.info("Override of mysql settings for %s completed successfully",
               overrideConfig.CONFIG_NAME)

  except SystemExit as e:
    if e.code != 0:
      g_log.exception("Failed!")
    raise
  except Exception:
    g_log.exception("Failed!")
    raise
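
A hedged read-back sketch (assuming the same Config class merges overrides on
read; not from the source) to confirm the override took effect:

  # Re-read the merged configuration and verify the overridden host
  merged = Config(config.CONFIG_NAME, config.baseConfigDir)
  assert merged.get("repository", "host") == host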
Code example #4
def runServer():
  # Get the current list of custom metrics
  appConfig = Config("application.conf",
                     os.environ.get("APPLICATION_CONFIG_PATH"))

  engine = repository.engineFactory(appConfig)
  global gCustomMetrics
  now = datetime.datetime.utcnow()

  with engine.connect() as conn:
    gCustomMetrics = dict(
      (m.name, [m, now]) for m in repository.getCustomMetrics(conn))

  queueName = appConfig.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (appConfig.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))
  del appConfig

  metricStreamer = MetricStreamer()
  modelSwapper = ModelSwapperInterface()

  with MessageBusConnector() as bus:
    if not bus.isMessageQeueuePresent(queueName):
      bus.createMessageQueue(mqName=queueName, durable=True)
    LOGGER.info("Waiting for messages. To exit, press CTRL+C")
    with bus.consume(queueName) as consumer:
      messages = []
      messageRxTimes = []
      while True:
        message = consumer.pollOneMessage()
        if message is not None:
          messages.append(message)
          if gProfiling:
            messageRxTimes.append(time.time())

        if message is None or len(messages) >= MAX_MESSAGES_PER_BATCH:
          if messages:
            # Process the batch
            try:
              _handleBatch(engine,
                           messages,
                           messageRxTimes,
                           metricStreamer,
                           modelSwapper)
            except Exception:  # pylint: disable=W0703
              LOGGER.exception("Unknown failure in processing messages.")
              # Make sure that we ack messages when there is an unexpected error
              # to avoid getting hung forever on one bad record.

            # Ack all the messages
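            # (multiple=True acknowledges every unacknowledged delivery up to
            # and including this one, so the whole batch is covered)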
            messages[-1].ack(multiple=True)
            # Clear the message buffer
            messages = []
            messageRxTimes = []
          else:
            # Queue is empty, wait before retrying
            time.sleep(POLL_DELAY_SEC)
Code example #5
def runServer():
  # Get the current list of custom metrics
  appConfig = Config("application.conf",
                     os.environ["APPLICATION_CONFIG_PATH"])

  engine = repository.engineFactory(appConfig)
  global gCustomMetrics
  now = datetime.datetime.utcnow()

  with engine.connect() as conn:
    gCustomMetrics = dict(
      (m.name, [m, now]) for m in repository.getCustomMetrics(conn))

  queueName = appConfig.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (appConfig.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))
  del appConfig

  metricStreamer = MetricStreamer()
  modelSwapper = ModelSwapperInterface()

  with MessageBusConnector() as bus:
    if not bus.isMessageQeueuePresent(queueName):
      bus.createMessageQueue(mqName=queueName, durable=True)
    LOGGER.info("Waiting for messages. To exit, press CTRL+C")
    with bus.consume(queueName) as consumer:
      messages = []
      messageRxTimes = []
      while True:
        message = consumer.pollOneMessage()
        if message is not None:
          messages.append(message)
          if gProfiling:
            messageRxTimes.append(time.time())

        if message is None or len(messages) >= MAX_MESSAGES_PER_BATCH:
          if messages:
            # Process the batch
            try:
              _handleBatch(engine,
                           messages,
                           messageRxTimes,
                           metricStreamer,
                           modelSwapper)
            except Exception:  # pylint: disable=W0703
              LOGGER.exception("Unknown failure in processing messages.")
              # Make sure that we ack messages when there is an unexpected error
              # to avoid getting hung forever on one bad record.

            # Ack all the messages
            messages[-1].ack(multiple=True)
            # Clear the message buffer
            messages = []
            messageRxTimes = []
          else:
            # Queue is empty, wait before retrying
            time.sleep(POLL_DELAY_SEC)
Code example #6
File: grok_logging.py Project: darian19/what
def getStandardLogPrefix():
  """Returns a base prefix for logging containing the YOMP_id and version for
  the current instance of YOMP.
  """
  try:
    config = Config("application.conf", CONF_DIR)
    YOMPID = config.get("usertrack", "YOMP_id")
  except ValueError:
    YOMPID = "N/A"
  return 'YOMPID=%s, VER=%s' % (YOMPID, __version__.__version__)
Code example #7
def getStandardLogPrefix():
  """Returns a base prefix for logging containing the htm_it_id and version for
  the current instance of htm-it.
  """
  try:
    config = Config("application.conf", CONF_DIR)
    htmitID = config.get("usertrack", "htm_it_id")
  except ValueError:
    htmitID = "N/A"
  return 'HTMITID=%s, VER=%s' % (htmitID, __version__.__version__)
Code example #8
def getStandardLogPrefix():
    """Returns a base prefix for logging containing the htm_it_id and version for
  the current instance of htm-it.
  """
    try:
        config = Config("application.conf", CONF_DIR)
        htmitID = config.get("usertrack", "htm_it_id")
    except ValueError:
        htmitID = "N/A"
    return 'HTMITID=%s, VER=%s' % (htmitID, __version__.__version__)
Code example #9
File: grok_logging.py Project: sergius/numenta-apps
def getStandardLogPrefix():
    """Returns a base prefix for logging containing the grok_id and version for
  the current instance of grok.
  """
    try:
        config = Config("application.conf", CONF_DIR)
        grokID = config.get("usertrack", "grok_id")
    except ValueError:
        grokID = "N/A"
    return 'GROKID=%s, VER=%s' % (grokID, __version__.__version__)
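
For illustration, the returned prefix looks like this (values hypothetical):

  # getStandardLogPrefix() -> 'GROKID=2f6e3a41c5ab4a0f, VER=1.7.0'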
Code example #10
  def start(self):
    assert not self.active

    # Apply the config attribute overrides
    self._osEnvironPatch.start()

    # Perform self-validation
    config = Config(self._configName, self._baseConfigDir)
    for sec, attr, val in self._values:
      # This will raise an exception if the expected attribute isn't defined
      r = config.get(sec, attr)
      assert r == val, (
        "Config override failed; sec=%s, attr=%s, expected value=%r, but got %r"
        % (sec, attr, val, r))

    self.active = True
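
A hedged usage sketch; the class name ConfigAttributePatch, the stop() method,
and the test body are assumptions for illustration:

  # Apply the config overrides around a block of test code
  patch = ConfigAttributePatch("application.conf", confDir,
                               values=[("metric_listener", "queue_name", "q1")])
  patch.start()
  try:
    runTestWithOverriddenQueue()  # hypothetical test body
  finally:
    patch.stop()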
Code example #11
  def setUp(self):
    self.config = Config("application.conf",
                         os.environ.get("APPLICATION_CONFIG_PATH"))


    self.plaintextPort = self.config.getint("metric_listener", "plaintext_port")

    self.initialLoggingString = "Running result quality test using metric: %s"

    # Subscribe to results broadcast from Anomaly Service

    connParams = amqp.connection.getRabbitmqConnectionParameters()

    def deleteAmqpQueue(queue):
      with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
          amqpClient):
        amqpClient.deleteQueue(queue=queue, ifUnused=False, ifEmpty=False)

    self.resultsQueueName = (
      "htmengine.result_quality_test.likelihood_results.%s" %
      (uuid.uuid1().hex,))

    with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
        amqpClient):
      amqpClient.declareQueue(self.resultsQueueName)
      self.addCleanup(deleteAmqpQueue, self.resultsQueueName)

      amqpClient.bindQueue(
        queue=self.resultsQueueName,
        exchange=self.config.get("metric_streamer", "results_exchange_name"),
        routingKey="")
Code example #12
    def start(self):
        assert not self.active

        # Apply the config attribute overrides
        self._osEnvironPatch.start()

        # Perform self-validation
        config = Config(self._configName, self._baseConfigDir)
        for sec, attr, val in self._values:
            # This will raise an exception if the expected attribute isn't defined
            r = config.get(sec, attr)
            assert r == val, (
                "Config override failed; sec=%s, attr=%s, expected value=%r, but got %r"
                % (sec, attr, val, r))

        self.active = True
Code example #13
    def getExtendedMsg(cls, msg):
        """ Returns the full message to be included in the log. This method is
    specifically used by the logger.debug(msg), logger.warning(msg), etc. in
    the ExtendedLogger class.

    :param msg: The msg to be logged.
    :return: The full log message with added prefix.
    """
        try:
            config = Config("application.conf", CONF_DIR)
            if cls.cached_grok_update_epoch:
                duration = time.time() - cls.cached_grok_update_epoch
            else:
                cls.cached_grok_update_epoch = config.getfloat("usertrack", "grok_update_epoch")
                duration = time.time() - cls.cached_grok_update_epoch
            grokExtendedMsg = "<DUR=%f, %s>%s" % (duration, cls._logPrefix, msg)
        except (ImportError, ValueError):
            grokExtendedMsg = "<DUR=NA, %s>%s" % (cls._logPrefix, msg)
        return grokExtendedMsg
Code example #14
File: __init__.py Project: yinxinwuzeen/numenta-apps
def loadConfig(options):
    """ Load, and return a Config object given a OptionParser object

  :param object options: Object having `monitorConfPath` attr representing the
    path to a configuration file.
  :returns: Config object
  :rtype: nta.utils.config.Config
  """
    confDir = os.path.dirname(options.monitorConfPath)
    confFileName = os.path.basename(options.monitorConfPath)
    return Config(confFileName, confDir)
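
A hedged usage sketch (the options holder and the path are illustrative):

  class _Options(object):
    monitorConfPath = "/opt/numenta/taurus/conf/monitor.conf"

  config = loadConfig(_Options())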
Code example #15
  def __init__(self):
    (options, args) = self.parser.parse_args()

    if args:
      self.parser.error("Unexpected positional arguments: {}"
                        .format(repr(args)))

    self.server = xmlrpclib.Server(urljoin(options.serverUrl, "RPC2"))

    confDir = os.path.dirname(options.monitorConfPath)
    confFileName = os.path.basename(options.monitorConfPath)
    config = Config(confFileName, confDir)

    self.emailParams = (
      dict(senderAddress=(
            config.get("S1", "MODELS_MONITOR_EMAIL_SENDER_ADDRESS")),
           recipients=config.get("S1", "MODELS_MONITOR_EMAIL_RECIPIENTS"),
           awsRegion=config.get("S1", "MODELS_MONITOR_EMAIL_AWS_REGION"),
           sesEndpoint=config.get("S1", "MODELS_MONITOR_EMAIL_SES_ENDPOINT"),
           awsAccessKeyId=None,
           awsSecretAccessKey=None))
Code example #16
    def getExtendedMsg(cls, msg):
        """ Returns the full message to be included in the log. This method is
    specifically used by the logger.debug(msg), logger.warning(msg), etc. in
    the ExtendedLogger class.

    :param msg: The msg to be logged.
    :return: The full log message with added prefix.
    """
        try:
            config = Config("application.conf", CONF_DIR)
            if cls.cached_grok_update_epoch:
                duration = time.time() - cls.cached_grok_update_epoch
            else:
                cls.cached_grok_update_epoch = (config.getfloat(
                    "usertrack", "grok_update_epoch"))
                duration = time.time() - cls.cached_grok_update_epoch
            grokExtendedMsg = "<DUR=%f, %s>%s" % (duration, cls._logPrefix,
                                                  msg)
        except (ImportError, ValueError):
            grokExtendedMsg = "<DUR=NA, %s>%s" % (cls._logPrefix, msg)
        return grokExtendedMsg
Code example #17
    def createDatasourceAdapter(cls, datasource):
        """ Factory for Datasource adapters

    :param datasource: datasource (e.g., "cloudwatch")

    :returns: DatasourceAdapterIface-based adapter object corresponding to the
      given datasource value
    """
        config = Config("application.conf",
                        os.environ.get("APPLICATION_CONFIG_PATH"))
        return cls._adapterRegistry[datasource](
            repository.engineFactory(config).connect)
Code example #18
    def __init__(self, configName, baseConfigDir, values):
        """
    configName: target configuration; see configName definition in
      nta.utils.config
    values: a sequence of overrides, where each element is a three-tuple:
      (<section name>, <attribute name>, <new value>) and <new value> is a string
    """
        self.active = False
        """ True when applied successfully; False after successfully removed or not
    applied """

        # Save for self-validation after patch
        self._configName = configName
        self._baseConfigDir = baseConfigDir
        self._values = copy.deepcopy(values)

        # Verify that the requested attributes already exist and that override
        # values are strings
        config = Config(configName, baseConfigDir)
        for sec, attr, val in values:
            # This will raise an exception if the expected attribute isn't defined
            config.get(sec, attr)

            # Verify that the override value is a string
            if not isinstance(val, types.StringTypes):
                raise TypeError(
                    "Expected a string as override for %r/%r, but got a "
                    "value of type %s; value=%r" % (
                        sec,
                        attr,
                        type(val),
                        val,
                    ))

        # Create the patch, but don't start it yet
        osEnvironOverrideValues = dict(
            (Config(configName, baseConfigDir)._getEnvVarOverrideName(
                configName, sec, attr), val) for sec, attr, val in values)
        self._osEnvironPatch = patch.dict("os.environ",
                                          values=osEnvironOverrideValues)
Code example #19
def main():
  """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
  try:
    args = _getArgs()
    logging_support.LoggingSupport.initLogging(loggingLevel=args.loggingLevel,
                                               logToFile=True)

    confDir = os.path.dirname(args.monitorConfPath)
    confFileName = os.path.basename(args.monitorConfPath)
    config = Config(confFileName, confDir)

    modelsUrl = config.get("S1", "MODELS_MONITOR_TAURUS_MODELS_URL")
    apiKey = config.get("S1", "MODELS_MONITOR_TAURUS_API_KEY")

    emailParams = dict(senderAddress=config.get("S1", "MODELS_MONITOR_EMAIL_SENDER_ADDRESS"),
                       recipients=config.get("S1", "MODELS_MONITOR_EMAIL_RECIPIENTS"),
                       awsRegion=config.get("S1", "MODELS_MONITOR_EMAIL_AWS_REGION"),
                       sesEndpoint=config.get("S1", "MODELS_MONITOR_EMAIL_SES_ENDPOINT"),
                       awsAccessKeyId=None,
                       awsSecretAccessKey=None)

    dbConf = os.getenv("TAURUS_MONITORS_DB_CONFIG_PATH",
                      "Couldn't read TAURUS_MONITORS_DB_CONFIG_PATH")
    g_logger.info("TAURUS_MONITORS_DB_CONFIG_PATH: %s", dbConf)
    g_logger.info("DB CONF DIR: %s", CONF_DIR)

    if args.testEmail:
      g_logger.info("Sending an email for test purposes.")
      error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                            resourceName=modelsUrl,
                                            message="Test issue",
                                            isTest=True,
                                            params=emailParams)

    # Create a db error flag file if it doesn't already exist
    if not os.path.isfile(_DB_ERROR_FLAG_FILE):
      g_logger.debug("Making DB error flag file")
      with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump({}, fp)

    _connectAndCheckModels(modelsUrl, apiKey, args.requestTimeout, emailParams)
    _clearDatabaseIssue("sqlalchemy.exc.OperationalError")

  except OperationalError:
    g_logger.critical("Failed due to sqlalchemy.exc.OperationalError")
    issue = _getIssueString("sqlalchemy.exc.OperationalError",
                            traceback.format_exc())
    _reportDatabaseIssue("sqlalchemy.exc.OperationalError", modelsUrl, issue,
                         emailParams)
  except Exception:
    # Unexpected Exceptions are reported every time.
    g_logger.critical("%s failed due to unexpected Exception. \n", __name__)
    g_logger.critical("Traceback:\n", exc_info=True)
    issue = _getIssueString("Unexpected Exception", traceback.format_exc())
    error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                          resourceName=modelsUrl,
                                          message=issue,
                                          params=emailParams)
Code example #20
def main():
  logging_support.LoggingSupport().initTool()


  parser = argparse.ArgumentParser(description=__doc__)

  parser.add_argument(
    "--apikey",
    required=True,
    dest="apikey",
    metavar="API_KEY",
    help=("Taurus Engine's REST API key"))

  args = parser.parse_args()

  if not args.apikey:
    msg = "Missing or empty api key"
    g_log.error(msg)
    parser.error(msg)


  conf = taurus_engine.config

  assert conf.has_section("security"), (
    "Section 'security' is not in {}".format(conf))

  assert conf.has_option("security", "apikey"), (
    "security/apikey option is not in {}".format(conf))


  confWriter = Config(configName=conf.configName,
                      baseConfigDir=conf.baseConfigDir,
                      mode=Config.MODE_OVERRIDE_ONLY)

  if not confWriter.has_section("security"):
    confWriter.add_section("security")

  confWriter.set("security", "apikey", args.apikey)

  confWriter.save()

  g_log.info(
    "Override of Taurus Engine REST API key completed successfully via %r",
    confWriter)
Code example #21
def reset():
  """
  Reset the htmengine database; upon successful completion, the necessary schema
  are created, but the tables are not populated
  """
  # Make sure we have the latest version of configuration
  config = Config("application.conf",
                  os.environ.get("APPLICATION_CONFIG_PATH"))
  dbName = config.get("repository", "db")

  resetDatabaseSQL = (
      "DROP DATABASE IF EXISTS %(database)s; "
      "CREATE DATABASE %(database)s;" % {"database": dbName})
  statements = resetDatabaseSQL.split(";")

  engine = getUnaffiliatedEngine(config)
  with engine.connect() as connection:
    for s in statements:
      if s.strip():
        connection.execute(s)

  migrate()
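
For illustration, if the configured db name were "htmengine", the statement
sequence executed above would be:

  DROP DATABASE IF EXISTS htmengine
  CREATE DATABASE htmengine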
Code example #22
  def __init__(self, configName, baseConfigDir, values):
    """
    configName: target configuration; see configName definition in
      nta.utils.config
    values: a sequence of overrides, where each element is a three-tuple:
      (<section name>, <attribute name>, <new value>) and <new value> is a string
    """
    self.active = False
    """ True when applied successfully; False after successfully removed or not
    applied """

    # Save for self-validation after patch
    self._configName = configName
    self._baseConfigDir = baseConfigDir
    self._values = copy.deepcopy(values)

    # Verify that the requested attributes already exist and that override
    # values are strings
    config = Config(configName, baseConfigDir)
    for sec, attr, val in values:
      # This will raise an exception if the expected attribute isn't defined
      config.get(sec, attr)

      # Verify that the override value is a string
      if not isinstance(val, types.StringTypes):
        raise TypeError("Expected a string as override for %r/%r, but got a "
                        "value of type %s; value=%r"
                        % (sec, attr, type(val), val,))

    # Create the patch, but don't start it yet
    osEnvironOverrideValues = dict(
      (Config(configName, baseConfigDir)._getEnvVarOverrideName(
        configName, sec, attr), val)
      for sec, attr, val in values
    )
    self._osEnvironPatch = patch.dict("os.environ",
                                      values=osEnvironOverrideValues)
Code example #23
def main():
    logging_support.LoggingSupport().initTool()

    try:
        options = _parseArgs()

        host = options["host"]
        port = options["port"]
        isSecure = options["isSecure"]
        suffix = options["suffix"]

        configWriter = Config(config.CONFIG_NAME,
                              config.baseConfigDir,
                              mode=Config.MODE_OVERRIDE_ONLY)

        if not configWriter.has_section("dynamodb"):
            configWriter.add_section("dynamodb")

        def override(option, value):
            assert config.has_option("dynamodb", option), option
            configWriter.set("dynamodb", option, value)

        override("host", host)
        override("port", port)
        override("is_secure", isSecure)
        override("table_name_suffix", suffix)

        configWriter.save()

        g_log.info(
            "Override of dynamodb settings for %s completed successfully",
            configWriter.CONFIG_NAME)

    except SystemExit as e:
        if e.code != 0:
            g_log.exception("Failed!")
        raise
    except Exception:
        g_log.exception("Failed!")
        raise
Code example #24
    def __init__(self):
        (options, args) = self.parser.parse_args()

        if args:
            self.parser.error("Unexpected positional arguments: {}".format(
                repr(args)))

        self.server = xmlrpclib.Server(urljoin(options.serverUrl, "RPC2"))

        confDir = os.path.dirname(options.monitorConfPath)
        confFileName = os.path.basename(options.monitorConfPath)
        config = Config(confFileName, confDir)

        self.emailParams = (dict(
            senderAddress=(config.get("S1",
                                      "MODELS_MONITOR_EMAIL_SENDER_ADDRESS")),
            recipients=config.get("S1", "MODELS_MONITOR_EMAIL_RECIPIENTS"),
            awsRegion=config.get("S1", "MODELS_MONITOR_EMAIL_AWS_REGION"),
            sesEndpoint=config.get("S1", "MODELS_MONITOR_EMAIL_SES_ENDPOINT"),
            awsAccessKeyId=None,
            awsSecretAccessKey=None))
Code example #25
def main():
  logging_support.LoggingSupport().initTool()

  try:
    options = _parseArgs()

    host = options["host"]
    port = options["port"]
    isSecure = options["isSecure"]
    suffix = options["suffix"]

    configWriter = Config(config.CONFIG_NAME, config.baseConfigDir,
                          mode=Config.MODE_OVERRIDE_ONLY)

    if not configWriter.has_section("dynamodb"):
      configWriter.add_section("dynamodb")

    def override(option, value):
      assert config.has_option("dynamodb", option), option
      configWriter.set("dynamodb", option, value)

    override("host", host)
    override("port", port)
    override("is_secure", isSecure)
    override("table_name_suffix", suffix)

    configWriter.save()

    g_log.info("Override of dynamodb settings for %s completed successfully",
               configWriter.CONFIG_NAME)

  except SystemExit as e:
    if e.code != 0:
      g_log.exception("Failed!")
    raise
  except Exception:
    g_log.exception("Failed!")
    raise
Code example #26
File: __init__.py Project: sergius/numenta-apps
import os
from pkg_resources import get_distribution

from nta.utils import logging_support_raw
from nta.utils.config import Config

# TODO: TAUR-1209 use __name__ or "taurus.engine"
distribution = get_distribution("taurus")

__version__ = distribution.version  # See setup.py for constant

TAURUS_HOME = distribution.location

logging_support = logging_support_raw
logging_support.setLogDir(
    os.environ.get("APPLICATION_LOG_DIR", os.path.join(TAURUS_HOME, "logs")))

appConfigPath = os.environ.get("APPLICATION_CONFIG_PATH")
if appConfigPath is None:
    raise KeyError(
        "APPLICATION_CONFIG_PATH environment variable must be set for "
        "Taurus")

config = Config("application.conf", appConfigPath)
Code example #27
"""Consume anomaly results in near realtime"""

import os

from nta.utils import amqp
from nta.utils.config import Config

from htmengine import htmengineerrno
from htmengine.runtime.anomaly_service import AnomalyService

appConfig = Config("application.conf", os.environ["APPLICATION_CONFIG_PATH"])

modelResultsExchange = appConfig.get("metric_streamer",
                                     "results_exchange_name")
queueName = "skeleton_results"


def declareExchanges(amqpClient):
    """ Declares model results and non-metric data exchanges
  """
    amqpClient.declareExchange(exchange=modelResultsExchange,
                               exchangeType="fanout",
                               durable=True)


def declareQueueAndBindToExchanges(amqpClient):
  # (body truncated in the source listing)
  pass
Code example #28
  def getDefaultPort(cls, protocol):
    if protocol == cls.PLAIN:
      return int((Config("application.conf",
                         os.environ["APPLICATION_CONFIG_PATH"])
                  .get("metric_listener", "plaintext_port")))
    raise ValueError("Unknown protocol %r" % protocol)
Code example #30
def _parseArgs():
  """
  :returns: dict of arg names and values:
    rmqHost: Host of RabbitMQ management interface
    rmqPort: Port number of RabbitMQ management interface
    rmqUser: RabbitMQ username
    rmqPassword: RabbitMQ password
    rmqQueues: sequence of vhost-qualified RabbitMQ queue names to monitor
      e.g., ["%2f/taurus.metric.custom.data",
             "%2f/taurus.mswapper.results",
             "%2f/taurus.mswapper.scheduler.notification"]
    metricDestHost: Host of metric destination address; None for dry-run
    metricDestPort: Port number of metric destination address
    metricPrefix: prefix for emitted metric names
  """
  usage = (
    "%prog [options]\n\n"
    "Collects statistics from a RabbitMQ server and emits them "
    "as metrics to the destination Grok server.\n"
    "\n"
    "The following metrics are collected and emitted by default, where\n"
    "<prefix> is the value of the --metric-prefix command-line option.\n"
    "\t<prefix>-allq-ready.avg - average number of READY messages in all\n"
    "\t\tqueues.\n"
    "\n"
    "\t<prefix>-q-taurus.metric.custom.data-ready.avg - average number of\n"
    "\t\tREADY messages in htmengine's Metric Storer input queue.\n"
    "\n"
    "\t<prefix>-q-taurus.mswapper.results-ready.avg - average number of READY\n"
    "\t\tmessages in htmengine's Anomaly Service input queue.\n"
    "\n"
    "\t<prefix>-q-taurus.mswapper.scheduler.notification-ready.avg - average\n"
    "\t\tnumber of READY messages in htmengine's Model Scheduler notification\n"
    "\t\tinput queue"
  )

  parser = OptionParser(usage=usage)

  # Get params to use as option defaults
  rmqParams = RabbitmqManagementConnectionParams()

  parser.add_option(
    "--rmq-addr",
    action="store",
    type="string",
    dest="rmqAddr",
    default="%s:%d" % (rmqParams.host, rmqParams.port),
    help=("Address and port host:port of RabbitMQ Management interface "
          "[default: %default]"))

  parser.add_option(
    "--rmq-user",
    action="store",
    type="string",
    dest="rmqUser",
    default=rmqParams.username,
    help="Username for RabbitMQ authentication [default: %default]")

  parser.add_option(
    "--rmq-pass",
    action="store",
    type="string",
    dest="rmqPassword",
    default=rmqParams.password,
    help="Password for RabbitMQ authentication [default: %default]")

  rmqVhost = (rmqParams.vhost if rmqParams.vhost != "/"
              else "%" + rmqParams.vhost.encode("hex"))
  appConfig = Config("application.conf", os.environ.get("APPLICATION_CONFIG_PATH"))
  swapperConfig = ModelSwapperConfig()
  defaultQueues = [
    swapperConfig.get("interface_bus", "results_queue"),
    swapperConfig.get("interface_bus", "scheduler_notification_queue"),
    appConfig.get("metric_listener", "queue_name")
  ]
  defaultQueues = ["%s/%s" % (rmqVhost, q) for q in defaultQueues]

  parser.add_option(
    "--rmq-queues",
    action="store",
    type="string",
    dest="rmqQueues",
    default=",".join(defaultQueues),
    help=("RabbitMQ message queues to monitor; comma-separated, "
          "vhost-qualified; [default: %default]"))

  parser.add_option(
      "--dryrun",
      action="store_true",
      default=False,
      dest="dryRun",
      help=("Use this flag to do a dry run: retrieve data and log it; mutually "
            "exclusive with --metric-addr"))

  parser.add_option(
    "--metric-addr",
    action="store",
    type="string",
    dest="metricDestAddr",
    help=("Destination address for metrics as host:port; typically address of "
          "Grok's custom metrics listener; Grok's default metric listener port "
          "is 2003"))

  parser.add_option(
    "--metric-prefix",
    action="store",
    type="string",
    dest="metricPrefix",
    help="Prefix for metric names")

  options, remainingArgs = parser.parse_args()
  if remainingArgs:
    msg = "Unexpected remaining args: %r" % (remainingArgs,)
    g_log.error(msg)
    parser.error(msg)


  if not options.rmqAddr:
    msg = "Missing address of RabbitMQ server"
    g_log.error(msg)
    parser.error(msg)

  rmqHost, _, rmqPort = options.rmqAddr.rpartition(":")
  if not rmqHost:
    msg = "Missing Hostname or IP address of RabbitMQ management interface."
    g_log.error(msg)
    parser.error(msg)

  if not rmqPort:
    msg = "Missing port number of RabbitMQ management interface."
    g_log.error(msg)
    parser.error(msg)

  try:
    rmqPort = int(rmqPort)
  except ValueError:
    msg = ("RabbitMQ Management Interface port must be an integer, but got %r"
           % (rmqPort,))
    g_log.exception(msg)
    parser.error(msg)

  if not options.rmqUser:
    msg = "Missing RabbitMQ user name."
    g_log.error(msg)
    parser.error(msg)

  if not options.rmqPassword:
    msg = "Missing RabbitMQ password."
    g_log.error(msg)
    parser.error(msg)

  if not options.rmqQueues:
    msg = "Missing vhost-qualified message queue names"
    g_log.error(msg)
    parser.error(msg)

  rmqQueues = options.rmqQueues.split(",")

  if options.dryRun:
    if options.metricDestAddr:
      msg = "--dryrun is mutually exclusive with --metric-addr"
      g_log.error(msg)
      parser.error(msg)

    metricDestHost = metricDestPort = None
  else:
    if not options.metricDestAddr:
      msg = "Missing address of metric destination server"
      g_log.error(msg)
      parser.error(msg)

    metricDestHost, _, metricDestPort = options.metricDestAddr.rpartition(":")
    if not metricDestHost:
      msg = "Missing Hostname or IP address of metric destination server."
      g_log.error(msg)
      parser.error(msg)

    if not metricDestPort:
      msg = "Missing port number of metric destination server."
      g_log.error(msg)
      parser.error(msg)

    try:
      metricDestPort = int(metricDestPort)
    except ValueError:
      msg = "Metric destination port must be an integer, but got %r" % (
        metricDestPort,)
      g_log.exception(msg)
      parser.error(msg)

  options.metricPrefix = (options.metricPrefix.strip()
                          if options.metricPrefix is not None else None)
  if not options.metricPrefix:
    msg = "Missing or empty metric name prefix"
    g_log.error(msg)
    parser.error(msg)


  return dict(
    rmqHost=rmqHost,
    rmqPort=rmqPort,
    rmqUser=options.rmqUser,
    rmqPassword=options.rmqPassword,
    rmqQueues=rmqQueues,
    metricDestHost=metricDestHost,
    metricDestPort=metricDestPort,
    metricPrefix=options.metricPrefix
  )
Code example #31
def main():
    """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
    try:
        args = _getArgs()
        logging_support.LoggingSupport.initLogging(
            loggingLevel=args.loggingLevel, logToFile=True)

        confDir = os.path.dirname(args.monitorConfPath)
        confFileName = os.path.basename(args.monitorConfPath)
        config = Config(confFileName, confDir)

        monitoredResource = config.get("S1", "MONITORED_RESOURCE")
        monitoredResourceNoPwd = (
            monitoredResource.split(":")[0] + ":" +
            monitoredResource.split(":")[1] + ":***@" +
            monitoredResource.split(":")[2].split("@")[1])

        emailParams = dict(senderAddress=config.get("S1",
                                                    "EMAIL_SENDER_ADDRESS"),
                           recipients=config.get("S1", "EMAIL_RECIPIENTS"),
                           awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
                           sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
                           awsAccessKeyId=None,
                           awsSecretAccessKey=None)

        if args.testEmail:
            g_logger.info("Sending an email for test purposes.")
            error_reporting.sendMonitorErrorEmail(
                monitorName=_MONITOR_NAME,
                resourceName=monitoredResourceNoPwd,
                message="Test issue",
                isTest=True,
                params=emailParams)

        # Create a db error flag file if one doesn't already exist
        if not os.path.isfile(_DB_ERROR_FLAG_FILE):
            g_logger.debug("Creating the database error flag file.")
            with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
                json.dump({}, fp)

        # Perform the primary check of metric_data table order
        g_logger.debug("Connecting to resource: %s", monitoredResourceNoPwd)
        engine = sqlalchemy.create_engine(monitoredResource)
        connection = engine.connect()
        metrics = _getOutOfOrderMetrics(connection, _SQL_QUERY)
        _reportMetrics(monitoredResourceNoPwd, metrics, emailParams)

        # If previous method does not throw exception, then we come here and clear
        # the database issue flag
        _clearDatabaseIssue(_FLAG_DATABASE_ISSUE)

    except OperationalError:
        # If database connection fails, report issue
        g_logger.critical("Failed due to " + _FLAG_DATABASE_ISSUE)
        _reportDatabaseIssue(_FLAG_DATABASE_ISSUE, monitoredResourceNoPwd,
                             traceback.format_exc(), emailParams)
    except Exception:
        # If any unexpected exception occurs, try to send an email with traceback
        g_logger.critical("%s failed due to unexpected Exception. \n",
                          traceback.format_exc())
        error_reporting.sendMonitorErrorEmail(
            monitorName=_MONITOR_NAME,
            resourceName=monitoredResourceNoPwd,
            message=traceback.format_exc(),
            params=emailParams)
Code example #32
def main():
  """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
  try:
    args = _getArgs()
    logging_support.LoggingSupport.initLogging(loggingLevel=args.loggingLevel,
                                               console=args.loggingConsole,
                                               logToFile=True)

    confDir = os.path.dirname(args.monitorConfPath)
    confFileName = os.path.basename(args.monitorConfPath)
    config = Config(confFileName, confDir)

    monitoredResource = config.get("S1", "MONITORED_RESOURCE")
    monitoredResourceNoPwd = (monitoredResource.split(":")[0] + ":" +
                              monitoredResource.split(":")[1] + ":***@" +
                              monitoredResource.split(":")[2].split("@")[1])

    emailParams = dict(senderAddress=config.get("S1", "EMAIL_SENDER_ADDRESS"),
                       recipients=config.get("S1", "EMAIL_RECIPIENTS"),
                       awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
                       sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
                       awsAccessKeyId=None,
                       awsSecretAccessKey=None)

    if args.testEmail:
      g_logger.info("Sending an email for test purposes.")
      error_reporting.sendMonitorErrorEmail(
          monitorName=_MONITOR_NAME,
          resourceName=monitoredResourceNoPwd,
          message="Test issue",
          isTest=True,
          params=emailParams)

    # Create a db error flag file if one doesn't already exist
    if not os.path.isfile(_DB_ERROR_FLAG_FILE):
      g_logger.debug("Creating the database error flag file.")
      with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
        json.dump({}, fp)

    # Perform the primary check of metric_data table order
    g_logger.debug("Connecting to resource: %s", monitoredResourceNoPwd)
    engine = sqlalchemy.create_engine(monitoredResource)
    connection = engine.connect()
    metrics = _getOutOfOrderMetrics(connection, _SQL_QUERY)
    _reportMetrics(monitoredResourceNoPwd, metrics, emailParams)

    # If previous method does not throw exception, then we come here and clear
    # the database issue flag
    _clearDatabaseIssue(_FLAG_DATABASE_ISSUE)

  except OperationalError:
    # If database connection fails, report issue
    g_logger.critical("Failed due to " + _FLAG_DATABASE_ISSUE)
    _reportDatabaseIssue(_FLAG_DATABASE_ISSUE,
                         monitoredResourceNoPwd,
                         traceback.format_exc(),
                         emailParams)
  except Exception:
    # If any unexpected exception occurs, try to send an email with traceback
    g_logger.critical("%s failed due to unexpected Exception. \n",
                      traceback.format_exc())
    error_reporting.sendMonitorErrorEmail(
        monitorName=_MONITOR_NAME,
        resourceName=monitoredResourceNoPwd,
        message=traceback.format_exc(),
        params=emailParams)
Code example #33
class ResultQualityTests(test_case_base.TestCaseBase):
  """
  Tests the output of htmengine with known data.
  """


  def setUp(self):
    self.config = Config("application.conf",
                         os.environ.get("APPLICATION_CONFIG_PATH"))


    self.plaintextPort = self.config.getint("metric_listener", "plaintext_port")

    self.initialLoggingString = "Running result quality test using metric: %s"

    # Subscribe to results broadcast from Anomaly Service

    connParams = amqp.connection.getRabbitmqConnectionParameters()

    def deleteAmqpQueue(queue):
      with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
          amqpClient):
        amqpClient.deleteQueue(queue=queue, ifUnused=False, ifEmpty=False)

    self.resultsQueueName = (
      "htmengine.result_quality_test.likelihood_results.%s" %
      (uuid.uuid1().hex,))

    with amqp.synchronous_amqp_client.SynchronousAmqpClient(connParams) as (
        amqpClient):
      amqpClient.declareQueue(self.resultsQueueName)
      self.addCleanup(deleteAmqpQueue, self.resultsQueueName)

      amqpClient.bindQueue(
        queue=self.resultsQueueName,
        exchange=self.config.get("metric_streamer", "results_exchange_name"),
        routingKey="")


  def testIIOData(self):
    """
    Tests for expected result quality from IIO data

    1246 rows
    """

    dataIdentifier = "IIO"
    knownDataFile = "iio_us-east-1_i-a2eb1cd9_NetworkIn.csv"
    expectedResults = {"fn": 5,
                       "fp": 10,
                       "tn": 885,
                       "tp": 0,
                       "quality": -550}

    results1 = self._runQualityTest(dataIdentifier, knownDataFile,
                                    expectedResults)

    # Run it one more time and make sure results are consistent
    results2 = self._runQualityTest(dataIdentifier, knownDataFile,
                                    expectedResults)
    self.fastCheckSequenceEqual(results1, results2)


  def testRNSData(self):
    """
    Tests for expected result quality from RNS data.

    4030 rows
    """

    dataIdentifier = "RNS"
    knownDataFile = "rns_Backend_InstanceId=i-57009237_DiskWriteOps.csv"
    expectedResults = {"fn": 3,
                       "fp": 8,
                       "tn": 3683,
                       "tp": 2,
                       "quality": -230}

    self._runQualityTest(dataIdentifier, knownDataFile, expectedResults)


  def testRPMBuildData(self):
    """
    RPM Build Caught Anomaly - Network In

    7428 rows
    """
    dataIdentifier = "RPM"
    knownDataFile = "rpmbuild_realanomaly_networkIn.csv"
    expectedResults = {"fn": 48,
                       "fp": 58,
                       "tn": 7130,
                       "tp": 141,
                       "quality": 10720}

    self._runQualityTest(dataIdentifier, knownDataFile, expectedResults)


  def _genUniqueMetricName(self, datasetName):
    """
    Adds the initial logging message for each test
    """
    runIdentifier = str(time.time())
    metricName = "test.metric.%s.%s" % (datasetName, runIdentifier)

    return metricName


  def _getPathToData(self, filename):
    """
    Returns the absolute path to a file that lives in the relative data/
    directory.
    """
    basePath = os.path.split(os.path.abspath(__file__))[0]
    dataDirPath = os.path.join(basePath, 'data')

    knownDataFilePath = os.path.join(dataDirPath, filename)

    return knownDataFilePath


  @classmethod
  def _loadDataGen(cls, filePath):
    """ Yields (dttm, value, label) three-tuples from the given dataset

    :param filePath: The csv with data
    """
    with open(filePath, "r") as fh:
      reader = csv.reader(fh)

      # Skip headers
      next(reader)
      next(reader)
      next(reader)

      for (dttm, value, label) in reader:
        yield (dttm, value, label)


  def _loadAndSendData(self, sock, filePath, metricName):
    """
    Returns the list of labels from the csv at filePath. Date and value
    fields are sent to the metric specified. As a side effect this
    creates the metric.

    :param sock: A connected socket object
    :param filePath: The csv with data to handle
    :param metricName: The target custom metric we will send data to
    """
    labels = []
    for (dttm, value, label) in self._loadDataGen(filePath):
      # Parse date string
      dttm = parsedate(dttm)
      # Convert to seconds since epoch (Graphite wants this)
      dttm = epochFromNaiveUTCDatetime(dttm)
      dttm = int(dttm)

      # Add data
      sock.sendall("%s %r %s\n" % (metricName, float(value), dttm))

      # Save the label for use later
      # Convert strings to appropriate numerical type
      try:
        labels.append(int(label))
      except ValueError:
        labels.append(float(label))

    self.gracefullyCloseSocket(sock)

    return labels


  def _getSocketConnection(self):
    """
    Returns a socket connected to localhost. This is a small abstraction in case
    this changes in the future.
    """

    # Create our socket connection to the metric listener
    sock = socket.socket()
    sock.connect(("localhost", self.plaintextPort))

    return sock


  def _reapAnomalyServiceResults(self, metricId, numRowsExpected):
    """ Retrieve likelihood results from our AMQP message queue that is bound to
    Anomaly Service's results fanout exchange

    NOTE that Anomaly Service fans out all results for all models via "fanout"
    exchange, so our queue might contain results from additional models, which
    we filter out.

    :param metricId: unique id of our metric/model
    :param numRowsExpected: number of result rows expected by caller

    :returns: a sequence of dicts conforming to the schema of the results items
      per model_inference_results_msg_schema.json
    """
    rows = []

    @test_case_base.retry(duration=30)
    def getBatch(amqpClient):
      message = amqpClient.getOneMessage(self.resultsQueueName, noAck=False)

      try:
        self.assertIsNotNone(message)
      except AssertionError:
        LOGGER.info("Got %d rows so far, waiting for %d more",
                    len(rows), numRowsExpected - len(rows))
        raise

      return message


    with amqp.synchronous_amqp_client.SynchronousAmqpClient(
        amqp.connection.getRabbitmqConnectionParameters()) as amqpClient:

      lastMessage = None

      while len(rows) < numRowsExpected:
        message = getBatch(amqpClient)

        lastMessage = message
        batch = AnomalyService.deserializeModelResult(message.body)

        dataType = (message.properties.headers.get("dataType")
                    if message.properties.headers else None)

        if dataType:
          continue # Not a model inference result

        # batch is a dict compliant with model_inference_results_msg_schema.json

        if batch["metric"]["uid"] != metricId:
          # Another model's result
          continue

        # Extract data rows; each row is a dict from the "results" attribute per
        # model_inference_results_msg_schema.json
        rows.extend(batch["results"])


      lastMessage.ack(multiple=True)

    return rows


  def _verifyResults(self,
                     uid,
                     metricName,
                     knownDataFilePath,
                     labels,
                     expectedResults,
                     boundingRange=.1):
    """
    Waits for the model to complete running through the data up to lastRowId
    and then computes a confusion matrix over all the results and
    compares those values to the pre-calculated expectedResults.

    :param uid: The uid of the metric / model
    :param metricName: name of custom metric
    :param knownDataFilePath: Path to the csv file containing the known data
    :param labels: The ground truth labels for each row of data.
    :param expectedResults: A dict containing pre-computed confusion matrix
                            results
    :param boundingRange: The allowable deviation from the values in
                          ``expectedResults``. Exceeding this range will fail
                          the test

    :returns: A sequence of results from the model; each result is a sequence of
      <Date string>, <Value>, <Likelihood Score>,  <Record Number>
    """

    lastRowId = len(labels)
    data = self.getModelResults(uid, lastRowId)

    # We get this back from the API backwards
    data.reverse()

    with open("results.%s.csv" % (metricName,), "w") as fh:
      writer = csv.writer(fh)
      writer.writerow(("timestamp", "metric_value", "likelihood", "rowid"))
      writer.writerows(data)

    # Retrieve results broadcast by Anomaly Service and compare against known
    # data
    anomalyServiceResults = self._reapAnomalyServiceResults(
      metricId=uid,
      numRowsExpected=len(labels))

    # Fix up timestamps for compatibility with our known data and for saving to
    # csv file
    for row in anomalyServiceResults:
      row["ts"] = (datetime.datetime.utcfromtimestamp(row["ts"])
                   .strftime("%Y-%m-%d %H:%M:%S"))

    # Write out the results for debugging
    with open("anomsvc.%s.csv" % (metricName,), "w") as fh:
      writer = csv.writer(fh)
      attributes = sorted(anomalyServiceResults[0].keys())

      # Write the header row
      writer.writerow(attributes)

      # Write the data rows
      for row in anomalyServiceResults:
        fields = tuple(row[attr] for attr in attributes)
        writer.writerow(fields)

    # Compare timestamp and value sequence in results against known data
    knownData = tuple((ts, float(value))
                      for ts, value, _label
                      in self._loadDataGen(knownDataFilePath))
    engineData = tuple((ts.strftime("%Y-%m-%d %H:%M:%S"), value)
                       for ts, value, _, _ in data)
    self.fastCheckSequenceEqual(engineData, knownData)

    # Compare data from htmengine with AMQP-dispatched data from Anomaly
    # Service
    places = 9
    dataFromAnomalyService = tuple(
      (row["ts"], row["value"],
       round(row["anomaly"], places), row["rowid"])
      for row in anomalyServiceResults)
    engineData = tuple(
      (ts.strftime("%Y-%m-%d %H:%M:%S"), value, round(score, places), rowid)
      for ts, value, score, rowid in data)
    self.fastCheckSequenceEqual(engineData, dataFromAnomalyService)

    # Compute the confusion matrix
    cMatrix = genConfusionMatrix(labels, data)

    def formatMessage(statusMessage):
      substitutions = dict(expectedResults)
      substitutions.update({
        "statusMessage": statusMessage,
        "boundingRange": boundingRange,
        "afn": cMatrix.fn,
        "afp": cMatrix.fp,
        "atn": cMatrix.tn,
        "atp": cMatrix.tp,
        "aquality": cMatrix.quality})
      message = ("%(statusMessage)s\n"
                 "Expected:\n"
                 "    False negatives: %(fn)i\n"
                 "    False positives: %(fp)i\n"
                 "    True negatives:  %(tn)i\n"
                 "    True positives:  %(tp)i\n"
                 "    Quality Score:   %(quality)i\n"
                 "Actual:\n"
                 "    False negatives: %(afn)i\n"
                 "    False positives: %(afp)i\n"
                 "    True negatives:  %(atn)i\n"
                 "    True positives:  %(atp)i\n"
                 "    Quality Score:   %(aquality)i\n") % substitutions
      return message

    for (key, value) in expectedResults.iteritems():
      actual = getattr(cMatrix, key)
      spread = value * boundingRange
      # We don't call these 'upper' and 'lower' bounds because if value is
      # negative it reverses the expected order of inequality
      boundA = value - spread
      boundB = value + spread

      failMessage = formatMessage(
        "Change in %s - %.2f boundary violation." %
        (key, boundingRange))

      self.assertTrue((boundA <= actual <= boundB) or
                      (boundB <= actual <= boundA),
                      failMessage)

    passingMessage = formatMessage("%s Passed with %.2f boundary." %
                                   (self, boundingRange,))
    LOGGER.info(passingMessage)

    return data


  def _runQualityTest(self, dataIdentifier, knownDataFile, expectedResults):
    """
    Runs the data from knownDataFile (a csv) through htmengine and verifies
    htmengine returns the expected values in terms of a confusion matrix dict
    expectedResults.

    :param dataIdentifier: A string to identify this data in logs
    :param knownDataFile: A csv filename that exist in local data/ dir
    :param expectedResults: The confusion matrix and quality score we expect
                            out of htmengine.
    :type expectedResults: dict

    :returns: A sequence of results from the model; each result is a sequence of
      <Date string>, <Value>, <Likelihood Score>,  <Record Number>
    """

    metricName = self._genUniqueMetricName(dataIdentifier)

    self.addCleanup(self._deleteMetric, metricName)

    LOGGER.info(self.initialLoggingString, metricName)

    # Get path to data
    knownDataFilePath = self._getPathToData(knownDataFile)

    # Load and send data
    sock = self._getSocketConnection()
    LOGGER.info("Sending data from %s ...", knownDataFile)
    labels = self._loadAndSendData(sock, knownDataFilePath, metricName)

    # Make sure the metric was properly created and wait for the expected
    # records to be stored. NOTE: Waiting for all records to be stored
    # facilitates consistent stats calculation in htmengine, resulting in
    # consistency of results from one run of the test to the next.
    uid = self.checkMetricCreated(metricName, numRecords=len(labels))

    # Save the uid for later
    LOGGER.info("Metric %s has uid: %s", metricName, uid)

    # Send model creation request
    nativeMetric = {"datasource": "custom",
                    "metricSpec": {"uid": uid}}

    model = self._createModel(nativeMetric)
    self.assertEqual(model.uid, uid)
    self.assertEqual(model.name, metricName)
    self.assertEqual(model.server, metricName)

    return self._verifyResults(uid, metricName, knownDataFilePath, labels,
                               expectedResults)
Code example #34
"""Simple demonstration sending CPU percent samples to htmengine"""

import json
import os
import sys
import time

import psutil

from nta.utils import message_bus_connector
from nta.utils.config import Config

appConfig = Config("application.conf", os.environ["APPLICATION_CONFIG_PATH"])
MESSAGE_QUEUE_NAME = appConfig.get("metric_listener", "queue_name")


def sendSample(bus, metricName, value, epochTimestamp):
    singleDataPoint = "%s %r %d" % (metricName, float(value), epochTimestamp)
    msg = json.dumps(dict(protocol="plain", data=[singleDataPoint]))
    bus.publish(mqName=MESSAGE_QUEUE_NAME, body=msg, persistent=True)


if __name__ == "__main__":
    bus = message_bus_connector.MessageBusConnector()
    metricName = "cpu_percent"

    print "Sending CPU percent samples to `%s`..." % metricName
Code example #35
from htmengine import repository
from htmengine.repository import schema

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
CONFIG = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(CONFIG.config_file_name)

# Used for autogenerating migrations.
TARGET_METADATA = schema.metadata

appConfig = Config("application.conf", os.environ["APPLICATION_CONFIG_PATH"])

def runMigrationsOffline():
  """Run migrations in 'offline' mode.

  See Alembic documentation for more details on these functions.

  This configures the context with just a URL
  and not an Engine, though an Engine is acceptable
  here as well.  By skipping the Engine creation
  we don't even need a DBAPI to be available.

  Calls to context.execute() here emit the given string to the
  script output.
  """
  context.configure(url=repository.getDbDSN(appConfig),
                    target_metadata=TARGET_METADATA)

  with context.begin_transaction():
    context.run_migrations()
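

# The 'online' counterpart creates an Engine and runs migrations over a live
# connection. A minimal sketch, assuming the standard Alembic env.py pattern
# and htmengine's repository.engineFactory:
def runMigrationsOnline():
  """Run migrations in 'online' mode."""
  engine = repository.engineFactory(appConfig)

  with engine.connect() as connection:
    context.configure(connection=connection,
                      target_metadata=TARGET_METADATA)

    with context.begin_transaction():
      context.run_migrations()


if context.is_offline_mode():
  runMigrationsOffline()
else:
  runMigrationsOnline()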
Code example #36
def main():
    """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
    try:
        args = _getArgs()
        logging_support.LoggingSupport.initLogging(
            loggingLevel=args.loggingLevel, logToFile=True)

        confDir = os.path.dirname(args.monitorConfPath)
        confFileName = os.path.basename(args.monitorConfPath)
        config = Config(confFileName, confDir)

        modelsUrl = config.get("S1", "TAURUS_MODELS_URL")
        apiKey = config.get("S1", "TAURUS_API_KEY")

        emailParams = dict(
            senderAddress=config.get("S1", "EMAIL_SENDER_ADDRESS"),
            recipients=config.get("S1", "EMAIL_RECIPIENTS"),
            awsRegion=config.get("S1", "EMAIL_AWS_REGION"),
            sesEndpoint=config.get("S1", "EMAIL_SES_ENDPOINT"),
            awsAccessKeyId=config.get("S1", "EMAIL_SES_AWS_ACCESS_KEY_ID"),
            awsSecretAccessKey=config.get("S1",
                                          "EMAIL_SES_AWS_SECRET_ACCESS_KEY"))

        dbConf = os.getenv("TAURUS_MONITORS_DB_CONFIG_PATH",
                           "TAURUS_MONITORS_DB_CONFIG_PATH is not set")
        g_logger.info("TAURUS_MONITORS_DB_CONFIG_PATH: %s", dbConf)
        g_logger.info("DB CONF DIR: %s", CONF_DIR)

        if args.testEmail:
            g_logger.info("Sending an email for test purposes.")
            error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                                  resourceName=modelsUrl,
                                                  message="Test issue",
                                                  isTest=True,
                                                  params=emailParams)

        # Create a db error flag file if it doesn't already exist
        if not os.path.isfile(_DB_ERROR_FLAG_FILE):
            g_logger.debug("Making DB error flag file")
            with open(_DB_ERROR_FLAG_FILE, "wb") as fp:
                json.dump({}, fp)

        _connectAndCheckModels(modelsUrl, apiKey, args.requestTimeout,
                               emailParams)
        _clearDatabaseIssue("sqlalchemy.exc.OperationalError")

    except OperationalError:
        g_logger.critical("Failed due to sqlalchemy.exc.OperationalError")
        issue = _getIssueString("sqlalchemy.exc.OperationalError",
                                traceback.format_exc())
        _reportDatabaseIssue("sqlalchemy.exc.OperationalError", modelsUrl,
                             issue, emailParams)
    except Exception:
        # Unexpected Exceptions are reported every time.
        g_logger.critical("%s failed due to unexpected Exception. \n",
                          __name__)
        g_logger.critical("Traceback:\n", exc_info=True)
        issue = _getIssueString("Unexpected Exception", traceback.format_exc())
        error_reporting.sendMonitorErrorEmail(monitorName=_MONITOR_NAME,
                                              resourceName=modelsUrl,
                                              message=issue,
                                              params=emailParams)
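

# For reference, the monitor conf file passed via --monitorConfPath is expected
# to carry an [S1] section along these lines (hypothetical values):
#
#   [S1]
#   TAURUS_MODELS_URL = https://tauruspoc.example.com/_models
#   TAURUS_API_KEY = <api-key>
#   EMAIL_SENDER_ADDRESS = monitoring@example.com
#   EMAIL_RECIPIENTS = ops@example.com
#   EMAIL_AWS_REGION = us-west-2
#   EMAIL_SES_ENDPOINT = email.us-west-2.amazonaws.com
#   EMAIL_SES_AWS_ACCESS_KEY_ID = <aws-access-key-id>
#   EMAIL_SES_AWS_SECRET_ACCESS_KEY = <aws-secret-access-key>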
Code example #37
"""Simple demonstration sending CPU percent samples to htmengine"""

import json
import os
import sys
import time

import psutil

from nta.utils import message_bus_connector
from nta.utils.config import Config



appConfig = Config("application.conf", os.environ["APPLICATION_CONFIG_PATH"])
MESSAGE_QUEUE_NAME = appConfig.get("metric_listener", "queue_name")



def sendSample(bus, metricName, value, epochTimestamp):
  singleDataPoint = "%s %r %d" % (metricName, float(value), epochTimestamp)
  msg = json.dumps(dict(protocol="plain", data=[singleDataPoint]))
  bus.publish(mqName=MESSAGE_QUEUE_NAME, body=msg, persistent=True)


if __name__ == "__main__":
  bus = message_bus_connector.MessageBusConnector()
  metricName = "cpu_percent"

  print "Sending CPU percent samples to `%s`..." % metricName
def _parseArgs():
  """
  :returns: dict of arg names and values:
    rmqHost: Host of RabbitMQ management interface
    rmqPort: Port number of RabbitMQ management interface
    rmqUser: RabbitMQ username
    rmqPassword: RabbitMQ password
    rmqQueues: sequence of vhost-qualified RabbitMQ queue names to monitor
      e.g., ["%2f/taurus.metric.custom.data",
             "%2f/taurus.mswapper.results",
             "%2f/taurus.mswapper.scheduler.notification"]
    metricDestHost: Host of metric destination address; None for dry-run
    metricDestPort: Port number of metric destination address
    metricPrefix: prefix for emitted metric names
  """
  usage = (
    "%prog [options]\n\n"
    "Collects statistics from a RabbitMQ server and emits them "
    "as metrics to the destination htmengine app server.\n"
    "\n"
    "The following metrics are collected and emitted by default, where\n"
    "<prefix> is the value of the --metric-prefix command-line option.\n"
    "\t<prefix>-allq-ready.avg - average number of READY messages in all\n"
    "\t\tqueues.\n"
    "\n"
    "\t<prefix>-q-taurus.metric.custom.data-ready.avg - average number of\n"
    "\t\tREADY messages in htmengine's Metric Storer input queue.\n"
    "\n"
    "\t<prefix>-q-taurus.mswapper.results-ready.avg - average number of READY\n"
    "\t\tmessages in htmengine's Anomaly Service input queue.\n"
    "\n"
    "\t<prefix>-q-taurus.mswapper.scheduler.notification-ready.avg - average\n"
    "\t\tnumber of READY messages in htmengine's Model Scheduler notification\n"
    "\t\tinput queue"
  )

  parser = OptionParser(usage=usage)

  # Get params to use as option defaults
  rmqParams = amqp.connection.RabbitmqManagementConnectionParams()

  parser.add_option(
    "--rmq-addr",
    action="store",
    type="string",
    dest="rmqAddr",
    default="%s:%d" % (rmqParams.host, rmqParams.port),
    help=("Address and port host:port of RabbitMQ Management interface "
          "[default: %default]"))

  parser.add_option(
    "--rmq-user",
    action="store",
    type="string",
    dest="rmqUser",
    default=rmqParams.username,
    help="Username for RabbitMQ authentication [default: %default]")

  parser.add_option(
    "--rmq-pass",
    action="store",
    type="string",
    dest="rmqPassword",
    default=rmqParams.password,
    help="Password for RabbitMQ authentication [default: %default]")

  rmqVhost = (rmqParams.vhost if rmqParams.vhost != "/"
              else "%" + rmqParams.vhost.encode("hex"))
  appConfig = Config("application.conf", os.environ.get("APPLICATION_CONFIG_PATH"))
  swapperConfig = ModelSwapperConfig()
  defaultQueues = [
    swapperConfig.get("interface_bus", "results_queue"),
    swapperConfig.get("interface_bus", "scheduler_notification_queue"),
    appConfig.get("metric_listener", "queue_name")
  ]
  defaultQueues = ["%s/%s" % (rmqVhost, q) for q in defaultQueues]

  parser.add_option(
    "--rmq-queues",
    action="store",
    type="string",
    dest="rmqQueues",
    default=",".join(defaultQueues),
    help=("RabbitMQ message queues to monitor; comma-separated, "
          "vhost-qualified; [default: %default]"))

  parser.add_option(
      "--dryrun",
      action="store_true",
      default=False,
      dest="dryRun",
      help=("Use this flag to do a dry run: retrieve data and log it; mutually "
            "exclusive with --metric-addr"))

  parser.add_option(
    "--metric-addr",
    action="store",
    type="string",
    dest="metricDestAddr",
    help=("Destination address for metrics as host:port; typically address of "
          "htmengine custom metrics listener; htmengine default metric "
          "listener port is 2003"))

  parser.add_option(
    "--metric-prefix",
    action="store",
    type="string",
    dest="metricPrefix",
    help="Prefix for metric names")

  options, remainingArgs = parser.parse_args()
  if remainingArgs:
    msg = "Unexpected remaining args: %r" % (remainingArgs,)
    g_log.error(msg)
    parser.error(msg)


  if not options.rmqAddr:
    msg = "Missing address of RabbitMQ server"
    g_log.error(msg)
    parser.error(msg)

  rmqHost, _, rmqPort = options.rmqAddr.rpartition(":")
  if not rmqHost:
    msg = "Missing Hostname or IP address of RabbitMQ management interface."
    g_log.error(msg)
    parser.error(msg)

  if not rmqPort:
    msg = "Missing port number of RabbitMQ management interface."
    g_log.error(msg)
    parser.error(msg)

  try:
    rmqPort = int(rmqPort)
  except ValueError:
    msg = ("RabbitMQ Management Interface port must be an integer, but got %r"
           % (rmqPort,))
    g_log.exception(msg)
    parser.error(msg)

  if not options.rmqUser:
    msg = "Missing RabbitMQ user name."
    g_log.error(msg)
    parser.error(msg)

  if not options.rmqPassword:
    msg = "Missing RabbitMQ password."
    g_log.error(msg)
    parser.error(msg)

  if not options.rmqQueues:
    msg = "Missing vhost-qualified message queue names"
    g_log.error(msg)
    parser.error(msg)

  rmqQueues = options.rmqQueues.split(",")

  if options.dryRun:
    if options.metricDestAddr:
      msg = "--dryrun is mutually exclusive with --metric-addr"
      g_log.error(msg)
      parser.error(msg)

    metricDestHost = metricDestPort = None
  else:
    if not options.metricDestAddr:
      msg = "Missing address of metric destination server"
      g_log.error(msg)
      parser.error(msg)

    metricDestHost, _, metricDestPort = options.metricDestAddr.rpartition(":")
    if not metricDestHost:
      msg = "Missing Hostname or IP address of metric destination server."
      g_log.error(msg)
      parser.error(msg)

    if not metricDestPort:
      msg = "Missing port number of metric destination server."
      g_log.error(msg)
      parser.error(msg)

    try:
      metricDestPort = int(metricDestPort)
    except ValueError:
      msg = "Metric destination port must be an integer, but got %r" % (
        metricDestPort,)
      g_log.exception(msg)
      parser.error(msg)

  options.metricPrefix = (options.metricPrefix.strip()
                          if options.metricPrefix is not None else None)
  if not options.metricPrefix:
    msg = "Missing or empty metric name prefix"
    g_log.error(msg)
    parser.error(msg)


  return dict(
    rmqHost=rmqHost,
    rmqPort=rmqPort,
    rmqUser=options.rmqUser,
    rmqPassword=options.rmqPassword,
    rmqQueues=rmqQueues,
    metricDestHost=metricDestHost,
    metricDestPort=metricDestPort,
    metricPrefix=options.metricPrefix
  )
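

# For illustration, a typical invocation (hypothetical script name and
# credentials; RabbitMQ's management API listens on port 15672 by default,
# and htmengine's default metric listener port is 2003):
#
#   python rabbitmq_stats_collector.py \
#     --rmq-addr=localhost:15672 --rmq-user=guest --rmq-pass=guest \
#     --metric-addr=localhost:2003 --metric-prefix=rmq-myhost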