def _waitForFlusherAndGarbageCollect(engineServer, engineApiKey,
                                     flusherMetricName):
    """Wait for the data path flusher metric to be created in Taurus Engine and
  also garbage-collect flushers from this and prior sessions

  :param str engineServer: dns name of ip addres of Taurus API server

  :param str engineApiKey: API Key of Taurus HTM Engine

  :param str flusherMetricName: the unique name of the flusher metric to wait
    on.

  :raises FlusherMetricNotFound: if the wait fails
  """
    flushers = [
        obj["name"]
        for obj in metric_utils.getAllCustomMetrics(engineServer, engineApiKey)
        if obj["name"].startswith(_DATA_PATH_FLUSHER_METRIC_PREFIX)
    ]
    found = flusherMetricName in flushers

    # Delete flushers, including any from past attempts that failed delete
    for metric in flushers:
        g_log.info("Deleting metric data path flusher metric %s", metric)
        metric_utils.deleteMetric(host=engineServer,
                                  apiKey=engineApiKey,
                                  metricName=metric)

    if not found:
        raise FlusherMetricNotFound(
            "Still waiting for data path flusher metric "
            "{metric}".format(metric=flusherMetricName))
def _safeDeleteMetric(host, apiKey, metricName):
    """Delete metric, suppressing metric_utils.MetricNotFound exception, if any
  """
    try:
        metric_utils.deleteMetric(host=host,
                                  apiKey=apiKey,
                                  metricName=metricName)
    except metric_utils.MetricNotFound:
        pass
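For illustration, a hypothetical cleanup snippet built on this helper; the host, API key, and first metric name come from the tests below, while the second name is made up:

# Hypothetical cleanup: delete metrics that may or may not still exist on the
# Engine; metrics that are already gone are silently skipped.
for name in ("XIGNITE.FOO.VOLUME", "XIGNITE.FOO.CLOSINGPRICE"):
    _safeDeleteMetric(host="localhost", apiKey="taurus", metricName=name)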
Example #4
  def testDeleteMetric(self, requestsMock):
    requestsMock.delete.return_value = Mock(status_code=200)

    metric_utils.deleteMetric(host="localhost",
                              apiKey="taurus",
                              metricName="XIGNITE.FOO.VOLUME")
    requestsMock.delete.assert_called_once_with(
      "https://localhost/_metrics/custom/XIGNITE.FOO.VOLUME",
      auth=("taurus", ""), verify=ANY)
Example #7
  def testDeleteMetricMetricNotFound(self, requestsMock):
    requestsMock.delete.return_value = Mock(status_code=404,
                                            text="metric not found")

    with self.assertRaises(metric_utils.MetricNotFound) as errorContext:
      metric_utils.deleteMetric(host="localhost",
                                apiKey="taurus",
                                metricName="XIGNITE.FOO.VOLUME")

    requestsMock.delete.assert_called_once_with(
      "https://localhost/_metrics/custom/XIGNITE.FOO.VOLUME",
      auth=("taurus", ""), verify=ANY)

    self.assertEqual(errorContext.exception.args[0], "metric not found")
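Taken together, the two tests above pin down the HTTP contract that metric_utils.deleteMetric is expected to honor: an authenticated DELETE against the custom-metrics endpoint, with a 404 surfaced as MetricNotFound. A minimal sketch consistent with that contract follows; it is not the actual metric_utils implementation, and the certificate-verification and non-404 error handling are simplifying assumptions:

import requests


class MetricNotFound(Exception):
  """Raised when the Engine reports that the requested metric does not exist"""


def deleteMetric(host, apiKey, metricName):
  # DELETE https://<host>/_metrics/custom/<metricName>, authenticating with the
  # API key as the basic-auth username and an empty password, as the tests assert
  response = requests.delete(
    "https://{host}/_metrics/custom/{metric}".format(host=host,
                                                     metric=metricName),
    auth=(apiKey, ""),
    verify=False)  # assumption; the tests only check verify=ANY
  if response.status_code == 404:
    raise MetricNotFound(response.text)
  response.raise_for_status()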
def _deleteSymbolMetricsFromEngine(host, apiKey, symbol):
  """Delete metrics corresponding to the given stock symbol from Taurus Engine

  :param host: API server's hostname or IP address
  :param apiKey: API server's API Key
  :param symbol: Stock symbol
  """
  g_log.info("Unmonitoring and deleting existing metrics linked to stock "
             "symbol=%s", symbol)

  # Get matching metrics
  allMetrics = metric_utils.getAllCustomMetrics(host=host, apiKey=apiKey)

  metricsToDelete = tuple(obj["name"]
                          for obj in allMetrics
                          if ".{symbol}.".format(symbol=symbol) in obj["name"])

  g_log.info("Deleteing metrics=%s", metricsToDelete)
  for metricName in metricsToDelete:
    g_log.info("Deleting metric=%s", metricName)
    metric_utils.deleteMetric(host, apiKey, metricName)
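The substring filter above selects any metric whose dotted name embeds the symbol between two dots. A tiny illustration with sample names (only the XIGNITE.FOO.VOLUME style appears elsewhere in these examples; the other names are made up):

# For symbol "FOO", only names containing ".FOO." are selected
sampleNames = ["XIGNITE.FOO.VOLUME",
               "TWITTER.TWEET.HANDLE.FOO.VOLUME",
               "XIGNITE.FOOBAR.VOLUME"]
assert [name for name in sampleNames if ".FOO." in name] == [
  "XIGNITE.FOO.VOLUME", "TWITTER.TWEET.HANDLE.FOO.VOLUME"]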
Example #11
def main():
    logging_support.LoggingSupport.initTool()

    try:
        options = _parseArgs()
        g_log.info("Running %s with options=%r", sys.argv[0], options)

        for metric in metric_utils.getAllCustomMetrics(
                host=options["htmServer"], apiKey=options["apiKey"]):
            if metric["name"] == options["metricName"]:
                metric_utils.deleteMetric(host=options["htmServer"],
                                          apiKey=options["apiKey"],
                                          metricName=metric["name"])

                g_log.info("Deleted metric=%s", metric["name"])
                break

    except SystemExit as e:
        if e.code != 0:
            g_log.exception("delete_metric failed")
        raise
    except Exception:
        g_log.exception("delete_metric failed")
        raise
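The loop above is effectively a guarded delete: deleteMetric is only called for a name the Engine currently reports, so a missing metric is a silent no-op rather than a MetricNotFound error. A roughly equivalent programmatic form, with hypothetical variable names standing in for the parsed options:

# Hypothetical: htmServer, apiKey, and metricName stand in for parsed options
existingNames = set(obj["name"] for obj in metric_utils.getAllCustomMetrics(
    host=htmServer, apiKey=apiKey))
if metricName in existingNames:
    metric_utils.deleteMetric(host=htmServer, apiKey=apiKey,
                              metricName=metricName)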
Example #13
def main():
    """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
    logging_support.LoggingSupport().initTool()

    try:
        options = _parseArgs()

        g_log.info("Verifying that agents are in hot_standby mode")
        for section in config.sections():
            try:
                assert config.get(section, "opmode") == ApplicationConfig.OP_MODE_HOT_STANDBY
            except Exception:
                raise

        g_log.info("Verifying that the old symbol has been removed from the " "metrics configuration")
        for stockData in metric_utils.getMetricsConfiguration().itervalues():
            assert stockData["symbol"] != options.old_symbol

        if options.twitter and (not options.stocks):
            g_log.info(
                "Migrating ONLY twitter data from old-symbol=%s " "to new-symbol=%s",
                options.old_symbol,
                options.new_symbol,
            )
        elif options.stocks and (not options.twitter):
            g_log.info(
                "Migrating ONLY xignite stock data from old-symbol=%s " "to new-symbol=%s",
                options.old_symbol,
                options.new_symbol,
            )
            raise NotImplementedError
        else:
            g_log.info(
                "Migrating BOTH twitter and xignite stock data from " "old-symbol=%s to new-symbol=%s",
                options.old_symbol,
                options.new_symbol,
            )
            raise NotImplementedError

        oldSymbolTweetPrefix = "TWITTER.TWEET.HANDLE.{symbol}.".format(symbol=options.old_symbol)
        newSymbolTweetPrefix = "TWITTER.TWEET.HANDLE.{symbol}.".format(symbol=options.new_symbol)
        oldSymbolTweetMetricsList = []

        with collectorsdb.engineFactory().begin() as conn:

            g_log.info("Renaming metrics to new symbol")
            if options.twitter:
                oldSymbolTweetsQuery = sql.select([tweetSamplesSchema]).where(
                    tweetSamplesSchema.c.metric.contains(oldSymbolTweetPrefix)
                )
                oldSymbolTweets = conn.execute(oldSymbolTweetsQuery)
                for tweetSample in oldSymbolTweets:
                    newMetricName = "{newPrefix}{metric}".format(
                        newPrefix=newSymbolTweetPrefix, metric=tweetSample.metric[len(oldSymbolTweetPrefix) :]
                    )
                    if tweetSample.metric not in oldSymbolTweetMetricsList:
                        oldSymbolTweetMetricsList.append(tweetSample.metric)

                    updateSampleQuery = (
                        tweetSamplesSchema.update()
                        .where(tweetSamplesSchema.c.seq == tweetSample.seq)
                        .values(metric=newMetricName)
                    )

                    conn.execute(updateSampleQuery)

            g_log.info("Forwarding new twitter metric data to Taurus engine...")
            if options.twitter:
                oldestRecordTs = conn.execute(
                    sql.select([tweetSamplesSchema.c.agg_ts], order_by=tweetSamplesSchema.c.agg_ts.asc())
                ).first()[0]
                lastEmittedAggTime = metric_utils.establishLastEmittedSampleDatetime(
                    key=_EMITTED_TWEET_VOLUME_SAMPLE_TRACKER_KEY, aggSec=options.aggPeriod
                )
                aggOffset = (
                    math.ceil(
                        (epochFromNaiveUTCDatetime(lastEmittedAggTime) - epochFromNaiveUTCDatetime(oldestRecordTs))
                        / options.aggPeriod
                    )
                    * options.aggPeriod
                )
                aggStartDatetime = (
                    lastEmittedAggTime - timedelta(seconds=aggOffset) - timedelta(seconds=options.aggPeriod)
                )

                metric_utils.updateLastEmittedSampleDatetime(
                    key=_EMITTED_TWEET_VOLUME_SAMPLE_TRACKER_KEY, sampleDatetime=aggStartDatetime
                )

                MetricDataForwarder.runInThread(
                    metricSpecs=loadMetricSpecs(),
                    aggSec=options.aggPeriod,
                    symbolList=[options.new_symbol],
                    forwardOnlyBacklog=True,
                )

                metric_utils.updateLastEmittedSampleDatetime(
                    key=_EMITTED_TWEET_VOLUME_SAMPLE_TRACKER_KEY, sampleDatetime=lastEmittedAggTime
                )

        g_log.info("Forwarding metrics to dynamodb using new symbol...")
        if options.twitter:
            migrate_tweets_to_dynamodb.main(symbolList=[options.new_symbol])

        g_log.info("Unmonitoring and deleting existing metrics associated with " "symbol=%s", options.old_symbol)
        oldModels = metric_utils.getSymbolModels(options.htmServer, options.apikey, options.old_symbol)
        for model in oldModels:
            metric_utils.unmonitorMetric(options.htmServer, options.apikey, model.uid)
            metric_utils.deleteMetric(options.htmServer, options.apikey, model.name)
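For a concrete sense of the aggregation-offset arithmetic in the twitter-forwarding block above, here is a small worked example with made-up numbers: a 300-second aggregation period and an oldest tweet sample 1000 seconds older than the last emitted aggregation timestamp:

import math
from datetime import datetime, timedelta

aggPeriod = 300  # hypothetical value of options.aggPeriod, in seconds
lastEmittedAggTime = datetime(2015, 6, 1, 12, 0, 0)
oldestRecordTs = lastEmittedAggTime - timedelta(seconds=1000)

# Round the gap up to a whole number of aggregation periods: ceil(1000/300)*300
aggOffset = math.ceil(
    (lastEmittedAggTime - oldestRecordTs).total_seconds() / aggPeriod) * aggPeriod
assert aggOffset == 1200

# Rewind one extra period so emission restarts before the oldest sample's slot
aggStartDatetime = (lastEmittedAggTime
                    - timedelta(seconds=aggOffset)
                    - timedelta(seconds=aggPeriod))
assert aggStartDatetime == datetime(2015, 6, 1, 11, 35, 0)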
def deleteCompanies(
    tickerSymbols,
    engineServer,
    engineApiKey,
    warnAboutDestructiveAction=True,
    warningTimeout=_DEFAULT_WARNING_PROMPT_TIMEOUT_SEC,
):
    """Delete companies from Taurus Collector and their metrics/models from
  Taurus Engine.

  :param sequence tickerSymbols: stock ticker symbols of companies to be
    deleted

  :param str engineServer: dns name of ip addres of Taurus API server

  :param str engineApiKey: API Key of Taurus HTM Engine

  :param bool warnAboutDestructiveAction: whether to warn about destructive
    action; defaults to True.

  :param float warningTimeout: Timeout for the warning prompt; ignored if
    warnAboutDestructiveAction is False

  :raises WarningPromptTimeout: if warning prompt timed out
  :raises UserAbortedOperation: if user chose to abort the operation
  :raises FlusherMetricNotFound:
  """
    tickerSymbols = tuple(symbol.upper() for symbol in tickerSymbols)

    # Check for duplicate symbols
    repeatedSymbols = set(sym for sym in tickerSymbols if tickerSymbols.count(sym) > 1)
    if repeatedSymbols:
        raise ValueError(
            "{numRepeats} symbol(s) are present more than once in "
            "tickerSymbols arg: {repeats}".format(numRepeats=len(repeatedSymbols), repeats=repeatedSymbols)
        )

    # Set will be handier going forward
    tickerSymbols = set(tickerSymbols)

    if warnAboutDestructiveAction:
        _warnAboutDestructiveAction(timeout=warningTimeout, tickerSymbols=tickerSymbols, engineServer=engineServer)

    # If any of the ticker symbols still appear in the collector's metrics config,
    # abort the operation as a precautionary measure.
    allSymbols = set(security[0].upper() for security in metric_utils.getAllMetricSecurities())

    problemSymbols = tickerSymbols & allSymbols
    assert not problemSymbols, (
        "Can't delete - {numProblem} of the specified companies [{symbols}] are "
        "in active metrics configuration".format(numProblem=len(problemSymbols), symbols=problemSymbols)
    )

    # First, we need to synchronize with Taurus Engine's metric data path.
    # If any of the data still in the pipeline is for any of the companies being
    # deleted, then the metrics may be re-created in the Engine after we delete
    # them. This is a yet-unresolved subtlety with custom metrics in htmengine.
    _flushTaurusEngineMetricDataPath(engineServer, engineApiKey)

    # NOTE: We must query custom metrics after flushing the metric data path,
    # since metrics may get created as a side-effect of processing metric data.
    allMetricsMap = {
        obj["name"]: obj for obj in metric_utils.getAllCustomMetrics(host=engineServer, apiKey=engineApiKey)
    }

    allMetricNames = allMetricsMap.keys()

    for symbolNum, symbol in enumerate(tickerSymbols, 1):
        # Delete corresponding metrics from Taurus Engine
        metricNamesToDelete = metric_utils.filterCompanyMetricNamesBySymbol(allMetricNames, symbol)
        if not metricNamesToDelete:
            g_log.info("No metrics to delete for symbol=%s (%d of %d)", symbol, symbolNum, len(tickerSymbols))
            continue

        g_log.info(
            "Deleting metrics and models for ticker symbol=%s from Taurus " "Engine=%s (%d of %d)",
            symbol,
            engineServer,
            symbolNum,
            len(tickerSymbols),
        )

        for metricName in metricNamesToDelete:
            metric_utils.deleteMetric(host=engineServer, apiKey=engineApiKey, metricName=metricName)
            g_log.info("Deleted metric name=%s, uid=%s", metricName, allMetricsMap[metricName]["uid"])

        # Delete the symbol from xignite_security table last; this cascades to
        # delete related rows in other tables via cascading delete relationship.
        #
        # NOTE: garbage collection from other tables not tied to xignite_security
        #  symbols presently depends on aging of the rows (e.g., twitter tables).
        #  After ENG-83, all company-specific rows from all tables will be
        # cleaned up and THIS NOTE SHOULD THEN BE REMOVED
        with collectorsdb.engineFactory().begin() as conn:
            numDeleted = (
                conn.execute(
                    collectorsdb.schema.xigniteSecurity.delete().where(  # pylint: disable=E1120
                        collectorsdb.schema.xigniteSecurity.c.symbol == symbol
                    )
                )
            ).rowcount

            if numDeleted:
                g_log.info("Deleted row=%s from table=%s", symbol, collectorsdb.schema.xigniteSecurity)
            else:
                g_log.warning(
                    "Couldn't delete security row=%s: not found in table=%s",
                    symbol,
                    collectorsdb.schema.xigniteSecurity,
                )
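Finally, a hypothetical invocation of deleteCompanies, e.g. from an automated maintenance script that has obtained confirmation elsewhere and therefore skips the interactive prompt; the symbols, host, and API key are placeholder values borrowed from the tests above:

deleteCompanies(tickerSymbols=["FOO", "BAR"],
                engineServer="localhost",
                engineApiKey="taurus",
                warnAboutDestructiveAction=False)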