Example #1
import functools
import sys
import traceback

# ExtendedLogger comes from the nta.utils package in numenta-apps (assumed path)
from nta.utils.extended_logger import ExtendedLogger


def logExceptions(logger=ExtendedLogger.getExtendedLogger(__name__)):
  """ Returns a closure suitable for use as function/method decorator for
  logging exceptions that leave the scope of the decorated function. Exceptions
  are logged at ERROR level.

  :param logger:    user-supplied logger object compatible with Python's
    logging module; defaults to an ExtendedLogger with `__name__` as context.

  Usage Example:
    NOTE: logging must be initialized *before* any loggers are created, else
      there will be no output; see nupic.support.initLogging()

    @logExceptions()
    def myFunctionFoo():
        ...
        raise RuntimeError("something bad happened")
        ...
  """

  def exceptionLoggingDecorator(func):

    @functools.wraps(func)
    def exceptionLoggingWrap(*args, **kwargs):
      try:
        return func(*args, **kwargs)
      except:  # deliberately broad: log any exception type, then re-raise
        logger.exception(
          "Unhandled exception %r from %r. Caller stack:\n%s",
          sys.exc_info()[1], func, ''.join(traceback.format_stack()), )
        raise

    return exceptionLoggingWrap

  return exceptionLoggingDecorator
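
A minimal usage sketch (hypothetical names; assumes logging has already been
initialized, e.g., via nupic.support.initLogging()):

import logging

logging.basicConfig(level=logging.INFO)  # stand-in for nupic.support.initLogging()

@logExceptions(logger=logging.getLogger("demo"))
def divide(a, b):
  return a / b

try:
  divide(1, 0)  # the ZeroDivisionError is logged at ERROR level, then re-raised
except ZeroDivisionError:
  pass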
Example #2
import functools
import os

from nta.utils.extended_logger import ExtendedLogger  # assumed path, as above


def abortProgramOnAnyException(
    exitCode,
    logger=ExtendedLogger.getExtendedLogger(__name__)):
  """ This function creates a decorator that calls os._exit with the given
  exitCode if any exception escapes the decorated function. This is convenient
  for background thread functions whose unexpected failure should force the
  process to exit with error (e.g., so that supervisord would restart the
  process)

  NOTE: os._exit() differs from sys.exit() in that os._exit() exits to the
  system with the specified status, without normal exit processing. sys.exit()
  wouldn't work for threads because sys.exit() simply raises the SystemExit
  exception, which will cause the thread to complete silently, without the main
  thread realizing that something went wrong.

  Example::

      @abortProgramOnAnyException(exitCode=1, logger=logging.getLogger("mycontext"))
      def runThreadFunctionThatDoesSomething():
        ...
        # oops, something unrecoverable just happened
        raise SomethingUnrecoverable
  """
  def abortOnExceptionDecorator(f):

    @functools.wraps(f)
    def abortOnExceptionWrapper(*args, **kwargs):
      try:
        return f(*args, **kwargs)
      except:  # deliberately broad: any escaping exception must abort the process
        try:
          logger.exception("Patient failed, aborting program with exitCode=%r",
                           exitCode)
        finally:
          os._exit(exitCode)

        # Should never get here after os._exit()
        raise

    return abortOnExceptionWrapper

  return abortOnExceptionDecorator
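
A hypothetical sketch of guarding a background thread with this decorator
(the worker name and exit code are illustrative, not from the source):

import threading

@abortProgramOnAnyException(exitCode=1)
def worker():
  raise RuntimeError("unrecoverable failure in background thread")

t = threading.Thread(target=worker)
t.start()
t.join()  # never returns: the process terminates via os._exit(1) first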
Example #3

  # Method of a unittest.TestCase subclass. Assumed surrounding imports:
  # os, time, pytz, requests, datetime (from datetime import datetime),
  # taurus.metric_collectors.metric_utils, and
  # nta.utils.extended_logger.ExtendedLogger.
  def testMetricDataBatchWrite(self):

    # Note: This test assumes that there is a running Taurus instance ready to
    # receive and process inbound custom metric data.  In the deployed
    # environment $TAURUS_HTM_SERVER and $TAURUS_APIKEY must be set.  Otherwise
    # default values will be assumed.

    host = os.environ.get("TAURUS_HTM_SERVER", "127.0.0.1")
    apikey = os.environ.get("TAURUS_APIKEY", "taurus")

    metricName = "bogus-test-metric"

    _LOG = ExtendedLogger.getExtendedLogger(__name__)

    UTC_LOCALIZED_EPOCH = (
      pytz.timezone("UTC").localize(datetime.utcfromtimestamp(0)))

    now = datetime.now(pytz.timezone("UTC"))

    # Send metric data in batches; for test purposes, exceed the max batch
    # size so that the batch gets chunked

    with metric_utils.metricDataBatchWrite(log=_LOG) as putSample:
      for x in xrange(metric_utils._METRIC_DATA_BATCH_WRITE_SIZE + 1):
        ts = ((now - UTC_LOCALIZED_EPOCH).total_seconds()
              - metric_utils._METRIC_DATA_BATCH_WRITE_SIZE
              + 1
              + x)
        putSample(metricName=metricName,
                  value=x,
                  epochTimestamp=ts)

    self.addCleanup(requests.delete,
                    "https://%s/_metrics/custom/%s" % (host, metricName),
                    auth=(apikey, ""),
                    verify=False)

    attempt = 0
    found = False
    while not found:
      result = requests.get("https://%s/_metrics/custom" % host,
                            auth=(apikey, ""),
                            verify=False)

      models = result.json()

      for model in models:
        if model["name"] == metricName:
          # Quick check to make sure the data made its way through
          result = requests.get("https://%s/_models/%s" % (host, model["uid"]),
                                auth=(apikey, ""),
                                verify=False)

          if (result.json()[0]["last_rowid"] ==
              metric_utils._METRIC_DATA_BATCH_WRITE_SIZE + 1):
            found = True
            break

      else:  # for/else: no break above, i.e., samples not fully propagated yet
        if attempt == 30:
          self.fail(
            "Not all metric data samples made it through after 30 seconds")
        else:
          time.sleep(1)
          attempt += 1
          continue
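
Distilled from the test above, the core batch-write pattern is just the
following (a sketch; the metric_utils import path is an assumption based on
the Taurus codebase):

import logging

from taurus.metric_collectors import metric_utils

log = logging.getLogger(__name__)

# Samples are buffered and flushed in chunks of _METRIC_DATA_BATCH_WRITE_SIZE
with metric_utils.metricDataBatchWrite(log=log) as putSample:
  putSample(metricName="bogus-test-metric", value=42.0,
            epochTimestamp=1456000000)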
Example #4

import pytz

from nta.utils.extended_logger import ExtendedLogger  # assumed path, as above


# Columns in symbol history csv files; must match up with keys returned by
# fetchData()
_COLS = ("StartDate",
         "StartTime",
         "EndDate",
         "EndTime",
         "UTCOffset",
         "Open",
         "High",
         "Low",
         "Close",
         "Volume",
         "Trades")

# Initialize logging
_LOG = ExtendedLogger.getExtendedLogger(__name__)

# xignite bar data API URL
_API_URL = "http://globalquotes.xignite.com/v3/xGlobalQuotes.json/GetBars?"
_URL_KEYS = {"IdentifierType": "Symbol",
             "Identifier": None,
             "StartTime": None,
             "EndTime": None,
             "Precision": "Minutes",
             "Period": None,
             "_Token": None}



_UTC_TZ = pytz.timezone("UTC")
_EASTERN_TZ = pytz.timezone("US/Eastern") # XIgnite API assumes US/Eastern
Example #5

import datetime
import os

import pytz

from nta.utils.extended_logger import ExtendedLogger  # assumed path, as above

NAIVE_MARKET_OPEN_TIME = datetime.time(9, 30)  # 9:30 AM
NAIVE_MARKET_CLOSE_TIME = datetime.time(16, 00)  # 4 PM
RETAIN_DAYS = 30  # Retain records for 30 days

# XIgnite API credentials
DEFAULT_API_TOKEN = os.environ.get("XIGNITE_API_TOKEN")

HISTORY_PATH = ".history/xignite"

# Columns in symbol history csv files; must match up with keys returned by
# fetchData()
_COLS = ("StartDate", "StartTime", "EndDate", "EndTime", "UTCOffset", "Open",
         "High", "Low", "Close", "Volume", "Trades")

# Initialize logging
_LOG = ExtendedLogger.getExtendedLogger(__name__)

# xignite bar data API URL
_API_URL = "http://globalquotes.xignite.com/v3/xGlobalQuotes.json/GetBars?"
_URL_KEYS = {
    "IdentifierType": "Symbol",
    "Identifier": None,
    "StartTime": None,
    "EndTime": None,
    "Precision": "Minutes",
    "Period": None,
    "_Token": None
}

_UTC_TZ = pytz.timezone("UTC")
_EASTERN_TZ = pytz.timezone("US/Eastern")  # XIgnite API assumes US/Eastern