コード例 #1
0
ファイル: client.py プロジェクト: astanway/carbon
    def sendDatapoint(self, metric, datapoint):
        """Relay a single datapoint, queueing it when it cannot be sent now.

        Records the factory queue's high-water mark, then either parks the
        point (while paused), appends it behind the existing backlog and
        drains (when points are already queued), or sends it directly.
        """
        # Track the largest queue depth observed so far.
        instrumentation.max(self.relayMaxQueueLength, len(self.factory.queue))

        if self.paused:
            # Sending is suspended: park the point and count it.
            self.factory.enqueue(metric, datapoint)
            instrumentation.increment(self.queuedUntilReady)
            return

        if self.factory.hasQueuedDatapoints():
            # Preserve ordering: append behind the backlog, then drain it.
            self.factory.enqueue(metric, datapoint)
            self.sendQueued()
            return

        # Fast path: nothing queued, send this one point immediately.
        self._sendDatapoints([(metric, datapoint)])
コード例 #2
0
    def sendDatapoint(self, metric, datapoint):
        """Send one datapoint now, or enqueue it if sending must wait."""
        factory = self.factory
        # Record the queue's high-water mark before deciding what to do.
        instrumentation.max(self.relayMaxQueueLength, len(factory.queue))
        if self.paused:
            # Paused: hold the point and count how many wait for readiness.
            factory.enqueue(metric, datapoint)
            instrumentation.increment(self.queuedUntilReady)
        elif factory.hasQueuedDatapoints():
            # A backlog exists; keep ordering by queueing, then flush.
            factory.enqueue(metric, datapoint)
            self.sendQueued()
        else:
            # No backlog: ship the single point straight through.
            self._sendDatapoints([(metric, datapoint)])
コード例 #3
0
ファイル: client.py プロジェクト: opentable/carbon
 def sendDatapoint(self, metric, datapoint):
   """Relay one datapoint: drop it when the queue is at capacity, send it
   when a protocol is connected, otherwise hold it until connected."""
   instrumentation.increment(self.attemptedRelays)
   depth = self.queueSize
   instrumentation.max(self.relayMaxQueueLength, depth)
   if depth >= settings.MAX_QUEUE_SIZE:
     # Queue saturated: fire the queueFull deferred only on the first
     # drop of this episode, then discard the point.
     if not self.queueFull.called:
       self.queueFull.callback(depth)
     instrumentation.increment(self.fullQueueDrops)
     return
   if self.connectedProtocol:
     self.connectedProtocol.sendDatapoint(metric, datapoint)
   else:
     # No connection yet; park the point and count it.
     self.enqueue(metric, datapoint)
     instrumentation.increment(self.queuedUntilConnected)
コード例 #4
0
ファイル: client.py プロジェクト: jacklesplat/ql_emc_graphite
 def sendDatapoint(self, metric, datapoint):
   """Account for one relay attempt and route the datapoint appropriately."""
   instrumentation.increment(self.attemptedRelays)
   instrumentation.max(self.relayMaxQueueLength, self.queueSize)
   queueSize = self.queueSize
   if queueSize < settings.MAX_QUEUE_SIZE:
     if self.connectedProtocol:
       # Connected: hand the point straight to the protocol.
       self.connectedProtocol.sendDatapoint(metric, datapoint)
     else:
       # Not connected yet: hold the point and count it as waiting.
       self.enqueue(metric, datapoint)
       instrumentation.increment(self.queuedUntilConnected)
   else:
     # Over capacity: notify listeners once, then drop this point.
     if not self.queueFull.called:
       self.queueFull.callback(queueSize)
     instrumentation.increment(self.fullQueueDrops)
コード例 #5
0
ファイル: client.py プロジェクト: maraev/carbon
  def sendDatapoint(self, metric, datapoint):
    """Queue one datapoint (dropping it when the queue is full) and schedule
    a deferred flush if a connection is available."""
    instrumentation.increment(self.attemptedRelays)
    instrumentation.max(self.relayMaxQueueLength, self.queueSize)

    if self.queueSize < settings.MAX_QUEUE_SIZE:
      self.enqueue(metric, datapoint)
    else:
      # Queue saturated: signal queueFull only once, then drop the point.
      if not self.queueFull.called:
        self.queueFull.callback(self.queueSize)
      instrumentation.increment(self.fullQueueDrops)

    if not self.connectedProtocol:
      # No connection yet: just count the point as waiting.
      instrumentation.increment(self.queuedUntilConnected)
    else:
      # Defer the flush slightly so this call returns to the event loop.
      reactor.callLater(settings.TIME_TO_DEFER_SENDING,
                        self.connectedProtocol.sendQueued)
コード例 #6
0
    def sendDatapoint(self, metric, datapoint):
        """Accept one datapoint for relaying.

        The point is enqueued unless the queue is at capacity (then it is
        dropped and counted); afterwards a deferred queue flush is scheduled
        on the connected protocol, or the point is counted as waiting when
        no connection exists yet.
        """
        instrumentation.increment(self.attemptedRelays)
        instrumentation.max(self.relayMaxQueueLength, self.queueSize)
        if self.queueSize >= settings.MAX_QUEUE_SIZE:
            # Fire queueFull at most once per saturation episode.
            if not self.queueFull.called:
                self.queueFull.callback(self.queueSize)
            instrumentation.increment(self.fullQueueDrops)
        else:
            self.enqueue(metric, datapoint)

        if self.connectedProtocol:
            # Hand the flush to the reactor after a short deferral.
            flush = self.connectedProtocol.sendQueued
            reactor.callLater(settings.TIME_TO_DEFER_SENDING, flush)
        else:
            instrumentation.increment(self.queuedUntilConnected)
コード例 #7
0
    def sendQueued(self):
        """Drain queued datapoints in bounded batches without starving the
        reactor.

        This should be the only method used to send stats. Each call sends
        at most settings.MAX_DATAPOINTS_PER_MESSAGE stats; if the queue is
        not empty afterwards, another run is scheduled via reactor.callLater
        so incoming stats keep flowing while we send.

        With a 0.0001s chaining delay you get at most ~1,000 invocations
        per second; at 100 points per message that is a theoretical ceiling
        of ~100,000 stats/sec (6,000,000/min) — likely too high for a
        typical receiver. In practice network delay (order of 10ms per
        send) throttles this, so a million-point queue drains on the order
        of minutes, which is more realistic.
        """
        reschedule_delay = 0.0001
        queue_depth = self.factory.queueSize

        # While paused we only track how much is waiting; never send.
        if self.paused:
            instrumentation.max(self.queuedUntilReady, queue_depth)
            return

        # Nothing queued — nothing to do.
        if not self.factory.hasQueuedDatapoints():
            return

        # Optionally reset a connection whose send/receive ratio is poor.
        if settings.USE_RATIO_RESET is True:
            if not self.connectionQualityMonitor():
                sent_count = instrumentation.prior_stats.get(self.sent, 0)
                received_count = instrumentation.prior_stats.get(
                    'metricsReceived', 0)
                self.resetConnectionForQualityReasons(
                    "Sent: {0}, Received: {1}".format(
                        sent_count, received_count))

        self._sendDatapoints(self.factory.takeSomeFromQueue())

        # When a previously-full queue has drained below the low watermark,
        # fire queueHasSpace exactly once so producers may resume.
        drained = (self.factory.queueFull.called
                   and queue_depth < SEND_QUEUE_LOW_WATERMARK)
        if drained and not self.factory.queueHasSpace.called:
            self.factory.queueHasSpace.callback(queue_depth)

        # More left? Chain another invocation after a short delay.
        if self.factory.hasQueuedDatapoints():
            reactor.callLater(reschedule_delay, self.sendQueued)
コード例 #8
0
ファイル: client.py プロジェクト: Krylon360/carbon
    def sendQueued(self):
        """This should be the only method that will be used to send stats.
    In order to not hold the event loop and prevent stats from flowing
    in while we send them out, this will process
    settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
    are still items in the queue, this will invoke reactor.callLater
    to schedule another run of sendQueued after a reasonable enough time
    for the destination to process what it has just received.

    Given a queue size of one million stats, and using a
    chained_invocation_delay of 0.0001 seconds, you'd get 1,000
    sendQueued() invocations/second max.  With a
    settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
    sent could theoretically be as high as 100,000 stats/sec, or
    6,000,000 stats/minute.  This is probably too high for a typical
    receiver to handle.

    In practice this theoretical max shouldn't be reached because
    network delays should add an extra delay - probably on the order
    of 10ms per send, so the queue should drain with an order of
    minutes, which seems more realistic.
    """
        chained_invocation_delay = 0.0001
        queueSize = self.factory.queueSize

        # While paused, only record how much is waiting; never send.
        if self.paused:
            instrumentation.max(self.queuedUntilReady, queueSize)
            return
        # Empty queue: nothing to do.
        if not self.factory.hasQueuedDatapoints():
            return

        # Optionally reset a connection whose sent/received ratio is poor.
        if settings.USE_RATIO_RESET is True:
            if not self.connectionQualityMonitor():
                self.resetConnectionForQualityReasons(
                    "Sent: {0}, Received: {1}".format(
                        instrumentation.prior_stats.get(self.sent, 0),
                        instrumentation.prior_stats.get("metricsReceived", 0),
                    )
                )

        self._sendDatapoints(self.factory.takeSomeFromQueue())
        if self.factory.queueFull.called and queueSize < SEND_QUEUE_LOW_WATERMARK:
            # BUGFIX: fire queueHasSpace at most once. A twisted Deferred
            # raises AlreadyCalledError if callback() is invoked a second
            # time, which would happen here on every drain cycle below the
            # low watermark after the first.
            if not self.factory.queueHasSpace.called:
                self.factory.queueHasSpace.callback(queueSize)
        # More datapoints remain: chain another run after a short delay.
        if self.factory.hasQueuedDatapoints():
            reactor.callLater(chained_invocation_delay, self.sendQueued)