def accept(self, conn, addr):
    self._count += 1
    a = Agent(conn, conn)
    h = MirrorHandler(a)
    a.handler = h
    logger.info("Starting Agent for connection %d", self._count)
    a.start()
    a.wait()
    logger.info("Agent finished connection %d", self._count)
def accept(self, conn, addr):
    """
    Create a new agent/handler for each new connection.
    Count and log each new connection and termination.
    """
    self._count += 1
    a = Agent(conn, conn)
    h = HumidityClassifierHandler(a)
    a.handler = h
    logger.info("Starting Agent for connection %d", self._count)
    a.start()
    a.wait()
    logger.info("Agent finished connection %d", self._count)
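# A minimal sketch of how an accepter exposing accept(conn, addr), like the callbacks
# above, is typically wired into a Unix-socket UDF server so Kapacitor gets one
# Agent/Handler pair per connection. The socket path, the accepter class name, and the
# Server import are assumptions based on the socket-based UDF pattern, not part of the
# snippets above; MirrorHandler is assumed to be defined in the same module.
import logging
import sys

from kapacitor.udf.agent import Agent, Server  # assumed location of the socket server helper

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


class accepter(object):
    _count = 0

    def accept(self, conn, addr):
        # One Agent/Handler pair per incoming connection, as in the snippets above.
        self._count += 1
        a = Agent(conn, conn)
        h = MirrorHandler(a)
        a.handler = h
        logger.info("Starting Agent for connection %d", self._count)
        a.start()
        a.wait()
        logger.info("Agent finished connection %d", self._count)


if __name__ == '__main__':
    path = '/tmp/mirror.sock'
    if len(sys.argv) == 2:
        path = sys.argv[1]
    server = Server(path, accepter())
    logger.info("Started server")
    server.serve()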
        value = point.fieldsDouble[self._field]
        self._state.update(value, point)

    def end_batch(self, end_req):
        # Get outliers
        outliers = self._state.outliers(self._scale)

        # Send begin batch with count of outliers
        self._begin_response.begin.size = len(outliers)
        self._agent.write_response(self._begin_response)

        response = udf_pb2.Response()
        for outlier in outliers:
            response.point.CopyFrom(outlier)
            self._agent.write_response(response)

        # Send an identical end batch back to Kapacitor
        response.end.CopyFrom(end_req)
        self._agent.write_response(response)


if __name__ == '__main__':
    a = Agent()
    h = OutliersHandler(a)
    a.handler = h
    logger.info("Starting Agent")
    a.start()
    a.wait()
    logger.info("Agent finished")
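# end_batch() above relies on a per-batch state object exposing update(), reset(), and
# outliers(scale). That class is not shown in the snippet; the sketch below is one
# plausible implementation using an interquartile-range rule, not necessarily the exact
# logic used by OutliersHandler.
class OutlierState(object):
    def __init__(self):
        self._entries = []

    def reset(self):
        self._entries = []

    def update(self, value, point):
        # Keep the raw value alongside the original protobuf point so outliers
        # can be written back to Kapacitor unchanged.
        self._entries.append((value, point))

    def outliers(self, scale):
        # Return the points whose value falls outside [Q1 - scale*IQR, Q3 + scale*IQR].
        if len(self._entries) < 4:
            return []
        self._entries.sort(key=lambda e: e[0])
        values = [v for v, _ in self._entries]
        n = len(values)
        q1 = values[n // 4]
        q3 = values[(3 * n) // 4]
        iqr = q3 - q1
        lower = q1 - scale * iqr
        upper = q3 + scale * iqr
        return [p for v, p in self._entries if v < lower or v > upper]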
            response.point.CopyFrom(group_buffer._consolidated_point)
            self._agent.write_response(response)
        self._state.reset()

    def point(self, point):
        # Points come through in bursts; all points from a particular scrape share the same time.
        # So once the time changes, we can flush the current cache.
        if self._state.time and self._state.time != point.time:
            self.flush()

        # If the point is from a relevant measurement, cache it; otherwise just pass it through.
        # (This is particularly useful if such UDFs exist in a chain, to avoid a cache sequence
        # that requires N time changes in order to fully propagate through N nodes.)
        if point.tags['__name__'] == self._measurement:
            self._state.update(point)
        else:
            response = udf_pb2.Response()
            response.point.CopyFrom(point)
            self._agent.write_response(response)


if __name__ == '__main__':
    agent = Agent()
    handler = KubeStateMetricsConsolidateTrueFalseUnknown(agent)
    agent.handler = handler
    logger.info("Starting agent")
    agent.start()
    agent.wait()
    logger.info("Agent finished")
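# The three lines at the top of the previous snippet are the tail of a flush() method.
# A plausible full shape of that method is sketched below; the group_buffers() accessor
# on the state object is an assumed name, not shown in the snippet itself.
def flush(self):
    # Emit one consolidated point per group, then clear the cache for the next scrape.
    for group_buffer in self._state.group_buffers():
        response = udf_pb2.Response()
        response.point.CopyFrom(group_buffer._consolidated_point)
        self._agent.write_response(response)
    self._state.reset()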
        # update stats for new value
        self.n += 1.0
        diff = (value - self.mean)
        self.mean += diff / self.n
        self._s += diff * (value - self.mean)

        if self.n == self.size + 1:
            # update stats for removing old value
            old = self._window.pop(0)
            oldM = (self.n * self.mean - old) / (self.n - 1)
            self._s -= (old - self.mean) * (old - oldM)
            self.mean = oldM
            self.n -= 1

        self._window.append(value)


if __name__ == '__main__':
    # Create an agent
    agent = Agent()

    # Create a handler and pass it an agent so it can write points
    h = cycleTimeCIHandler(agent)

    # Set the handler on the agent
    agent.handler = h

    # Anything printed to STDERR from a UDF process gets captured into the Kapacitor logs.
    print("Starting agent for cycleTimeCIHandler", file=sys.stderr)
    agent.start()
    agent.wait()
    print("Agent finished", file=sys.stderr)
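# The update logic above maintains a running mean and a running sum of squared
# deviations (_s) over a sliding window: a Welford-style update for the new value plus
# a removal step once the window is full. A minimal self-contained version of such a
# window state, with the standard deviation derived from _s, might look like the
# following; the class and method names here are illustrative, not taken from the
# snippet above.
import math


class WindowStats(object):
    def __init__(self, size):
        self.size = size
        self.n = 0.0
        self.mean = 0.0
        self._s = 0.0
        self._window = []

    def update(self, value):
        # update stats for the new value
        self.n += 1.0
        diff = value - self.mean
        self.mean += diff / self.n
        self._s += diff * (value - self.mean)

        if self.n == self.size + 1:
            # update stats for removing the oldest value
            old = self._window.pop(0)
            oldM = (self.n * self.mean - old) / (self.n - 1)
            self._s -= (old - self.mean) * (old - oldM)
            self.mean = oldM
            self.n -= 1

        self._window.append(value)

    def stddev(self):
        # Sample standard deviation of the values currently in the window.
        if self.n < 2:
            return 0.0
        return math.sqrt(self._s / (self.n - 1))


if __name__ == '__main__':
    w = WindowStats(5)
    for v in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]:
        w.update(v)
    print(w.mean, w.stddev())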