def build(cls, root_service):
    """Attach a receiver service for this protocol class to *root_service*.

    Reads ``<PLUGIN>_RECEIVER_INTERFACE`` and ``<PLUGIN>_RECEIVER_PORT``
    from the carbon settings.  Does nothing when no port is configured.
    A protocol exposing ``datagramReceived`` is served over UDP; anything
    else is served over TCP through a CarbonReceiverFactory.
    """
    name = cls.plugin_name.upper()
    listen_interface = settings.get('%s_RECEIVER_INTERFACE' % name, None)
    listen_port = int(settings.get('%s_RECEIVER_PORT' % name, 0))
    if not listen_port:
        # No receiver port configured for this plugin: nothing to set up.
        return
    if hasattr(cls, 'datagramReceived'):
        # Datagram-capable protocols get a UDP listener.
        svc = UDPServer(listen_port, cls(), interface=listen_interface)
    else:
        # Stream protocols are wrapped in the carbon receiver factory.
        receiver_factory = CarbonReceiverFactory()
        receiver_factory.protocol = cls
        svc = TCPServer(listen_port, receiver_factory,
                        interface=listen_interface)
    svc.setServiceParent(root_service)
def makeService(config, reactor=reactor):
    """Build the top-level MultiService for this server.

    Wires up a DNS server (UDP and TCP on the same port), loads persisted
    state from ``basedir``, and exposes a Controller over a foolscap Tub.

    :param config: dict-like options providing "basedir", "dns-port",
        "dns-interface", "foolscap-port" and "hostname".
    :param reactor: kept for interface compatibility; not used directly.
    :raises usage.UsageError: if the foolscap port is not ``tcp:`` based
        or is out of range.
    """
    parent = MultiService()
    basedir = FilePath(os.path.expanduser(config["basedir"]))
    basedir.makedirs(ignoreExistingDirectory=True)
    basedir.chmod(0o700)  # state dir holds private key material; owner-only
    data = Data(basedir.child("config.json"))

    dns_server = DNSServerFactory(verbose=0)
    # Serve DNS over both UDP and TCP on the same port.
    s1 = UDPServer(int(config["dns-port"]),
                   dns.DNSDatagramProtocol(dns_server),
                   interface=config["dns-interface"])
    s1.setServiceParent(parent)
    s2 = TCPServer(int(config["dns-port"]), dns_server,
                   interface=config["dns-interface"])
    s2.setServiceParent(parent)
    s = Server(data, dns_server)
    s.update_records()

    certFile = basedir.child("tub.data").path
    #furlFile = basedir.child("server.furl").path
    t = Tub(certFile=certFile)
    t.setOption("keepaliveTimeout", 60)  # ping after 60s of idle
    t.setOption("disconnectTimeout", 5*60)  # disconnect/reconnect after 5m
    #t.setOption("logLocalFailures", True)
    #t.setOption("logRemoteFailures", True)
    #t.unsafeTracebacks = True

    fp = config["foolscap-port"]
    if not fp.startswith("tcp:"):
        raise usage.UsageError("I don't know how to handle non-tcp foolscap-port=")
    port = int(fp.split(":")[1])
    # Was `assert port > 1`: asserts are stripped under `python -O`, so
    # validate configuration with a real exception, consistent with the
    # non-tcp check above.
    if port <= 1:
        raise usage.UsageError("foolscap-port must be greater than 1")
    t.listenOn(fp)
    t.setLocation("tcp:%s:%d" % (config["hostname"], port))

    c = Controller(data, s)
    cf = t.registerReference(c, furlFile=basedir.child("controller.furl").path)
    # Everything up to (and including) the last "/" of the controller FURL
    # becomes the prefix for FURLs handed out via name lookup.
    furl_prefix = cf[:cf.rfind("/")+1]
    c.set_furl_prefix(furl_prefix)
    t.registerNameLookupHandler(c.lookup)
    t.setServiceParent(parent)
    return parent
def createService(options):
    """Build the "athwart" MultiService.

    Two UDP listeners are attached: one feeding HAProxy log lines through
    the configured tracers, and one feeding spans straight to Scribe.
    """
    from tryfer.tracers import (
        DebugTracer, EndAnnotationTracer, ZipkinTracer)
    from twisted.internet import reactor
    from twisted.internet.endpoints import TCP4ClientEndpoint
    from scrivener import ScribeClient
    from athwart.processor import HAProxyProcessor, SpanProcessor

    root_service = MultiService()
    root_service.setName("athwart")

    sinks = []
    if options["dump-mode"]:
        # In dump mode, annotations are additionally echoed to stdout.
        sinks.append(EndAnnotationTracer(DebugTracer(sys.stdout)))
    scribe = ScribeClient(TCP4ClientEndpoint(
        reactor, options["scribe-host"], options["scribe-port"]))
    sinks.append(ZipkinTracer(scribe))

    def _listen_udp(port_key, protocol):
        # Attach a UDP listener for *protocol* on the configured port.
        listener = UDPServer(options[port_key], protocol)
        listener.setServiceParent(root_service)

    _listen_udp("logstash-listen-port", AthwartServerProtocol(
        HAProxyProcessor(sinks),
        monitor_message=options["monitor-message"],
        monitor_response=options["monitor-response"]))
    _listen_udp("span-listen-port", AthwartServerProtocol(
        SpanProcessor(scribe),
        monitor_message=options["monitor-message"],
        monitor_response=options["monitor-response"]))
    return root_service
def build_target_redirect_udp(self, host, port):
    """Return a redirect target that forwards metrics over UDP via statsd.

    When no parent service is attached, the returned callable is a no-op
    that simply reports success.
    """
    if self.service is None:
        return lambda *args: True
    port = int(port)

    connected = defer.Deferred()
    self.ready.addCallback(lambda _: connected)
    client = TwistedStatsDClient(
        host, port, connect_callback=lambda: connected.callback(None))
    transport = StatsDClientProtocol(client)

    # Port 0 lets the OS pick an ephemeral local port for the client side.
    sender = UDPServer(0, transport)
    sender.setServiceParent(self.service)

    def redirect_udp_target(metric_type, key, fields):
        # Forward the rebuilt message, then yield the metric on unchanged.
        client.write(self.rebuild_message(metric_type, key, fields))
        yield metric_type, key, fields

    return redirect_udp_target
def build_target_redirect_udp(self, host, port):
    """Return a redirect target that forwards metrics over UDP via statsd.

    A no-op target (always "True") is returned when there is no parent
    service to attach the UDP client to.
    """
    if self.service is None:
        return lambda *args: True
    port = int(port)

    on_connect = defer.Deferred()
    self.ready.addCallback(lambda _: on_connect)
    client = TwistedStatsDClient.create(
        host, port, connect_callback=lambda: on_connect.callback(None))
    proto = StatsDClientProtocol(client)

    # Listening on port 0 picks an ephemeral local port for the client.
    udp_client_service = UDPServer(0, proto)
    udp_client_service.setServiceParent(self.service)

    def redirect_udp_target(metric_type, key, fields):
        # Write a copy of the metric to the remote statsd, then pass the
        # metric down the chain unchanged.
        message = self.rebuild_message(metric_type, key, fields)
        client.write(message)
        yield metric_type, key, fields

    return redirect_udp_target
def createService(options):
    """Create a txStatsD service.

    Wires the whole statsd pipeline into one MultiService: plugin
    discovery, message processor/router, periodic reporting, carbon
    client(s), the UDP (and optional TCP) listeners, and the httpinfo
    endpoint.  Returns the root MultiService.
    """
    from carbon.routers import ConsistentHashingRouter
    from carbon.client import CarbonClientManager
    from carbon.conf import settings

    # Tune carbon's client-side queueing from command-line options.
    settings.MAX_QUEUE_SIZE = options["max-queue-size"]
    settings.MAX_DATAPOINTS_PER_MESSAGE = options["max-datapoints-per-message"]

    root_service = MultiService()
    root_service.setName("statsd")

    prefix = options["prefix"]
    if prefix is None:
        prefix = "statsd"

    instance_name = options["instance-name"]
    if not instance_name:
        # Fall back to this machine's hostname.
        instance_name = platform.node()

    # initialize plugins
    plugin_metrics = []
    for plugin in getPlugins(IMetricFactory):
        plugin.configure(options)
        plugin_metrics.append(plugin)

    processor = None
    if options["dump-mode"]:
        # LoggingMessageProcessor supersedes
        # any other processor class in "dump-mode"
        # NOTE(review): this assert guards against clobbering an existing
        # log.info attribute but is stripped under `python -O`.
        assert not hasattr(log, 'info')
        log.info = log.msg  # for compatibility with LMP logger interface
        processor = functools.partial(LoggingMessageProcessor, logger=log)

    if options["statsd-compliance"]:
        processor = (processor or MessageProcessor)(plugins=plugin_metrics)
        input_router = Router(processor, options['routing'], root_service)
        connection = InternalClient(input_router)
        metrics = Metrics(connection)
    else:
        # Non-compliant mode allows configurable metric prefixes.
        processor = (processor or ConfigurableMessageProcessor)(
            message_prefix=prefix,
            internal_metrics_prefix=prefix + "." + instance_name + ".",
            plugins=plugin_metrics)
        input_router = Router(processor, options['routing'], root_service)
        connection = InternalClient(input_router)
        metrics = ExtendedMetrics(connection)

    # Default to a local carbon cache when none was configured.
    if not options["carbon-cache-host"]:
        options["carbon-cache-host"].append("127.0.0.1")
    if not options["carbon-cache-port"]:
        options["carbon-cache-port"].append(2004)
    if not options["carbon-cache-name"]:
        options["carbon-cache-name"].append(None)

    reporting = ReportingService(instance_name)
    reporting.setServiceParent(root_service)

    # flush-interval is given in milliseconds; schedule() takes seconds.
    reporting.schedule(
        report_client_manager_stats,
        options["flush-interval"] / 1000,
        metrics.gauge)

    if options["report"] is not None:
        from txstatsd import process
        from twisted.internet import reactor
        reporting.schedule(
            process.report_reactor_stats(reactor), 60, metrics.gauge)
        reports = [name.strip() for name in options["report"].split(",")]
        for report_name in reports:
            if report_name == "reactor":
                # Samples reactor responsiveness every 50ms.
                inspector = ReactorInspectorService(
                    reactor, metrics, loop_time=0.05)
                inspector.setServiceParent(root_service)
            # Each report name maps to a <NAME>_STATS tuple of reporters in
            # txstatsd.process (empty tuple when unknown).
            for reporter in getattr(
                    process, "%s_STATS" % report_name.upper(), ()):
                reporting.schedule(reporter, 60, metrics.gauge)

    # XXX Make this configurable.
    router = ConsistentHashingRouter()
    carbon_client = CarbonClientManager(router)
    carbon_client.setServiceParent(root_service)

    # Start one carbon client per configured cache destination.
    for host, port, name in zip(options["carbon-cache-host"],
                                options["carbon-cache-port"],
                                options["carbon-cache-name"]):
        carbon_client.startClient((host, port, name))

    statsd_service = StatsDService(
        carbon_client, input_router, options["flush-interval"])
    statsd_service.setServiceParent(root_service)

    statsd_server_protocol = StatsDServerProtocol(
        input_router,
        monitor_message=options["monitor-message"],
        monitor_response=options["monitor-response"])

    listener = UDPServer(options["listen-port"], statsd_server_protocol)
    listener.setServiceParent(root_service)

    if options["listen-tcp-port"] is not None:
        # Optional TCP listener mirrors the UDP input path.
        statsd_tcp_server_factory = StatsDTCPServerFactory(
            input_router,
            monitor_message=options["monitor-message"],
            monitor_response=options["monitor-response"])

        listener = TCPServer(
            options["listen-tcp-port"], statsd_tcp_server_factory)
        listener.setServiceParent(root_service)

    httpinfo_service = httpinfo.makeService(options, processor, statsd_service)
    httpinfo_service.setServiceParent(root_service)

    return root_service
def createService(options):
    """Create a txStatsD service.

    Assembles the full statsd pipeline under a single MultiService:
    plugin discovery, message processor/router, periodic reporting,
    carbon client(s), the UDP (and optional TCP) listeners, and the
    httpinfo endpoint.  Returns the root MultiService.
    """
    from carbon.routers import ConsistentHashingRouter
    from carbon.client import CarbonClientManager
    from carbon.conf import settings

    # Tune carbon's client-side queueing from command-line options.
    settings.MAX_QUEUE_SIZE = options["max-queue-size"]
    settings.MAX_DATAPOINTS_PER_MESSAGE = options["max-datapoints-per-message"]

    root_service = MultiService()
    root_service.setName("statsd")

    prefix = options["prefix"]
    if prefix is None:
        prefix = "statsd"

    instance_name = options["instance-name"]
    if not instance_name:
        # Fall back to this machine's hostname.
        instance_name = platform.node()

    # initialize plugins
    plugin_metrics = []
    for plugin in getPlugins(IMetricFactory):
        plugin.configure(options)
        plugin_metrics.append(plugin)

    processor = None
    if options["dump-mode"]:
        # LoggingMessageProcessor supersedes
        # any other processor class in "dump-mode"
        # NOTE(review): this assert guards against clobbering an existing
        # log.info attribute but is stripped under `python -O`.
        assert not hasattr(log, 'info')
        log.info = log.msg  # for compatibility with LMP logger interface
        processor = functools.partial(LoggingMessageProcessor, logger=log)

    if options["statsd-compliance"]:
        processor = (processor or MessageProcessor)(plugins=plugin_metrics)
        input_router = Router(processor, options['routing'], root_service)
        connection = InternalClient(input_router)
        metrics = Metrics(connection)
    else:
        # Non-compliant mode allows configurable metric prefixes.
        processor = (processor or ConfigurableMessageProcessor)(
            message_prefix=prefix,
            internal_metrics_prefix=prefix + "." + instance_name + ".",
            plugins=plugin_metrics)
        input_router = Router(processor, options['routing'], root_service)
        connection = InternalClient(input_router)
        metrics = ExtendedMetrics(connection)

    # Default to a local carbon cache when none was configured.
    if not options["carbon-cache-host"]:
        options["carbon-cache-host"].append("127.0.0.1")
    if not options["carbon-cache-port"]:
        options["carbon-cache-port"].append(2004)
    if not options["carbon-cache-name"]:
        options["carbon-cache-name"].append(None)

    reporting = ReportingService(instance_name)
    reporting.setServiceParent(root_service)

    # flush-interval is given in milliseconds; schedule() takes seconds.
    reporting.schedule(
        report_client_manager_stats,
        options["flush-interval"] / 1000,
        metrics.gauge)

    if options["report"] is not None:
        from txstatsd import process
        from twisted.internet import reactor
        reporting.schedule(
            process.report_reactor_stats(reactor), 60, metrics.gauge)
        reports = [name.strip() for name in options["report"].split(",")]
        for report_name in reports:
            if report_name == "reactor":
                # Samples reactor responsiveness every 50ms.
                inspector = ReactorInspectorService(
                    reactor, metrics, loop_time=0.05)
                inspector.setServiceParent(root_service)
            # Each report name maps to a <NAME>_STATS tuple of reporters in
            # txstatsd.process (empty tuple when unknown).
            for reporter in getattr(
                    process, "%s_STATS" % report_name.upper(), ()):
                reporting.schedule(reporter, 60, metrics.gauge)

    # XXX Make this configurable.
    router = ConsistentHashingRouter()
    carbon_client = CarbonClientManager(router)
    carbon_client.setServiceParent(root_service)

    # Start one carbon client per configured cache destination.
    for host, port, name in zip(options["carbon-cache-host"],
                                options["carbon-cache-port"],
                                options["carbon-cache-name"]):
        carbon_client.startClient((host, port, name))

    statsd_service = StatsDService(
        carbon_client, input_router, options["flush-interval"])
    statsd_service.setServiceParent(root_service)

    statsd_server_protocol = StatsDServerProtocol(
        input_router,
        monitor_message=options["monitor-message"],
        monitor_response=options["monitor-response"])

    listener = UDPServer(options["listen-port"], statsd_server_protocol)
    listener.setServiceParent(root_service)

    if options["listen-tcp-port"] is not None:
        # Optional TCP listener mirrors the UDP input path.
        statsd_tcp_server_factory = StatsDTCPServerFactory(
            input_router,
            monitor_message=options["monitor-message"],
            monitor_response=options["monitor-response"])

        listener = TCPServer(
            options["listen-tcp-port"], statsd_tcp_server_factory)
        listener.setServiceParent(root_service)

    httpinfo_service = httpinfo.makeService(options, processor, statsd_service)
    httpinfo_service.setServiceParent(root_service)

    return root_service
# NOTE(review): the next statement is the tail of a method whose
# definition starts before this view; its scope (`factory`, `key`,
# `value`, `args`) cannot be determined from here — do not assume.
factory[key].update(value, *args)


class SketchFactory(object):
    """Lazily creates and caches one sketch instance per key."""

    def __init__(self, clazz, *args, **kargs):
        # Sketch class plus the positional/keyword arguments used to
        # instantiate it for each new key.
        self.clazz = clazz
        self.args = args
        self.kargs = kargs
        # key -> sketch instance cache.
        self.table = {}

    def __getitem__(self, key):
        # Create-on-first-access: each key gets its own sketch instance,
        # constructed as clazz(key, *args, **kargs).
        if key not in self.table:
            log.msg("Create: '%s' [%s]" % (key, self.clazz.__name__))
            self.table[key] = self.clazz(key, *self.args, **self.kargs)
        return self.table[key]


# Module-level wiring: a dispatcher with a heavy-hitters ("hh") sketch
# backed by SpaceSaving, served over UDP as a twistd Application.
dispatcher = Dispatcher()
dispatcher.register("hh", SketchFactory(SpaceSaving))
sketch_udp_protocol = SketchUDPServerProtocol(MessageProcessor(dispatcher))
server = UDPServer(LISTEN_PORT, sketch_udp_protocol)
application = Application("sketch")
server.setServiceParent(application)