def __init__(self, kz_client, interval, partitioner_path, buckets,
             time_boundary, log, got_buckets, clock=None):
    """
    :param log: a bound log
    :param kz_client: txKazoo client
    :param partitioner_path: ZooKeeper path, used for partitioning
    :param buckets: iterable of buckets to distribute between nodes. Ideally
        there should be at least as many elements as nodes taking part in
        this partitioner. This should be a sequence of str.
    :param time_boundary: time to wait for partitioning to stabilize.
    :param got_buckets: Callable which will be called with a list of buckets
        when buckets have been allocated to this node.
    :param clock: clock to use for checking the buckets on an interval.
    """
    MultiService.__init__(self)
    self.kz_client = kz_client
    self.partitioner_path = partitioner_path
    self.buckets = buckets
    self.log = log
    self.got_buckets = got_buckets
    self.time_boundary = time_boundary
    # Re-check the partition every `interval` seconds as a child service,
    # so it starts/stops with this MultiService.
    ts = TimerService(interval, self.check_partition)
    ts.setServiceParent(self)
    # NOTE(review): assigned even when `clock` is None; presumably
    # TimerService tolerates a None clock -- confirm against the Twisted
    # version in use.
    ts.clock = clock
    # Buckets handed to this node on the previous check; used to detect
    # allocation changes between checks.
    self._old_buckets = []
def application(config):
    """Build the Scrapyd Twisted application for a Heroku-style deployment.

    The HTTP port comes from the ``PORT`` environment variable (falling back
    to the ``http_port`` config option), and the Postgres-backed poller and
    scheduler are wired in using ``DATABASE_URL``.

    :param config: scrapyd ``Config`` instance
    :return: configured ``twisted.application.service.Application``
    """
    app = Application("Scrapyd")
    http_port = int(environ.get('PORT', config.getint('http_port', 6800)))
    # BUG FIX: default to '' instead of None -- ConfigParser.set() requires
    # a string value, so a missing DATABASE_URL would raise TypeError.
    config.cp.set('scrapyd', 'database_url', environ.get('DATABASE_URL', ''))
    poller = Psycopg2QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler = Psycopg2SpiderScheduler(config)
    environment = Environment(config)
    app.setComponent(IPoller, poller)
    app.setComponent(IEggStorage, eggstorage)
    app.setComponent(ISpiderScheduler, scheduler)
    app.setComponent(IEnvironment, environment)
    launcher = Launcher(config, app)
    # Poll the queues every 5 seconds.
    timer = TimerService(5, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)))
    log.msg("Scrapyd web console available at http://localhost:%s/ (HEROKU)" % http_port)
    launcher.setServiceParent(app)
    timer.setServiceParent(app)
    webservice.setServiceParent(app)
    return app
def get_application(config):
    """Assemble and return the Scrapyd application (MongoDB flavour).

    Core components are registered on the app by interface, the launcher
    class is loaded from config, and a poll timer plus the HTTP web
    console are attached as child services.
    """
    app = Application('Scrapyd')
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd_mongodb.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(
        http_port, server.Site(Root(config, app)), interface=bind_address)
    log.msg('http://%(bind_address)s:%(http_port)s/'
            % {'bind_address': bind_address, 'http_port': http_port})
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Build the Scrapyd application with a config-wrapped web root.

    The root resource is passed through ``configRoot`` before being
    served, allowing configuration-driven customisation of the site.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher = Launcher(config, app)
    # Poll the queues every 5 seconds.
    timer = TimerService(5, poller.poll)
    # Wrap the root resource with config-driven decoration before serving.
    root = configRoot(Root(config, app), config)
    webservice = TCPServer(http_port, server.Site(root))
    log.msg("Scrapyd web console available at http://localhost:%s/" % http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Create the Scrapyd service hierarchy.

    The launcher class is configurable via the ``launcher`` option; the
    web console binds to ``bind_address``/``http_port``.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    # Poll the queues every 5 seconds.
    timer = TimerService(5, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg("Scrapyd web console available at http://%s:%s/" % (bind_address, http_port))
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Build Scrapyd behind HTTP digest authentication.

    Credentials come from the flat password file named by the ``passwd``
    option; the whole site is wrapped in an auth session.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    # Digest-auth portal backed by a flat password file.
    portal = Portal(PublicHTMLRealm(config, app),
                    [FilePasswordDB(str(config.get('passwd', '')))])
    credentialFactory = DigestCredentialFactory("md5", "Go away")

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher = Launcher(config, app)
    # Poll the queues every 5 seconds.
    timer = TimerService(5, poller.poll)
    site = server.Site(HTTPAuthSessionWrapper(portal, [credentialFactory]))
    webservice = TCPServer(http_port, site)
    log.msg("Scrapyd web console available at http://localhost:%s/" % http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Create the Scrapyd application with configurable launcher class,
    poll interval and bind address."""
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg(
        format=
        "Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address, http_port=http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Build the Scrapyd application with a fixed Launcher and a
    configurable bind address."""
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher = Launcher(config, app)
    # Poll the queues every 5 seconds.
    timer = TimerService(5, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg("Scrapyd web console available at http://%s:%s/" % (bind_address, http_port))
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config, components=interfaces):
    """Build Scrapyd from a configurable list of (interface, config-key)
    pairs.

    :param config: scrapyd ``Config`` instance
    :param components: sequence of ``(interface, config option name)``
        pairs; each option names the class to load for that interface.
        Defaults to the module-level ``interfaces``.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')
    # BUG FIX: iterate the `components` parameter. Previously the loop
    # always iterated the module-level `interfaces`, making the parameter
    # dead and the function impossible to customize.
    for interface, key in components:
        path = config.get(key)
        cls = load_object(path)
        component = cls(config)
        app.setComponent(interface, component)
        # NOTE(review): after the loop, `poller` is simply the *last*
        # component registered -- this assumes IPoller is last in the
        # sequence; confirm against the `interfaces` definition.
        poller = component
    laupath = config.get('launcher', 'scrapyd.launcher.Launcher')
    laucls = load_object(laupath)
    launcher = laucls(config, app)
    poll_every = config.getint("poll_every", 5)
    timer = TimerService(poll_every, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
            bind_address=bind_address, http_port=http_port)
    launcher.setServiceParent(app)
    timer.setServiceParent(app)
    webservice.setServiceParent(app)
    return app
def __init__(self):
    """Start the status receiver with two periodic jobs: an hourly
    pending-builds report and a 30-second metrics poll."""
    StatusReceiverMultiService.__init__(self)
    for period, task in ((60 * 60, self.report_pending_builds),
                         (30, self.metrics)):
        svc = TimerService(period, task)
        svc.setServiceParent(self)
def __init__(self, basedir="."):
    """Create a Tahoe client node rooted at *basedir*.

    Initializes all subsystems (introducer, stats, secrets, storage,
    control, optional helper and key generator, FTP/SFTP frontends),
    starts the hotline poller if its file exists, and finally brings up
    the web frontend.
    """
    node.Node.__init__(self, basedir)
    self.started_timestamp = time.time()
    self.logSource="Client"
    # Per-instance copy so later mutations don't affect the class default.
    self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
    self.init_introducer_client()
    self.init_stats_provider()
    self.init_secrets()
    self.init_storage()
    self.init_control()
    self.helper = None
    if self.get_config("helper", "enabled", False, boolean=True):
        self.init_helper()
    self._key_generator = KeyGenerator()
    key_gen_furl = self.get_config("client", "key_generator.furl", None)
    if key_gen_furl:
        self.init_key_gen(key_gen_furl)
    self.init_client()
    # ControlServer and Helper are attached after Tub startup
    self.init_ftp_server()
    self.init_sftp_server()
    # "Suicide prevention hotline": if the file exists, poll it every
    # second -- presumably _check_hotline shuts the node down when the
    # file goes stale; confirm in that method.
    hotline_file = os.path.join(self.basedir,
                                self.SUICIDE_PREVENTION_HOTLINE_FILE)
    if os.path.exists(hotline_file):
        age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
        self.log("hotline file noticed (%ds old), starting timer" % age)
        hotline = TimerService(1.0, self._check_hotline, hotline_file)
        hotline.setServiceParent(self)
    # this needs to happen last, so it can use getServiceNamed() to
    # acquire references to StorageServer and other web-statusable things
    webport = self.get_config("node", "web.port", None)
    if webport:
        self.init_web(webport)  # strports string
def __init__(self, reactor):
    """Set up the in-memory deployment-state service.

    A once-per-second timer wipes expired information; *reactor* is used
    as the timer's clock so tests can supply a fake one.
    """
    MultiService.__init__(self)
    self._deployment_state = DeploymentState()
    self._information_wipers = pmap()
    self._clock = reactor
    wiper = TimerService(1, self._wipe_expired)
    wiper.clock = reactor
    wiper.setServiceParent(self)
def application(config):
    """Build the Scrapyd application and register this node in Redis.

    Besides the standard Scrapyd services, a Redis connection pool is
    created; the node registers itself immediately and then re-registers
    every ``redis_interval`` seconds (heartbeat) via a TimerService.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '127.0.0.1')
    poll_interval = config.getfloat('poll_interval', 5)
    poller = QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler = SpiderScheduler(config)
    environment = Environment(config)
    app.setComponent(IPoller, poller)
    app.setComponent(IEggStorage, eggstorage)
    app.setComponent(ISpiderScheduler, scheduler)
    app.setComponent(IEnvironment, environment)
    laupath = config.get('launcher', 'scrapyd.launcher.Launcher')
    laucls = load_object(laupath)
    launcher = laucls(config, app)
    webpath = config.get('webroot', 'scrapyd.website.Root')
    webcls = load_object(webpath)
    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(http_port, server.Site(webcls(config, app)),
                           interface=bind_address)
    log.msg(
        format=
        "Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address, http_port=http_port)
    launcher.setServiceParent(app)
    timer.setServiceParent(app)
    webservice.setServiceParent(app)

    host = get_host_ip(config)
    redis_host = config.get('redis_host', 'localhost')
    # BUG FIX: use getint so values read from the config file arrive as
    # ints -- config.get() returned *strings* for user-supplied values
    # while the defaults were ints, giving redis.ConnectionPool an
    # inconsistent type.
    redis_port = config.getint('redis_port', 6379)
    redis_db = config.getint('redis_db', 0)
    redis_pool = redis.ConnectionPool(host=redis_host, port=redis_port,
                                      db=redis_db)
    # Register once now, then re-register on an interval as a heartbeat.
    register_to_redis(config, redis_pool)
    log.msg('Registering scrapyd [{}] to redis {}:{} at db {}'.format(
        host, redis_host, redis_port, redis_db))
    redis_interval = config.getfloat('redis_interval', 5)
    register_timer = TimerService(redis_interval, register_to_redis, config,
                                  redis_pool)
    register_timer.setServiceParent(app)
    return app
def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider,
             introducer_clients, storage_farm_broker):
    """
    Use :func:`allmydata.client.create_client` to instantiate
    one of these.
    """
    node.Node.__init__(self, config, main_tub, control_tub, i2p_provider,
                       tor_provider)
    self._magic_folders = dict()
    self.started_timestamp = time.time()
    self.logSource = "Client"
    # Per-instance copy so later mutations don't affect the class default.
    self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
    self.introducer_clients = introducer_clients
    self.storage_broker = storage_farm_broker
    # Subsystems are initialized in dependency order; secrets and the
    # node key are set up before storage/client code that may need them.
    self.init_stats_provider()
    self.init_secrets()
    self.init_node_key()
    self.init_storage()
    self.init_control()
    self._key_generator = KeyGenerator()
    key_gen_furl = config.get_config("client", "key_generator.furl", None)
    if key_gen_furl:
        log.msg("[client]key_generator.furl= is now ignored, see #2783")
    self.init_client()
    self.load_static_servers()
    self.helper = None
    if config.get_config("helper", "enabled", False, boolean=True):
        # A helper requires a listening tub so peers can reach it.
        if not self._is_tub_listening():
            raise ValueError("config error: helper is enabled, but tub "
                             "is not listening ('tub.port=' is empty)")
        self.init_helper()
    self.init_ftp_server()
    self.init_sftp_server()
    self.init_magic_folder()
    # If the node sees an exit_trigger file, it will poll every second to see
    # whether the file still exists, and what its mtime is. If the file does not
    # exist or has not been modified for a given timeout, the node will exit.
    exit_trigger_file = config.get_config_path(self.EXIT_TRIGGER_FILE)
    if os.path.exists(exit_trigger_file):
        age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
        self.log("%s file noticed (%ds old), starting timer" %
                 (self.EXIT_TRIGGER_FILE, age))
        exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                    exit_trigger_file)
        exit_trigger.setServiceParent(self)
    # this needs to happen last, so it can use getServiceNamed() to
    # acquire references to StorageServer and other web-statusable things
    webport = config.get_config("node", "web.port", None)
    if webport:
        self.init_web(webport)  # strports string
class StatsGatherer(Referenceable, service.MultiService):
    """Collects stats from registered providers over foolscap.

    Providers announce themselves via ``remote_provide``; every
    ``poll_interval`` seconds each registered client is asked for its
    stats, and subclasses decide what to do with each result via
    ``got_stats``.
    """

    implements(RIStatsGatherer)

    # seconds between polls of every registered client
    poll_interval = 60

    def __init__(self, basedir):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.clients = {}    # tubid -> provider remote reference
        self.nicknames = {}  # tubid -> nickname
        self.timer = TimerService(self.poll_interval, self.poll)
        self.timer.setServiceParent(self)

    def get_tubid(self, rref):
        # Identify a provider by its foolscap tub ID.
        return rref.getRemoteTubID()

    def remote_provide(self, provider, nickname):
        """Remote API: a stats provider announces itself."""
        tubid = self.get_tubid(provider)
        if tubid == '<unauth>':
            print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname)
            # don't add to clients to poll (polluting data) don't care about disconnect
            return
        self.clients[tubid] = provider
        self.nicknames[tubid] = nickname

    def poll(self):
        """Ask every registered client for its stats dict."""
        for tubid, client in self.clients.items():
            nickname = self.nicknames.get(tubid)
            d = client.callRemote('get_stats')
            d.addCallbacks(
                self.got_stats, self.lost_client,
                callbackArgs=(tubid, nickname),
                errbackArgs=(tubid, ))
            d.addErrback(self.log_client_error, tubid)

    def lost_client(self, f, tubid):
        # this is called lazily, when a get_stats request fails
        del self.clients[tubid]
        del self.nicknames[tubid]
        f.trap(DeadReferenceError)

    def log_client_error(self, f, tubid):
        log.msg(
            "StatsGatherer: error in get_stats(), peerid=%s" % tubid,
            level=log.UNUSUAL, failure=f)

    def got_stats(self, stats, tubid, nickname):
        # Subclasses override to consume each (stats, tubid, nickname).
        raise NotImplementedError()
class StatsGatherer(Referenceable, service.MultiService):
    """Collects stats from registered providers over foolscap.

    Providers announce themselves via ``remote_provide``; every
    ``poll_interval`` seconds each registered client is asked for its
    stats, and subclasses decide what to do with each result via
    ``got_stats``.
    """

    implements(RIStatsGatherer)

    # seconds between polls of every registered client
    poll_interval = 60

    def __init__(self, basedir):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.clients = {}    # tubid -> provider remote reference
        self.nicknames = {}  # tubid -> nickname
        self.timer = TimerService(self.poll_interval, self.poll)
        self.timer.setServiceParent(self)

    def get_tubid(self, rref):
        # Identify a provider by its foolscap tub ID.
        return rref.getRemoteTubID()

    def remote_provide(self, provider, nickname):
        """Remote API: a stats provider announces itself."""
        tubid = self.get_tubid(provider)
        if tubid == '<unauth>':
            print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname)
            # don't add to clients to poll (polluting data) don't care about disconnect
            return
        self.clients[tubid] = provider
        self.nicknames[tubid] = nickname

    def poll(self):
        """Ask every registered client for its stats dict."""
        for tubid, client in self.clients.items():
            nickname = self.nicknames.get(tubid)
            d = client.callRemote('get_stats')
            d.addCallbacks(self.got_stats, self.lost_client,
                           callbackArgs=(tubid, nickname),
                           errbackArgs=(tubid, ))
            d.addErrback(self.log_client_error, tubid)

    def lost_client(self, f, tubid):
        # this is called lazily, when a get_stats request fails
        del self.clients[tubid]
        del self.nicknames[tubid]
        f.trap(DeadReferenceError)

    def log_client_error(self, f, tubid):
        log.msg("StatsGatherer: error in get_stats(), peerid=%s" % tubid,
                level=log.UNUSUAL, failure=f)

    def got_stats(self, stats, tubid, nickname):
        # Subclasses override to consume each (stats, tubid, nickname).
        raise NotImplementedError()
def __init__(self, basedir="."): node.Node.__init__(self, basedir) # All tub.registerReference must happen *after* we upcall, since # that's what does tub.setLocation() configutil.validate_config(self.config_fname, self.config, _valid_config_sections()) self._magic_folder = None self.started_timestamp = time.time() self.logSource = "Client" self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy() self.init_introducer_clients() self.init_stats_provider() self.init_secrets() self.init_node_key() self.init_storage() self.init_control() self._key_generator = KeyGenerator() key_gen_furl = self.get_config("client", "key_generator.furl", None) if key_gen_furl: log.msg("[client]key_generator.furl= is now ignored, see #2783") self.init_client() self.load_static_servers() self.helper = None if self.get_config("helper", "enabled", False, boolean=True): if not self._tub_is_listening: raise ValueError("config error: helper is enabled, but tub " "is not listening ('tub.port=' is empty)") self.init_helper() self.init_ftp_server() self.init_sftp_server() self.init_magic_folder() # If the node sees an exit_trigger file, it will poll every second to see # whether the file still exists, and what its mtime is. If the file does not # exist or has not been modified for a given timeout, the node will exit. exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE) if os.path.exists(exit_trigger_file): age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME] self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age)) exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file) exit_trigger.setServiceParent(self) # this needs to happen last, so it can use getServiceNamed() to # acquire references to StorageServer and other web-statusable things webport = self.get_config("node", "web.port", None) if webport: self.init_web(webport) # strports string
def __init__(self, basedir="."): node.Node.__init__(self, basedir) # All tub.registerReference must happen *after* we upcall, since # that's what does tub.setLocation() configutil.validate_config(self.config_fname, self.config, _valid_config_sections()) self._magic_folder = None self.started_timestamp = time.time() self.logSource="Client" self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy() self.init_introducer_clients() self.init_stats_provider() self.init_secrets() self.init_node_key() self.init_storage() self.init_control() self._key_generator = KeyGenerator() key_gen_furl = self.get_config("client", "key_generator.furl", None) if key_gen_furl: log.msg("[client]key_generator.furl= is now ignored, see #2783") self.init_client() self.load_static_servers() self.helper = None if self.get_config("helper", "enabled", False, boolean=True): if not self._tub_is_listening: raise ValueError("config error: helper is enabled, but tub " "is not listening ('tub.port=' is empty)") self.init_helper() self.init_ftp_server() self.init_sftp_server() self.init_magic_folder() # If the node sees an exit_trigger file, it will poll every second to see # whether the file still exists, and what its mtime is. If the file does not # exist or has not been modified for a given timeout, the node will exit. exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE) if os.path.exists(exit_trigger_file): age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME] self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age)) exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file) exit_trigger.setServiceParent(self) # this needs to happen last, so it can use getServiceNamed() to # acquire references to StorageServer and other web-statusable things webport = self.get_config("node", "web.port", None) if webport: self.init_web(webport) # strports string
def application(config):
    """Build Scrapyd, optionally protecting the web UI with HTTP basic auth.

    Basic auth is enabled only when both ``username`` and ``password``
    are configured; a ':' in the username is rejected because it is the
    separator in the basic-auth credential format.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '127.0.0.1')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    for iface, comp in (
            (IPoller, poller),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    timer = TimerService(poll_interval, poller.poll)
    webcls = load_object(config.get('webroot', 'scrapyd.website.Root'))

    username = config.get('username', '')
    password = config.get('password', '')
    if username and password:
        if ':' in username:
            sys.exit("The `username` option contains illegal character ':', "
                     "check and update the configuration file of Scrapyd")
        portal = Portal(PublicHTMLRealm(webcls(config, app)),
                        [StringCredentialsChecker(username, password)])
        credential_factory = BasicCredentialFactory("Auth")
        resource = HTTPAuthSessionWrapper(portal, [credential_factory])
        log.msg("Basic authentication enabled")
    else:
        resource = webcls(config, app)
        log.msg("Basic authentication disabled as either `username` or `password` is unset")

    webservice = TCPServer(http_port, server.Site(resource), interface=bind_address)
    log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
            bind_address=bind_address, http_port=http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(config):
    """Assemble the Flowder application: fetcher, queue poller, task
    storage, poll timer, scheduler, launcher, REST API and AMQP
    publisher, all parented to one Application."""
    app = Application("Flowder")
    # NOTE(review): app_id/logfile/loglevel are read but never used in
    # this function -- presumably consumed elsewhere or leftovers;
    # confirm before removing.
    app_id = config.get('app_id', 'fw0')
    logfile = config.get('logfile', '/var/log/flowder.log')
    loglevel = config.get('loglevel', 'info')
    db_file = config.get('db_file', 'flowder')
    rest_port = config.getint('rest_port', 4000)
    rest_bind = config.get('rest_bind', '0.0.0.0')
    poll_interval = config.getfloat('poll_interval', 1)
    poll_size = config.getint("poll_size", 5)

    signalmanager = SignalManager()
    app.setComponent(ISignalManager, signalmanager)

    fetcher = FetcherService(config)
    fetcher.setServiceParent(app)

    poller = QueuePoller(app, poll_size)
    poller.setServiceParent(app)

    task_storage = FileDownloaderTaskStorage(app, '%s.db' % db_file)
    task_storage.setServiceParent(app)

    timer = TimerService(poll_interval, poller.poll)
    timer.setServiceParent(app)

    scheduler = TaskScheduler(config, app)
    scheduler.setServiceParent(app)

    launcher_cls = load_object(
        config.get('launcher', 'flowder.launcher.Launcher'))
    launcher = launcher_cls(app, config)
    launcher.setServiceParent(app)

    rest_service = TCPServer(rest_port, server.Site(Root(app, config)),
                             interface=rest_bind)
    rest_service.setServiceParent(app)

    amqp_publisher = AmqpService(app, config)
    amqp_publisher.setServiceParent(app)

    log.msg("Starting Flowder services (;-)")
    return app
def application(config):
    """Build Scrapyd with pluggable job storage, egg storage, launcher
    and web root, all loaded from config-specified dotted paths."""
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '127.0.0.1')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    scheduler = SpiderScheduler(config)
    environment = Environment(config)
    app.setComponent(IPoller, poller)
    app.setComponent(ISpiderScheduler, scheduler)
    app.setComponent(IEnvironment, environment)

    # Pluggable job-storage backend.
    jscls = load_object(
        config.get('jobstorage', 'scrapyd.jobstorage.MemoryJobStorage'))
    app.setComponent(IJobStorage, jscls(config))
    # Pluggable egg-storage backend.
    eggstoragecls = load_object(
        config.get('eggstorage', 'scrapyd.eggstorage.FilesystemEggStorage'))
    app.setComponent(IEggStorage, eggstoragecls(config))

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    timer = TimerService(poll_interval, poller.poll)
    webcls = load_object(config.get('webroot', 'scrapyd.website.Root'))
    resource = create_wrapped_resource(webcls, config, app)
    webservice = TCPServer(http_port, server.Site(resource),
                           interface=bind_address)
    log.msg(
        format=
        "Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address, http_port=http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def execute(self):
    """Run the watcher: poll the timeline on an interval, or print it
    once and exit when no interval is configured."""
    log.trace("Executing watcher.")
    self._lastPrintedId = None
    self._lastError = None
    if not self.config["interval"]:
        # One-shot mode: show the timeline once; its Deferred ends the run.
        return self.showTimeline()
    svc = TimerService(self.config["interval"], self.showTimeline)
    svc.setServiceParent(self)
    # Since this runs ~forever, just return a Deferred that doesn't call
    # back. A swift SIGINT will kill it.
    return Deferred()
def application(config):
    """Serve the crawl-engine HTTP API and start the application services.

    :param config: engine configuration object
    :return: the assembled Twisted Application
    """
    app = Application("engine")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '127.0.0.1')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler = SpiderScheduler(config)
    environment = Environment(config)
    # metrics = MetricsReporter(config)
    app.setComponent(IPoller, poller)
    app.setComponent(IEggStorage, eggstorage)
    app.setComponent(ISpiderScheduler, scheduler)
    app.setComponent(IEnvironment, environment)
    # app.setComponent(IMetrics, metrics)

    launcher_cls = load_object(
        config.get('launcher', 'engine.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    webcls = load_object(config.get('webroot', 'engine.website.Root'))
    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(http_port, server.Site(webcls(config, app)),
                           interface=bind_address)
    log.msg(
        format="DtCrawlEngine 访问地址 at http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address, http_port=http_port)
    for svc in (launcher, timer, webservice):
        svc.setServiceParent(app)
    return app
def application(settings):
    """Assemble the Scraperd daemon: queue poller, environment, launcher
    and a periodic poll timer (no web frontend)."""
    app = Application("Scraperd")
    poller = QueuePoller(settings)
    environment = Environment(settings)
    app.setComponent(IPoller, poller)
    app.setComponent(IEnvironment, environment)

    launcher_cls = load_object(
        settings.get('DAEMON_LAUNCHER', 'scraper.daemon.launcher.Launcher'))
    launcher = launcher_cls(settings, app)
    interval = settings.getfloat('DAEMON_POLL_INTERVAL', 5)
    timer = TimerService(interval, poller.poll)
    for svc in (launcher, timer):
        svc.setServiceParent(app)
    return app
def __init__(self, basedir="."):
    """Create a Tahoe client node rooted at *basedir*.

    Initializes introducer/stats/secrets/node-key/storage/control, an
    optional helper and key generator, the FTP/SFTP frontends and the
    drop-uploader, then the exit-trigger poller and finally the web
    frontend.
    """
    node.Node.__init__(self, basedir)
    self.started_timestamp = time.time()
    self.logSource = "Client"
    # Per-instance copy so later mutations don't affect the class default.
    self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
    self.init_introducer_client()
    self.init_stats_provider()
    self.init_secrets()
    self.init_node_key()
    self.init_storage()
    self.init_control()
    self.helper = None
    if self.get_config("helper", "enabled", False, boolean=True):
        self.init_helper()
    self._key_generator = KeyGenerator()
    key_gen_furl = self.get_config("client", "key_generator.furl", None)
    if key_gen_furl:
        self.init_key_gen(key_gen_furl)
    self.init_client()
    # ControlServer and Helper are attached after Tub startup
    self.init_ftp_server()
    self.init_sftp_server()
    self.init_drop_uploader()
    # If the node sees an exit_trigger file, it will poll every second to see
    # whether the file still exists, and what its mtime is. If the file does not
    # exist or has not been modified for a given timeout, the node will exit.
    exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE)
    if os.path.exists(exit_trigger_file):
        age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
        self.log("%s file noticed (%ds old), starting timer" %
                 (self.EXIT_TRIGGER_FILE, age))
        exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                    exit_trigger_file)
        exit_trigger.setServiceParent(self)
    # this needs to happen last, so it can use getServiceNamed() to
    # acquire references to StorageServer and other web-statusable things
    webport = self.get_config("node", "web.port", None)
    if webport:
        self.init_web(webport)  # strports string
def application(config):
    """Build the Spider Platform slave: the usual Scrapyd-style services
    plus a 1-second performance sampler alongside the queue-poll timer."""
    app = Application("Spider Platform Slave")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '127.0.0.1')
    poll_interval = config.getfloat('poll_interval', 5)

    poller = QueuePoller(config)
    performance = Performance()
    for iface, comp in (
            (IPoller, poller),
            (IPerformance, performance),
            (IEggStorage, FilesystemEggStorage(config)),
            (ISpiderScheduler, SpiderScheduler(config)),
            (IEnvironment, Environment(config))):
        app.setComponent(iface, comp)

    launcher_cls = load_object(
        config.get('launcher', 'slave.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    web_cls = load_object(config.get('web_root', 'slave.website.Root'))
    queue_timer = TimerService(poll_interval, poller.poll)
    # Sample performance counters every second.
    perf_timer = TimerService(1, performance.poll)
    webservice = TCPServer(http_port, server.Site(web_cls(config, app)),
                           interface=bind_address)
    log.msg(
        format="Spider Platform Slave web console available at "
               "http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address, http_port=http_port)
    for svc in (launcher, queue_timer, perf_timer, webservice):
        svc.setServiceParent(app)
    return app
def __init__(self, basedir="."):
    """Create a Tahoe client node rooted at *basedir*.

    Sets up all subsystems in dependency order, including the
    drop-uploader, the exit-trigger poller, and finally the web
    frontend.
    """
    node.Node.__init__(self, basedir)
    self.started_timestamp = time.time()
    self.logSource="Client"
    # Per-instance copy so later mutations don't affect the class default.
    self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
    self.init_introducer_client()
    self.init_stats_provider()
    self.init_secrets()
    self.init_node_key()
    self.init_storage()
    self.init_control()
    self.helper = None
    if self.get_config("helper", "enabled", False, boolean=True):
        self.init_helper()
    self._key_generator = KeyGenerator()
    key_gen_furl = self.get_config("client", "key_generator.furl", None)
    if key_gen_furl:
        self.init_key_gen(key_gen_furl)
    self.init_client()
    # ControlServer and Helper are attached after Tub startup
    self.init_ftp_server()
    self.init_sftp_server()
    self.init_drop_uploader()
    # If the node sees an exit_trigger file, it will poll every second to see
    # whether the file still exists, and what its mtime is. If the file does not
    # exist or has not been modified for a given timeout, the node will exit.
    exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE)
    if os.path.exists(exit_trigger_file):
        age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
        self.log("%s file noticed (%ds old), starting timer" %
                 (self.EXIT_TRIGGER_FILE, age))
        exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                    exit_trigger_file)
        exit_trigger.setServiceParent(self)
    # this needs to happen last, so it can use getServiceNamed() to
    # acquire references to StorageServer and other web-statusable things
    webport = self.get_config("node", "web.port", None)
    if webport:
        self.init_web(webport)  # strports string
def __init__(self, basedir="."):
    """Create a Tahoe client node rooted at *basedir*.

    Sets up all subsystems in dependency order, starts the hotline
    poller if its file exists, and finally brings up the web frontend.
    """
    node.Node.__init__(self, basedir)
    self.started_timestamp = time.time()
    self.logSource = "Client"
    # Per-instance copy so later mutations don't affect the class default.
    self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy(
    )
    self.init_introducer_client()
    self.init_stats_provider()
    self.init_secrets()
    self.init_node_key()
    self.init_storage()
    self.init_control()
    self.helper = None
    if self.get_config("helper", "enabled", False, boolean=True):
        self.init_helper()
    self._key_generator = KeyGenerator()
    key_gen_furl = self.get_config("client", "key_generator.furl", None)
    if key_gen_furl:
        self.init_key_gen(key_gen_furl)
    self.init_client()
    # ControlServer and Helper are attached after Tub startup
    self.init_ftp_server()
    self.init_sftp_server()
    self.init_drop_uploader()
    # "Suicide prevention hotline": if the file exists, poll it every
    # second -- presumably _check_hotline shuts the node down when the
    # file goes stale; confirm in that method.
    hotline_file = os.path.join(self.basedir,
                                self.SUICIDE_PREVENTION_HOTLINE_FILE)
    if os.path.exists(hotline_file):
        age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
        self.log("hotline file noticed (%ds old), starting timer" % age)
        hotline = TimerService(1.0, self._check_hotline, hotline_file)
        hotline.setServiceParent(self)
    # this needs to happen last, so it can use getServiceNamed() to
    # acquire references to StorageServer and other web-statusable things
    webport = self.get_config("node", "web.port", None)
    if webport:
        self.init_web(webport)  # strports string
def application(config):
    """Build the Scrapyd Twisted application from `config`.

    Registers the poller, egg storage, scheduler, environment and pub/sub
    components on the app, then attaches the launcher, the poll timer and
    the HTTP web console as child services.
    """
    app = Application("Scrapyd")

    # Network and timing settings.
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')
    poll_interval = config.getfloat('poll_interval', 5)

    # Pluggable components; scheduler, pub/sub and launcher classes are
    # looked up from dotted paths held in the config.
    poller = QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler_cls = load_object(
        config.get('scheduler', 'scrapyd.scheduler.SpiderScheduler'))
    scheduler = scheduler_cls(config, app)
    environment = Environment(config)
    pubsub_cls = load_object(config.get('pubsub', 'scrapyd.pubsub.BasePubSub'))
    pubsub = pubsub_cls(config, app)

    for interface, component in (
            (IPoller, poller),
            (IEggStorage, eggstorage),
            (ISpiderScheduler, scheduler),
            (IEnvironment, environment),
            (IPubSub, pubsub)):
        app.setComponent(interface, component)

    launcher_cls = load_object(
        config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_cls(config, app)
    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
            bind_address=bind_address, http_port=http_port)

    launcher.setServiceParent(app)
    timer.setServiceParent(app)
    webservice.setServiceParent(app)
    return app
def application(config):
    """Build the Scrapyd Twisted application from `config`.

    Uses a TaskPoller as the IPoller component (the stock QueuePoller was
    replaced), polls it every `poll_interval` seconds, and serves the web
    console on `bind_address`:`http_port`.
    """
    app = Application("Scrapyd")
    poll_interval = config.getfloat('poll_interval', 5)

    # TaskPoller replaces the stock QueuePoller as the IPoller component.
    # (The unused QueuePoller instantiation and the commented-out QueuePoller
    # wiring were removed.)
    taskpoller = TaskPoller(config, app)
    app.setComponent(IPoller, taskpoller)

    tasktimer = TimerService(poll_interval, taskpoller.poll)
    tasktimer.setServiceParent(app)

    # BUG FIX: the http_port assignment was commented out while http_port was
    # still referenced by log.msg() and TCPServer() below, which raised
    # NameError at startup. Restore the setting.
    http_port = config.getint('http_port', 9090)
    bind_address = config.get('bind_address', '127.0.0.1')
    log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
            bind_address=bind_address, http_port=http_port)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    webservice.setServiceParent(app)
    return app
def application(config):
    """Assemble the Scrapyd application: components, launcher, poll timer and
    the web console (whose root class is a configurable dotted path)."""
    app = Application("Scrapyd")

    http_port = config.getint("http_port", 6800)
    bind_address = config.get("bind_address", "0.0.0.0")
    poll_interval = config.getfloat("poll_interval", 5)

    poller = QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler = SpiderScheduler(config)
    environment = Environment(config)
    for interface, component in (
            (IPoller, poller),
            (IEggStorage, eggstorage),
            (ISpiderScheduler, scheduler),
            (IEnvironment, environment)):
        app.setComponent(interface, component)

    # Launcher and web root classes are configurable dotted paths.
    launcher_cls = load_object(config.get("launcher", "scrapyd.launcher.Launcher"))
    launcher = launcher_cls(config, app)
    webroot_cls = load_object(config.get("webroot", "scrapyd.website.Root"))

    timer = TimerService(poll_interval, poller.poll)
    webservice = TCPServer(http_port, server.Site(webroot_cls(config, app)),
                           interface=bind_address)
    log.msg(
        format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
        bind_address=bind_address,
        http_port=http_port,
    )

    launcher.setServiceParent(app)
    timer.setServiceParent(app)
    webservice.setServiceParent(app)
    return app
def application():
    """Build the Scrapyd application using a freshly constructed Config().

    Unlike the config-parameterized variants, this entry point takes no
    arguments; the poll timer runs at a fixed 5-second interval.
    """
    app = Application("Scrapyd")
    config = Config()
    http_port = config.getint('http_port', 6800)

    poller = QueuePoller(config)
    eggstorage = FilesystemEggStorage(config)
    scheduler = SpiderScheduler(config)
    environment = Environment(config)
    for interface, component in (
            (IPoller, poller),
            (IEggStorage, eggstorage),
            (ISpiderScheduler, scheduler),
            (IEnvironment, environment)):
        app.setComponent(interface, component)

    launcher = Launcher(config, app)
    timer = TimerService(5, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)))
    for child in (launcher, timer, webservice):
        child.setServiceParent(app)
    return app
class SQSPollingService(BuildbotService):
    """
    Polling service to get messages from AWS Simple Queue Service (SQS).

    Inspired by the MaildirService class from buildbot.util.service.
    """

    log = Logger()

    def __init__(self, name, uri, pollinterval=60, codebase=None,
                 aws_region='eu-central-1', **kwargs):
        """
        Takes an uri to the SQS queue. It also, optionally, takes a
        pollinterval (seconds between polls). Defaults to 60.

        For cost estimate: free tier is one million requests per month.
        After that, it's $0.40/million requests. If we poll every minute,
        we'll do about 44000 requests. (If we get a message, there's an
        additional delete_message request, but that is negligible under the
        load I operate with.)

        We'll utilize "long polling", where we connect to SQS and leave the
        connection open in case new messages arrive. The AWS max timeout for
        this connection is 20s. This means that we sometime have to wait 40s
        to see a message. Maybe a recommended value could be 20? That would
        mean about 131000 requests/month. Having a higher default
        pollinterval value is a cautious choice, but can increase latency
        for some messages.
        """
        super().__init__(name=name, **kwargs)
        self.pollinterval = pollinterval
        self.uri = uri
        self.aws_region = aws_region
        self.sqs = boto3.client("sqs", region_name=aws_region)
        self.default_codebase = codebase

    def startService(self):
        """Start polling: attach a TimerService that calls poll() forever."""
        self.log.info("Starting SQS poller {name} for URI {uri} (every {i}s)",
                      name=self.name, uri=self.uri, i=self.pollinterval)
        self.timerService = TimerService(self.pollinterval, self.poll)
        self.timerService.setServiceParent(self)
        return super().startService()

    def stopService(self):
        """Detach the poll timer and stop the service."""
        self.log.info("Stopping SQS poller {name}", name=self.name)
        self.timerService.disownServiceParent()
        self.timerService = None
        return super().stopService()

    def _get_sqs_msg(self):
        """Issue one long-poll receive_message call; returns the raw SQS
        response dict, or None when an AWS error was caught and logged.

        Runs in a worker thread (see sqs_poll) because boto3 blocks.
        """
        def error(_fmt, *args, **kwargs):
            # All AWS failures are non-fatal: log and fall through to None.
            self.log.error('sqs.receive_message (non-fatal): ' + _fmt,
                           *args, **kwargs)
        try:
            return self.sqs.receive_message(
                QueueUrl=self.uri,
                AttributeNames=['SentTimestamp'],
                # 20s is the AWS maximum for long polling.
                WaitTimeSeconds=20,
            )
        except botocore.exceptions.CredentialRetrievalError as exc:
            error("failed to get credentials: {exc}", exc=exc)
        except botocore.exceptions.ClientError as exc:
            if exc.response.get('Error', {}).get('Code') == 'InternalError':
                err = exc.response['Error']
                http = exc.response.get('ResponseMetadata', {})
                error("unknown error, code {code}/http:{http}: {msg}",
                      http=http.get('HTTPStatusCode', 'unknown'),
                      code=err.get('Code', 'unknown'),
                      msg=err.get('Message', 'no error message available'))
            else:
                error("unknown error: {exc}", exc=exc)

    def is_empty(self, resp):
        """Return True when `resp` carries no messages (or is None)."""
        # In practice, "no messages available" will get you a response
        # *without* a Messages key. Out of caution, we also handle the
        # Messages key being an empty list. In case of failure, resp
        # may be None.
        #
        # Example of an empty response:
        # {
        #     'ResponseMetadata': {
        #         'RequestId': '19999999-7999-5999-8999-a99999999999',
        #         'HTTPStatusCode': 200,
        #         'HTTPHeaders': {
        #             'x-amzn-requestid': '19999999-7999-5999-8999-a99999999999',
        #             'date': 'Tue, 12 Nov 2019 18:44:13 GMT',
        #             'content-type': 'text/xml',
        #             'content-length': '240'
        #         },
        #         'RetryAttempts': 0
        #     }
        # }
        return resp is None or 'Messages' not in resp or not resp['Messages']

    @defer.inlineCallbacks
    def sqs_poll(self):
        """Poll SQS once (in a thread) and return the first message dict,
        or None when the queue was empty."""
        resp = yield threads.deferToThread(self._get_sqs_msg)
        if self.is_empty(resp):
            self.log.debug("Polled SQS {name}, no items to process",
                           name=self.name)
            defer.returnValue(None)
        self.log.info("Poll result SQS queue {name}: {resp}",
                      name=self.name, resp=resp)
        # Structure of a response with messages available:
        # {
        #     'Messages': [
        #         {
        #             'MessageId': 'b9999999-4999-4999-8999-d99999999999',
        #             'ReceiptHandle': 'AAAAAaaaAa....,',
        #             'MD5OfBody': '4b39812e2d7f14f01ae86e7e5bb417d6',
        #             'Body': '{"foo": "bar", "baz": "qux"}',
        #             'Attributes': {
        #                 'SentTimestamp': '1573477920399'
        #             }
        #         }
        #     ],
        #     'ResponseMetadata': {
        #         'RequestId': 'd9999999-b999-5999-b999-89999999999b',
        #         'HTTPStatusCode': 200,
        #         'HTTPHeaders': {
        #             'x-amzn-requestid': 'd9999999-b999-5999-b999-89999999999b',
        #             'date': 'Mon, 11 Nov 2019 13:12:00 GMT',
        #             'content-type': 'text/xml',
        #             'content-length': '53'
        #         },
        #         'RetryAttempts': 0
        #     }
        # }
        # TODO support popping multiple messages? Today, we only ask
        # for one message, but we can ask for up to 10 at a time. We
        # could thus generate ten changes per poll.
        defer.returnValue(resp['Messages'][0])

    def delete_msg(self, msg):
        """Acknowledge `msg` by deleting it from the queue."""
        self.sqs.delete_message(QueueUrl=self.uri,
                                ReceiptHandle=msg['ReceiptHandle'])

    def handleMessage(self, msg):
        """Process one SQS message dict. Subclasses must override."""
        raise NotImplementedError

    @defer.inlineCallbacks
    def poll(self):
        """One timer tick: fetch a message, handle it, then delete it.

        The delete only happens after handleMessage completes, so a failed
        handler leaves the message on the queue for redelivery.
        """
        msg = yield self.sqs_poll()
        if msg:
            yield self.handleMessage(msg)
            self.delete_msg(msg)
def hyper_task():
    """Fetch and process the message log once; invoked periodically by the
    TimerService below.

    Errors are logged and swallowed so that one failed run does not stop
    the timer from firing again.
    """
    print("starting loop....")
    try:
        getMessageLog(sHost, sPort, sUser, sPass, sDB)
    except Exception as e:
        log.error("Exception on getMessageLog. {0}".format(e))
        # BUG FIX: these were Python 2 print statements (`print "..."`),
        # which are syntax errors in Python 3 and inconsistent with the
        # print() calls above; use the function form throughout.
        print("Exception on getMessageLog. {0}".format(e))
    except BaseException:
        # Was a bare `except:`; `except BaseException` keeps the same
        # catch-everything behaviour (including SystemExit/KeyboardInterrupt
        # raised inside the call) while being explicit. Also fixed the
        # "Unexpetced" typo in the message.
        log.error("Unexpected Exception in getMessageLog")
        print("Unexpected Exception in getMessageLog")
    print("loop terminated")


from twisted.application import service
from twisted.application.internet import TimerService

# Run hyper_task every 5 minutes under twistd. (The commented-out
# LoopingCall/reactor alternative was removed as dead code.)
application = service.Application("processMessageLog")
ts = TimerService(60 * 5, hyper_task)
ts.setServiceParent(application)
        # NOTE(review): this chunk begins mid-way through check_circuit();
        # the `def check_circuit(...)` header and any branch guarding this
        # block are outside the visible region, so the indentation here is
        # reconstructed — confirm against the full file.
        # Record a SystemLog entry describing the stale circuit and append
        # it to the report buffer.
        log = SystemLog(
            text=error.substitute(circuit=circuit.ip_address,
                                  timediff=sub(now, last_log.created),
                                  date=last_log.created,))
        output.write('--------------------\n')
        output.write("\t %s\n" % log.text)
        session.add(log)
    return circuit


def check_meters():
    # Collect a report of failed circuits across all active meters and
    # e-mail it immediately.
    output = StringIO.StringIO()
    print('++++++++++++++++++++')
    print('Checking system at %s' % datetime.now())
    with transaction:
        # Only meters flagged active (status=True) are checked.
        meters = session.query(Meter).filter_by(status=True)
        for meter in meters:
            output.write('====================\n')
            output.write('Checking circuits for %s\n' % meter)
            for circuit in meter.get_circuits():
                check_circuit(circuit, output)
    msg = Message(subject='Gateway: Alert, Circuit failed',
                  sender=_from,
                  body=output.getvalue(),
                  recipients=_to)
    # fail_silently=False: raise if the alert e-mail cannot be sent.
    mailer.send_immediately(msg, fail_silently=False)


# Run check_meters under twistd on an `hour` interval (defined elsewhere
# in this module).
application = service.Application(__name__)
ts = TimerService(hour, check_meters)
ts.setServiceParent(application)
def __init__(self):
    """Initialize the receiver and schedule self.metrics every 30 seconds."""
    StatusReceiverMultiService.__init__(self)
    metrics_timer = TimerService(30, self.metrics)
    metrics_timer.setServiceParent(self)
class NetworksMonitoringService(MultiService, metaclass=ABCMeta):
    """Service to monitor network interfaces for configuration changes.

    Parse ``/etc/network/interfaces`` and the output from ``ip addr show``
    to update MAAS's records of network interfaces on this host.

    :param reactor: An `IReactor` instance.
    """

    # Seconds between updateInterfaces runs.
    interval = timedelta(seconds=30).total_seconds()

    def __init__(self, clock=None, enable_monitoring=True):
        # Order is very important here. First we set the clock to the
        # passed-in reactor, so that unit tests can fake out the clock if
        # necessary. Then we call super(). The superclass will set up the
        # structures required to add parents to this service, which allows
        # the remainder of this method to succeed. (And do so without the
        # side-effect of executing calls that shouldn't be executed based on
        # the desired reactor.)
        self.clock = clock
        super().__init__()
        self.enable_monitoring = enable_monitoring
        # The last successfully recorded interfaces.
        self._recorded = None
        self._monitored = frozenset()
        self._monitoring_state = {}
        self._monitoring_mdns = False
        self._locked = False
        # Use a named filesystem lock to prevent more than one monitoring
        # service running on each host machine. This service attempts to
        # acquire this lock on each loop, and then it holds the lock until
        # the service stops.
        self._lock = NetworksMonitoringLock()
        # Set up child service to update interface.
        self.interface_monitor = TimerService(
            self.interval, self.updateInterfaces)
        self.interface_monitor.setName("updateInterfaces")
        self.interface_monitor.clock = self.clock
        self.interface_monitor.setServiceParent(self)
        self.beaconing_protocol = None

    def updateInterfaces(self):
        """Update interfaces, catching and logging errors.

        This can be overridden by subclasses to conditionally update based
        on some external configuration.
        """
        d = maybeDeferred(self._assumeSoleResponsibility)

        def update(responsible):
            # Only the process holding the host-wide lock records interfaces.
            if responsible:
                d = maybeDeferred(self.getInterfaces)
                d.addCallback(self._updateInterfaces)
                return d

        def failed(failure):
            log.err(
                failure,
                "Failed to update and/or record network interface "
                "configuration: %s" % failure.getErrorMessage())

        d = d.addCallback(update)
        # During the update, we might fail to get the interface monitoring
        # state from the region. We can safely ignore this, as it will be
        # retried shortly.
        d.addErrback(suppress, NoConnectionsAvailable)
        d.addErrback(failed)
        return d

    def getInterfaces(self):
        """Get the current network interfaces configuration.

        This can be overridden by subclasses.
        """
        return deferToThread(get_all_interfaces_definition)

    @abstractmethod
    def getDiscoveryState(self):
        """Record the interfaces information.

        This MUST be overridden in subclasses.
        """

    @abstractmethod
    def recordInterfaces(self, interfaces):
        """Record the interfaces information.

        This MUST be overridden in subclasses.
        """

    @abstractmethod
    def reportNeighbours(self, neighbours):
        """Report on new or refreshed neighbours.

        This MUST be overridden in subclasses.
        """

    @abstractmethod
    def reportMDNSEntries(self, mdns):
        """Report on new or refreshed neighbours.

        This MUST be overridden in subclasses.
        """

    def reportBeacons(self, beacons):
        """Receives a report of an observed beacon packet."""
        for beacon in beacons:
            log.msg("Received beacon: %r" % beacon)
            self.beaconing_protocol.beaconReceived(beacon)

    def stopService(self):
        """Stop the service.

        Ensures that sole responsibility for monitoring networks is
        released.
        """
        d = super().stopService()
        if self.beaconing_protocol is not None:
            self.beaconing_protocol.stopProtocol()
        d.addBoth(callOut, self._releaseSoleResponsibility)
        return d

    def _assumeSoleResponsibility(self):
        """Assuming sole responsibility for monitoring networks.

        It does this by attempting to acquire a host-wide lock. If this
        service already holds the lock this is a no-op.

        :return: True if we have responsibility, False otherwise.
        """
        if self._locked:
            return True
        else:
            try:
                self._lock.acquire()
            except self._lock.NotAvailable:
                return False
            else:
                maaslog.info(
                    "Networks monitoring service: Process ID %d assumed "
                    "responsibility." % os.getpid())
                self._locked = True
                return True

    def _releaseSoleResponsibility(self):
        """Releases sole responsibility for monitoring networks.

        Another network monitoring service on this host may then take up
        responsibility. If this service is not currently responsible this
        is a no-op.
        """
        if self._locked:
            self._lock.release()
            self._locked = False
            # If we were monitoring neighbours on any interfaces, we need to
            # stop the monitoring services.
            self._configureNetworkDiscovery({})

    def _updateInterfaces(self, interfaces):
        """Record `interfaces` if they've changed."""
        if interfaces != self._recorded:
            d = maybeDeferred(self.recordInterfaces, interfaces)
            # Note: _interfacesRecorded() will reconfigure discovery after
            # recording the interfaces, so there is no need to call
            # _configureNetworkDiscovery() here.
            d.addCallback(callOut, self._interfacesRecorded, interfaces)
            return d
        else:
            # If the interfaces didn't change, we still need to poll for
            # monitoring state changes.
            d = maybeDeferred(self._configureNetworkDiscovery, interfaces)
            return d

    def _getInterfacesForNeighbourDiscovery(
            self, interfaces: dict, monitoring_state: dict):
        """Return the interfaces which will be used for neighbour discovery.

        :return: The set of interface names to run neighbour discovery on.
        """
        # Don't observe interfaces when running the test suite/dev env.
        # In addition, if we don't own the lock, we should not be monitoring
        # any interfaces.
        if is_dev_environment() or not self._locked or interfaces is None:
            return set()
        monitored_interfaces = {
            ifname for ifname in interfaces
            if (ifname in monitoring_state and
                monitoring_state[ifname].get('neighbour', False) is True)
        }
        return monitored_interfaces

    def _startNeighbourDiscovery(self, ifname):
        """Start neighbour discovery service on the specified interface."""
        service = NeighbourDiscoveryService(ifname, self.reportNeighbours)
        service.clock = self.clock
        service.setName("neighbour_discovery:" + ifname)
        service.setServiceParent(self)

    def _startBeaconing(self, ifname):
        """Start beaconing service on the specified interface."""
        service = BeaconingService(ifname, self.reportBeacons)
        service.clock = self.clock
        service.setName("beaconing:" + ifname)
        service.setServiceParent(self)

    def _startMDNSDiscoveryService(self):
        """Start resolving mDNS entries on attached networks."""
        try:
            self.getServiceNamed("mdns_resolver")
        except KeyError:
            # This is an expected exception. (The call inside the `try`
            # is only necessary to ensure the service doesn't exist.)
            service = MDNSResolverService(self.reportMDNSEntries)
            service.clock = self.clock
            service.setName("mdns_resolver")
            service.setServiceParent(self)

    def _stopMDNSDiscoveryService(self):
        """Stop resolving mDNS entries on attached networks."""
        try:
            service = self.getServiceNamed("mdns_resolver")
        except KeyError:
            # Service doesn't exist, so no need to stop it.
            pass
        else:
            service.disownServiceParent()
            maaslog.info("Stopped mDNS resolver service.")

    def _startNeighbourDiscoveryServices(self, new_interfaces):
        """Start monitoring services for the specified set of interfaces."""
        for ifname in new_interfaces:
            # Sanity check to ensure the service isn't already started.
            try:
                self.getServiceNamed("neighbour_discovery:" + ifname)
            except KeyError:
                # This is an expected exception. (The call inside the `try`
                # is only necessary to ensure the service doesn't exist.)
                self._startNeighbourDiscovery(ifname)

    def _stopNeighbourDiscoveryServices(self, deleted_interfaces):
        """Stop monitoring services for the specified set of interfaces."""
        for ifname in deleted_interfaces:
            try:
                service = self.getServiceNamed(
                    "neighbour_discovery:" + ifname)
            except KeyError:
                # Service doesn't exist, so no need to stop it.
                pass
            else:
                service.disownServiceParent()
                maaslog.info(
                    "Stopped neighbour observation service for %s."
                    % ifname)

    def _startBeaconingServices(self, new_interfaces):
        """Start monitoring services for the specified set of interfaces."""
        for ifname in new_interfaces:
            # Sanity check to ensure the service isn't already started.
            try:
                self.getServiceNamed("beaconing:" + ifname)
            except KeyError:
                # This is an expected exception. (The call inside the `try`
                # is only necessary to ensure the service doesn't exist.)
                self._startBeaconing(ifname)

    def _stopBeaconingServices(self, deleted_interfaces):
        """Stop monitoring services for the specified set of interfaces."""
        for ifname in deleted_interfaces:
            try:
                service = self.getServiceNamed("beaconing:" + ifname)
            except KeyError:
                # Service doesn't exist, so no need to stop it.
                pass
            else:
                service.disownServiceParent()
                maaslog.info("Stopped beaconing service for %s." % ifname)

    def _shouldMonitorMDNS(self, monitoring_state):
        """Return True when any interface's state asks for mDNS monitoring."""
        # If any interface is configured for mDNS, we must start the
        # monitoring process. (You cannot select interfaces when using
        # `avahi-browse`.)
        mdns_state = {
            monitoring_state[ifname].get('mdns', False)
            for ifname in monitoring_state.keys()
        }
        return True in mdns_state

    @inlineCallbacks
    def _configureNetworkDiscovery(self, interfaces):
        """Update the set of monitored interfaces.

        Calculates the difference between the interfaces that are currently
        being monitored and the new list of interfaces enabled for
        discovery.

        Starts services for any new interfaces, and stops services for any
        deleted interface.

        Updates `self._monitored` with the current set of interfaces being
        monitored.
        """
        if interfaces is None:
            # This is a no-op if we don't have any interfaces to monitor
            # yet. (An empty dictionary tells us not to monitor any
            # interfaces.)
            return
        # Don't bother calling the region if the interface dictionary
        # hasn't yet been populated, or was intentionally set to nothing.
        if len(interfaces) > 0:
            monitoring_state = yield maybeDeferred(self.getDiscoveryState)
        else:
            monitoring_state = {}
        # If the monitoring state has changed, we need to potentially start
        # or stop some services.
        if self._monitoring_state != monitoring_state:
            log.msg("New interface monitoring state: %r" % monitoring_state)
            self._configureNeighbourDiscovery(interfaces, monitoring_state)
            self._configureMDNS(monitoring_state)
            self._monitoring_state = monitoring_state

    def _configureMDNS(self, monitoring_state):
        """Start or stop the mDNS resolver to match `monitoring_state`."""
        should_monitor_mdns = self._shouldMonitorMDNS(monitoring_state)
        if not self._monitoring_mdns and should_monitor_mdns:
            # We weren't currently monitoring any interfaces, but we have
            # been requested to monitor at least one.
            self._startMDNSDiscoveryService()
            self._monitoring_mdns = True
        elif self._monitoring_mdns and not should_monitor_mdns:
            # We are currently monitoring at least one interface, but we
            # have been requested to stop monitoring them all.
            self._stopMDNSDiscoveryService()
            self._monitoring_mdns = False
        else:
            # No state change. We either still AREN'T monitoring any
            # interfaces, or we still ARE monitoring them. (Either way, it
            # doesn't matter for mDNS discovery purposes.)
            pass

    def _configureNeighbourDiscovery(self, interfaces, monitoring_state):
        """Start/stop per-interface neighbour discovery (and beaconing) to
        match the requested monitoring state."""
        monitored_interfaces = self._getInterfacesForNeighbourDiscovery(
            interfaces, monitoring_state)
        # Calculate the difference between the sets. We need to know which
        # interfaces were added and deleted (with respect to the interfaces
        # we were already monitoring).
        new_interfaces = monitored_interfaces.difference(self._monitored)
        deleted_interfaces = self._monitored.difference(monitored_interfaces)
        if len(new_interfaces) > 0:
            log.msg("Starting neighbour discovery for interfaces: %r" % (
                new_interfaces))
            self._startNeighbourDiscoveryServices(new_interfaces)
            # XXX mpontillo 2017-07-12: for testing, just start beaconing
            # services on all the interfaces enabled for active discovery.
            self._startBeaconingServices(new_interfaces)
        if len(deleted_interfaces) > 0:
            log.msg("Stopping neighbour discovery for interfaces: %r" % (
                deleted_interfaces))
            self._stopNeighbourDiscoveryServices(deleted_interfaces)
            # XXX mpontillo 2017-07-12: this should be separately configured.
            # (see similar comment in the 'start' path above.)
            self._stopBeaconingServices(deleted_interfaces)
        self._monitored = monitored_interfaces

    def _interfacesRecorded(self, interfaces):
        """The given `interfaces` were recorded successfully."""
        self._recorded = interfaces
        if self.beaconing_protocol is None:
            self.beaconing_protocol = BeaconingSocketProtocol(
                self.clock, interfaces=interfaces)
        if self.enable_monitoring is True:
            self._configureNetworkDiscovery(interfaces)