def service_modules(self):
    """Discover importable service modules under the configured services dir.

    Scans `worker.services_dir` for sub-directories that contain both
    `__init__.py` and `<name>/<name>.py`, optionally restricted to the
    names listed in `worker.load_workers`.

    Returns a list of dotted module names of the form "<name>.<name>".
    """
    requested = set(config.get("worker.load_workers", default=[]))
    modules = []
    for entry in scandir(
            config.get("worker.services_dir", default="/code/services")):
        if not entry.is_dir():
            self.log.debug("Ignoring %s (not a directory)", entry.path)
            continue
        dname = entry.name
        # An empty allow-list means "load everything".
        if requested and dname not in requested:
            self.log.debug("Ignoring %s (not in %s)", dname, requested)
            continue
        # Both files must exist for the directory to count as a service.
        complete = True
        for fname in ("__init__.py", dname + ".py"):
            path = os.path.join(entry.path, fname)
            if not os.access(path, os.F_OK):
                self.log.debug("Ignoring %s: %s not found", entry.path, path)
                complete = False
                break
        if complete:
            modules.append("{dname}.{dname}".format(dname=dname))
    return modules
def __init__(self, time=0, result_is_url=False):
    """Initialize the caching layer.

    time -- entry TTL in seconds, stored as self.ttl.
    result_is_url -- whether cached results are URLs rather than payloads.
    """
    self.ttl = time
    self.result_is_url = result_is_url
    self.cache = instance()
    # Seconds between polls of the cache while a request is in flight.
    raw_frequency = config.get("cache.check_frequency", default="0.1")
    self.cache_check_frequency = float(raw_frequency)
    # TTL (seconds) of the in-flight marker for requests being computed.
    raw_inflight = config.get("cache.inflight_ttl", default="60")
    self.inflight_ttl = int(raw_inflight)
def test_set_entry_from_config_file(servicelib_yaml):
    """File-backed config is read-only: set() fails and values are untouched."""
    for getter in (config_client.instance().get, config.get):
        assert getter("worker.num_processes") == 10
    with pytest.raises(Exception) as exc:
        config_client.instance().set("worker.num_processes", 42)
    assert str(exc.value) == "File-based config is read-only"
    # The failed set must not have modified the stored value.
    for getter in (config_client.instance().get, config.get):
        assert getter("worker.num_processes") == 10
def main():
    """Build the uwsgi command line from SERVICELIB worker config and exec it.

    The current process image is replaced via os.execlp, so this function
    never returns on success.
    """
    logutils.configure_logging()

    cmd = ["uwsgi"]

    # Optional code auto-reload (development aid); 0 disables it.
    autoreload = int(config.get("worker_autoreload", "0"))
    if autoreload > 0:
        cmd.extend(["--py-autoreload", "{}".format(autoreload)])

    # Colon-separated list of directories to serve as static maps.
    serve_results = config.get("worker_serve_results", default=None)
    if serve_results is not None:
        for dname in serve_results.split(":"):
            cmd.extend(["--static-map", "{}={}".format(dname, dname)])

    # Expose the Swagger spec if the services directory ships one.
    swagger_yaml = Path(
        config.get("worker_services_dir", default="/code/services"), "swagger.yaml"
    )
    if swagger_yaml.exists():
        cmd.extend(["--static-map", "/services/swagger.yaml={}".format(swagger_yaml)])

    swagger_ui = Path(
        config.get("worker_swagger_ui_path", default="/usr/share/nginx/html")
    )
    # BUG FIX: the original re-tested `swagger_yaml.exists()` here (copy-paste),
    # so /docs could be mapped to a non-existent UI directory. Serve the UI
    # only when both the spec and the UI directory are present.
    if swagger_yaml.exists() and swagger_ui.exists():
        cmd.extend(["--static-map", "/docs={}".format(swagger_ui)])
        cmd.extend(["--static-index", "index.html"])

    # Optional extra static map; a missing config key means "none".
    try:
        static_assets = config.get("worker_static_map")
    except Exception:
        pass
    else:
        cmd.extend(["--static-map", static_assets])

    cmd.append(
        config.get(
            "worker_uwsgi_config_file",
            default=str(Path(config.__file__, "..", "uwsgi.ini").resolve()),
        )
    )

    # Propagate worker sizing to uwsgi via the environment, without
    # clobbering values the caller already set.
    os.environ.setdefault(
        "SERVICELIB_WORKER_NUM_PROCESSES",
        config.get("worker_num_processes", str(psutil.cpu_count())),
    )
    os.environ.setdefault(
        "SERVICELIB_WORKER_NUM_THREADS", config.get("worker_num_threads", "1")
    )
    os.environ.setdefault("SERVICELIB_WORKER_PORT", config.get("worker_port", "8000"))

    log = logutils.get_logger("servicelib-worker")
    log.info("Running: %s", " ".join(cmd))
    os.execlp(cmd[0], *cmd[0:])
def pool(self):
    """Return the shared Redis connection pool, creating it on first use.

    Creation is guarded by self._lock so concurrent callers initialize
    the pool exactly once.
    """
    with self._lock:
        if self._pool is not None:
            return self._pool
        registry_url = config.get("registry.url")
        self._pool = redis.ConnectionPool.from_url(registry_url)
        self.log.debug("Initialized Redis connection pool for URL %s", registry_url)
        return self._pool
def on_get(self, req, resp):
    """Report per-process and aggregate resource usage for this worker.

    Responds with a JSON document containing configured limits, CPU and
    memory totals, and the raw per-process samples.
    """
    proc = psutil.Process()
    with proc.oneshot():
        parent = proc.parent()
        # NOTE(review): rlimit() returns a (soft, hard) pair; the whole
        # tuple is reported as "max_num_fds" below — confirm intended.
        nofile_soft = proc.rlimit(psutil.RLIMIT_NOFILE)
        with parent.oneshot():
            # Assume all processes with our parent as ancestor are
            # part of this worker instance.
            sample_attrs = [
                "cmdline",
                "connections",
                "cpu_percent",
                "cpu_times",
                "memory_info",
                "num_fds",
                "pid",
                "ppid",
            ]
            proc_set = [
                p.as_dict(attrs=sample_attrs)
                for p in parent.children(recursive=True)
            ]
    # Aggregate before building the response document.
    cpu_total = 0.0
    rss_total = 0
    vms_total = 0
    for sample in proc_set:
        cpu_total += sample["cpu_percent"]
        rss_total += sample["memory_info"][0]
        vms_total += sample["memory_info"][1]
    stats = {
        "config": {
            "num_processes": int(config.get("worker.num_processes")),
            "num_threads": int(config.get("worker.num_threads")),
            "max_num_fds": nofile_soft,
        },
        "totals": {
            "cpu_percent": cpu_total,
            "mem": {"rss": rss_total, "vms": vms_total},
        },
        "procs": proc_set,
    }
    resp.status = falcon.HTTP_200
    resp.data = json.dumps(stats).encode("utf-8")
def instance():
    """Return the configured registry backend, lazily constructed once.

    _INSTANCE_MAP maps class-name keys to either a class (not yet built)
    or an already-built singleton; classes are instantiated on first use.
    """
    class_name = config.get("registry.class", default="no-op")
    if class_name not in _INSTANCE_MAP:
        raise Exception("Invalid value for `registry.class`: {}".format(class_name))
    ret = _INSTANCE_MAP[class_name]
    if isinstance(ret, type):
        ret = ret()
        _INSTANCE_MAP[class_name] = ret
    return ret
def instance():
    """Return the configured inventory backend, lazily constructed once.

    _INSTANCE_MAP maps class-name keys to either a class (not yet built)
    or an already-built singleton; classes are instantiated on first use.
    """
    # Keyword `default=` for consistency with the sibling registry and
    # scratch factory functions.
    class_name = config.get("inventory_class", default="default")
    try:
        ret = _INSTANCE_MAP[class_name]
    except KeyError:
        raise Exception(
            "Invalid value for `inventory_class`: {}".format(class_name))
    if isinstance(ret, type):
        _INSTANCE_MAP[class_name] = ret = ret()
    return ret
def instance():
    """Return the configured scratch strategy, lazily constructed once.

    On first use the strategy class is instantiated and wrapped in
    DefaultScratch; later calls get the cached wrapper.
    """
    class_name = config.get("scratch_strategy", default="random")
    if class_name not in _INSTANCE_MAP:
        raise Exception(
            "Invalid value for `scratch_strategy`: {}".format(class_name))
    ret = _INSTANCE_MAP[class_name]
    if isinstance(ret, type):
        ret = DefaultScratch(ret())
        _INSTANCE_MAP[class_name] = ret
    return ret
def service_url(service_name):
    """Return the HTTP URL under which *service_name* is reachable.

    When `worker_port` is "0", the actual listening TCP port of this
    process is discovered via psutil and exported back into the
    environment for later callers.

    Raises an Exception if no listening port can be found, or ValueError
    for a non-numeric configured port.
    """
    host = config.get("worker_host", default=socket.getfqdn())
    port = config.get("worker_port", "0")
    if port != "0":
        try:
            port = int(port)
        except Exception as exc:
            raise ValueError("Invalid listening port {}: {}".format(port, exc))
    else:
        # Discover the real port from this process's listening sockets.
        listening = None
        for conn in psutil.Process().connections(kind="tcp"):
            if conn.status == psutil.CONN_LISTEN:
                listening = conn.laddr.port
                break
        if listening is None:
            raise Exception("Cannot determine listening port")
        port = listening
        os.environ["SERVICELIB_WORKER_PORT"] = str(port)
    return "http://{}:{}/services/{}".format(host, port, service_name)
def __init__(self):
    """Work out host:port under which HTTP file results are served."""
    super(HttpFileResults, self).__init__()
    host = config.get("results.http_hostname", default=HOSTNAME_FQDN)
    key = "results.http_port"
    try:
        port = config.get(key)
    except Exception:
        # Assume results are exposed through uWSGI as well.
        key = "worker.port"
        port = config.get(key)
    try:
        port = int(port)
    except ValueError as exc:
        raise Exception("Invalid config variable {}={}: {}".format(
            key, port, exc))
    self.netloc = "{}:{}".format(host, port)
def location(self):
    """Public download URL for this cached result.

    Built from the configured download host, the CDS hostname config
    value (short hostname by default) and the result's path.
    """
    # Dead commented-out variant (stack-based URL) removed.
    return "http://{}/cache-{}{}".format(
        self._download_host,
        config.get("results_cds_hostname", default=HOSTNAME_SHORT),
        self._path,
    )
def test_value_from_env(monkeypatch):
    """Env var values are parsed: ints, floats, bools, JSON, plain strings."""
    cases = [
        ("SERVICELIB_FOO_BAR_INT", "42"),
        ("SERVICELIB_FOO_BAR_FLOAT", "42.0"),
        ("SERVICELIB_FOO_BAR_TRUE1", "True"),
        ("SERVICELIB_FOO_BAR_TRUE2", "true"),
        ("SERVICELIB_FOO_BAR_FALSE1", "False"),
        ("SERVICELIB_FOO_BAR_FALSE2", "False"),
        ("SERVICELIB_FOO_BAR_LIST", '["one", {"two": 3}]'),
        ("SERVICELIB_FOO_BAR_DICT", '{"two": 3}'),
        ("SERVICELIB_FOO_BAR_PEPE", "just a string"),
    ]
    for name, raw in cases:
        monkeypatch.setenv(*env_var(name, raw))
    assert config.get("foo.bar_int") == 42
    assert config.get("foo.bar_float") == 42.0
    assert config.get("foo.bar_true1")
    assert config.get("foo.bar_true2")
    assert not config.get("foo.bar_false1")
    assert not config.get("foo.bar_false2")
    assert config.get("foo.bar_list") == ["one", {"two": 3}]
    assert config.get("foo.bar_dict") == {"two": 3}
    assert config.get("foo.bar_pepe") == "just a string"
def scratch_dirs(self):
    """Scratch directories, as configured under `scratch.dirs` (required key)."""
    configured = config.get("scratch.dirs")
    return configured
def __init__(self, path, content_type):
    """Initialize a CDS cache result from required config entries."""
    super(CDSCacheResult, self).__init__(path, content_type)
    # Both keys are mandatory: config.get with no default.
    stack = config.get("results_cds_stack")
    download_host = config.get("results_cds_download_host")
    self._stack = stack
    self._download_host = download_host
def test_default_value_with_config_file(servicelib_yaml):
    """A key missing from the config file falls back to the caller's default."""
    value = config.get("no.such_key", default="whatever")
    assert value == "whatever"
def test_env_overrides_config_file(servicelib_yaml, monkeypatch):
    """An environment variable wins over the config file and is parsed as int."""
    monkeypatch.setenv(*env_var("SERVICELIB_WORKER_NUM_PROCESSES", "42"))
    value = config.get("worker.num_processes")
    assert value == 42
# WSGI entry point for the servicelib worker: configures logging, loads
# the service inventory and wires up the Falcon application routes.
import os

import falcon

from servicelib import config, inventory, logutils
from servicelib.falcon import HealthResource, StatsResource, WorkerResource

__all__ = [
    "application",
]

# Here we go! Logging must be configured before anything else runs.
logutils.configure_logging(
    level=config.get("log_level", default="debug").upper(),
    log_type=config.get("log_type", default="text"),
)

# Load all known services and expose each one under /services/<name>.
services = inventory.instance().load_services()
application = falcon.API(media_type=falcon.MEDIA_JSON)
application.add_route("/services/{service}", WorkerResource(services))

# Now that routes for services have been set up, we are ready to
# handle requests. Let Kubernetes know (or whoever may be sending
# health check probes) by enabling the health check route.
application.add_route("/health", HealthResource())
application.add_route("/stats", StatsResource())
# NOTE(review): `get` and `put` read self._data / self.ttl and appear to be
# methods of the Cache class instantiated below; the class header is not
# visible in this chunk.
def get(self, k):
    """Return the cached value for `k`.

    Raises KeyError when `k` is absent or its TTL has elapsed.
    """
    v, expires = self._data[k]
    now = time.time()
    if now > expires:
        # Entry is stale: drop it (tolerating a concurrent deletion)
        # and report it as missing.
        try:
            del self._data[k]
        except KeyError:
            pass
        raise KeyError(k)
    return v

def put(self, k, v):
    """Store `v` under `k` with an absolute expiry of now + ttl seconds."""
    self._data[k] = (v, time.time() + self.ttl)

# Module-level cache of registry lookups; TTL comes from config (seconds).
_CACHE = Cache(int(config.get("registry_cache_ttl", default=5)))

def service_url(name, local_only=False):
    # TODO: Cache results.
    c = _redis_pool.connection()
    k = redis_key(name)
    url = None
    if local_only:
        # Prefer the URL registered by this very host, if any.
        for parsed, unparsed in [(urlparse(u), u) for u in c.smembers(k)]:
            if parsed.netloc.split(":")[0] == HOSTNAME:
                url = unparsed
                break
    else:
        # Any registered instance will do: pick one at random.
        url = c.srandmember(k)
    # NOTE(review): `url` is computed but no `return` is visible here; the
    # function appears to continue beyond (or be truncated by) this chunk.
def test_default_value(servicelib_yaml):
    """With no entry for `foo`, the supplied default is returned verbatim."""
    result = config.get("foo", default="42")
    assert result == "42"
def __init__(self, path, content_type):
    """Initialize a CDS cache result with download host and path prefix."""
    super(CDSCacheResult, self).__init__(path, content_type)
    self._download_host = config.get("results.cds_download_host")
    # Normalize the configured prefix: no trailing slash.
    prefix = config.get("results.cds_download_path_prefix")
    self._path_prefix = prefix.rstrip("/")
def result_dirs(self):
    """Result directories, as configured under `results.dirs` (required key)."""
    configured = config.get("results.dirs")
    return configured
def get_default_timeout():
    """Default client timeout from `client.default_timeout`, or None if unset."""
    timeout = config.get("client.default_timeout", default=None)
    return timeout
def test_env_overrides_default(monkeypatch):
    """An environment variable beats an explicit default and is parsed as int."""
    monkeypatch.setenv(*env_var("SERVICELIB_FOO_BAR", "42"))
    result = config.get("foo.bar", default="43")
    assert result == 42
def scratch_dirs(self):
    """Colon-separated `scratch_dirs` config entry, split into a list."""
    raw = config.get("scratch_dirs")
    return raw.split(":")
def test_config_file(servicelib_yaml):
    """Values from the YAML config file come back with their native types."""
    expected = {
        "worker.num_processes": 10,
        "worker.num_threads": 1,
        "inventory.class": "default",
        "registry.url": "redis://some-host/12",
        "registry.cache_ttl": 5,
    }
    for key, value in expected.items():
        assert config.get(key) == value
def result_dirs(self):
    """Colon-separated `results_dirs` config entry, split into a list."""
    raw = config.get("results_dirs")
    return raw.split(":")
def test_config_file_overrides_default(servicelib_yaml):
    """A value present in the config file wins over the caller's default."""
    result = config.get("worker.num_processes", default="42")
    assert result == 10
def test_missing_key(servicelib_yaml):
    """Accessing an unset key with no default raises with a clear message."""
    with pytest.raises(Exception) as exc:
        config.get("foo")
    message = str(exc.value)
    assert message.startswith("No config value for `foo`")
def __init__(self):
    """Connect to the configured memcached instances.

    Reads the (mandatory) `cache.memcached_addresses` config entry and
    builds a memcache client for those addresses.
    """
    # BUG FIX: the original read `super(MemcachedCache, self).__init__`
    # without the call parentheses, so the base class initializer was
    # never actually invoked.
    super(MemcachedCache, self).__init__()
    memcached_addresses = config.get("cache.memcached_addresses")
    self.log.info("Using memcached instances: %s", memcached_addresses)
    self._memcached = memcache.Client(memcached_addresses)