def load(self):
    """Fetch the remote config object by name and merge it into self.conf.

    Also maps deprecated key names onto their current equivalents,
    emitting a deprecation notice for each. Returns the merged config.
    """
    remote = self.rcs.get('config', self.conf.name)
    if remote:
        dictlib.union(self.conf, remote)

    # backwards-compatibility shims for renamed configuration keys
    if 'export' in self.conf:
        self.DEPRECATE("conf.export should be conf.exports")
        self.conf.exports = self.conf.export
    if 'macroExpansion' in self.conf:
        self.conf.content['varsub'] = self.conf.macroExpansion
        self.DEPRECATE("conf.macroExpansion should be conf.content.varsub")

    return self.conf
def _launch_prep_action(self, action_target, commit=True):
    """
    With launch control, local immutable package env configs take
    precedence over remotely defined (Reflex Engine) configs.

    Important: do not set secrets or environmentally unique information
    in local environment--that is only for things relevant to the local
    immutable package, such as the version of the stack required
    (java/node/etc).

    :param action_target: action name to launch; 'none' skips target
        resolution, falsy falls back to the pipeline's launch.target.
    :param commit: passed through to _load_reflex_engine_config.
    :returns: self (for chaining)
    :raises ValueError: if a required config key or launch target
        cannot be resolved.
    """
    action = Action(base=self, extcfg=self, colorize=False)

    # set APP_CFG_BASE and APP_RUN_DIR first -- these are not in
    # setenv because they must exist prior to setenv (which can have macro subs)
    local_config = action.config.get('config', {})

    # pylint: disable=unused-argument
    def get_conf(name):
        """Pull some configs based on local first, then service"""
        if name in local_config:
            return local_config[name]
        elif name in self.launch_pipeline.get('launch', {}):
            return self.launch_pipeline['launch'][name]
        raise ValueError("Unable to find '" + name + "' in local or pipeline definition")

    os.environ['APP_CFG_BASE'] = self.sed_env(get_conf('cfgdir'), {}, '')
    self.launch_cfgdir = os.environ['APP_CFG_BASE']
    os.environ['APP_RUN_BASE'] = self.sed_env(get_conf('rundir'), {}, '')
    self.launch_rundir = os.environ['APP_RUN_BASE']

    # Load Reflex Engine config, after ^^ environ changes
    self._load_reflex_engine_config(self.launch_service['config'], commit=commit)

    # load action configs. pull Reflex Engine config expanded, merge with
    # action config and redo macro expansion
    conf = self.launch_config
    conf.setenv = dictlib.union(conf.setenv, action.config.get('setenv', {}))
    cproc = ConfigProcessor(base=self, rcs=self.rcs)
    conf.setenv = cproc.macro_expand_dict(conf.setenv)
    # push every expanded setenv value into the real environment and the
    # action's env (values are re-expanded against local_config first)
    for key, value in conf.setenv.items():
        value = self.sed_env(str(value), local_config, '')
        conf.setenv[key] = value
        os.environ[key] = value
        action.env[key] = value

    if action_target == 'none':  # special case
        return self
    elif action_target:
        self.launch_target = action_target
    elif self.launch_pipeline.get('launch', {}).get('target', None):
        self.launch_target = self.launch_pipeline['launch']['target']
    else:
        raise ValueError("No launch service action target (svc:target)")

    action.verify(self.launch_target)
    self.launch_action = action
    return self
def load_config(path: str) -> DotAccessDict:
    """Load a YAML config file, merge it over CONFIG_DEFAULTS, and validate.

    Args:
        path: filesystem path to the YAML configuration file.

    Returns:
        The merged, schema-validated configuration as a DotAccessDict.

    Raises:
        OSError: if the file cannot be opened.
        yaml.YAMLError: if the file is not valid YAML.
        Whatever ``validate`` raises when the merged config fails CONFIG_SCHEMA.
    """
    with open(path, 'r') as f:
        # yaml.load() without an explicit Loader allows arbitrary object
        # construction from untrusted input and raises TypeError on
        # PyYAML >= 6.0; safe_load restricts parsing to plain data types,
        # which is all a config file needs.
        config = yaml.safe_load(f)
    config = dictlib.union(CONFIG_DEFAULTS, config)
    validate(config, CONFIG_SCHEMA)
    return DotAccessDict(config)
def _call(self, func, target, *args, **kwargs):
    """Call Reflex Engine, wrapped with authentication and session management

    :param func: the HTTP callable to invoke (passed to self._call_sub).
    :param target: endpoint path appended to REFLEX_URL.
    :returns: decoded JSON body on 200/201/202, {} on 204.
    :raises ClientError: on 500, 404, non-JSON responses, or any other
        non-success status.
    """
    # make sure we have a valid session before making the real call
    try:
        self._login()
    except Unauthorized as err:
        self.ABORT("Unauthorized: " + str(err))
    except requests.exceptions.ConnectionError:
        self.ABORT("Unable to connect to REFLEX_URL ({})".format(
            self.cfg['REFLEX_URL']))

    # enrich the arguments
    headers = self.headers.copy()
    if kwargs.get('headers'):
        headers = dictlib.union(headers, kwargs['headers'])
    if not kwargs.get('cookies'):
        kwargs['cookies'] = {}
    if not headers.get('Content-Type'):
        headers['Content-Type'] = "application/json"
    kwargs['headers'] = headers
    query = self.cfg['REFLEX_URL'] + "/" + target
    # debug flag: ask the server to log ABAC policy decisions
    if self.debug.get('remote-abac'):
        if "?" in query:
            query += "&"
        else:
            query += "?"
        query += "abac=log"

    # make the call
    result = self._call_sub(func, query, *args, **kwargs)

    # unlikely, as self._login() should take care of this, unless our timing
    # is off from the server's, but just in case...
    if result.status_code == 401:
        self.DEBUG("Unauthorized received, Retrying Login")
        self._login(force=True)
        result = self._call_sub(func, query, *args, **kwargs)

    # NOTE: order matters below -- hard failures first, then content-type
    # sanity, then the success statuses.
    if result.status_code == 500:
        raise ClientError("Server side error")
    if result.status_code == 404:
        raise ClientError("Endpoint or object not found (" + query + ")")
    if "application/json" not in result.headers.get('Content-Type', ''):
        self.DEBUG("error", result.content.decode())
        raise ClientError("Result is not valid content type")
    if result.status_code == 204:
        return {}
    if result.status_code in (200, 201, 202):
        return result.json()
    # any other status: surface the server-provided message
    raise ClientError(result.json()['message'])
def export(self):
    """Return a complete object"""
    merged = {}
    if self.launch_pipeline:
        merged = dictlib.union(merged, self.launch_pipeline)
        merged['pipeline'] = merged['name']
    if self.launch_service:
        merged = dictlib.union(merged, self.launch_service)
    if self.launch_config:
        merged = dictlib.union(merged, self.launch_config)
    # strip db-internal attributes from the exported view
    for internal in ('id', 'name', 'createdAt', 'updatedAt'):
        merged.pop(internal, '')
    return merged
def _attrs_skeleton(**kwargs):
    """Build a default ABAC attribute object, overlaying any keyword overrides."""
    skeleton = dictlib.Obj(
        cert_cn='',
        user_name='',
        ip='',
        token_nbr=0,
        token_name='',
        http_headers=dictlib.Obj(),
        groups=dictlib.Obj(),
    )
    return dictlib.union(skeleton, kwargs) if kwargs else skeleton
def rest_update(self, *args, **kwargs):
    """ update

    REST handler: update an object identified by args[0] (numeric id or
    name). With ?merge=true the request body is merged over the stored
    object; otherwise it replaces it. Maps dbo exceptions to HTTP statuses.
    """
    attrs = self.abac_gather()
    if not attrs.token_nbr:  # check policy instead
        self.auth_fail("Unauthorized")
    if not args:
        return self.respond({"status": "failed"}, status=404)
    body = get_json_body()
    target = args[0]
    # leading digit => treat the target as a numeric object id
    if target and target[:1] in "0123456789":
        body['id'] = int(target)
    else:
        body['name'] = target
    obj = self.obj(master=self.server.dbm, reqid=self.reqid)
    try:
        # would prefer to do PATCH, but tired of fighting w/CherryPy
        if 'merge' in kwargs and kwargs['merge'].lower() == 'true':
            data = obj.get(target, attrs).dump()
            obj.load(dictlib.union(data, body))
        else:
            obj.load(body)
        warnings = obj.update(attrs)
    except dbo.ObjectNotFound as err:
        self.respond_failure({
            "status": "failed",
            "message": str(err)
        }, status=404)
    except dbo.InvalidParameter as err:
        self.respond_failure({
            "status": "failed",
            "message": str(err)
        }, status=400)
    except dbo.NoChanges as err:
        self.respond_failure({
            "status": "unknown",
            "message": str(err)
        }, status=202)
    # NOTE(review): if respond_failure() returns instead of raising,
    # `warnings` would be unbound here -- presumably it aborts the
    # request; confirm against its implementation.
    if warnings:
        return self.respond(
            {
                "status": "updated",
                "warning": "; ".join(warnings)
            }, status=201)
    return self.respond({"status": "updated"}, status=201)
def _process(self, conf):
    """Expand macro variables throughout a config object, in four passes.

    Pass 1 merges all declared variable dictionaries into one view,
    pass 2 macro-expands that view, pass 3 writes the expanded values
    back into their source dictionaries, and pass 4 expands setenv and
    the content source/dest paths (which do not merge into the shared
    view).

    :param conf: config object with name, procvars, setenv, and content.
    :returns: the same conf, mutated in place.
    """
    subname = (conf.name + ".").split(".")[0]
    self.NOTIFY("Processing config object {0}".format(subname))

    # process the variables
    self.expanded_vars = allvars = dict()
    procvars = set(['sensitive.parameters'] + conf.procvars)

    # pass1, merge all vars into one monster dictionary
    for key in procvars:
        dictlib.union(allvars, deep_get(conf, key))

    # pass2 expand all variables
    self.macro_expand_dict(allvars)

    # pass3 put values back in their place
    for target in procvars:
        tobj = deep_get(conf, target)
        if not isinstance(tobj, dict):
            # BUG FIX: previously formatted the builtin `dict` type into the
            # message instead of the offending target name.
            self.ABORT("Target {} is not a dictionary".format(target))
        for key in tobj:
            tobj[key] = allvars[key]

    # pass4, do setenv last, it does not merge into allvars view
    for key in conf.setenv:
        conf.setenv[key] = self.macro_expand(conf.setenv[key], allvars)
        #conf.setenv[key] = self.sed_env(conf.setenv[key], allvars, key)

    if conf.content.get('dest'):
        conf.content.dest = self.macro_expand(conf.content.dest, allvars)
    if conf.get('file'):
        self.DEPRECATE("conf.file should be conf.content.dest")
        conf.file = self.macro_expand(conf.file, allvars)
    if conf.content.get('source'):
        conf.content.source = self.macro_expand(conf.content.source, allvars)

    return conf
def start_agent(self, cfgin=True):
    """
    CLI interface to start 12-factor service.

    Configuration is read as JSON from stdin when cfgin is True, else
    from the REFLEX_MONITOR_CONFIG environment variable (raw JSON or
    base64-encoded JSON), else built-in defaults are used. The parsed
    config is merged over default_conf, worker/result thread functions
    are wired in, and the agent is started. Ctrl-C triggers a clean
    shutdown of all stopper events.
    """
    default_conf = {
        "threads": {
            "result": {"number": 0, "function": None},
            "worker": {"number": 0, "function": None},
        },
        "interval": {
            "refresh": 900,
            "heartbeat": 300,
            "reporting": 300,
            "test": 60
        },
        "heartbeat-hook": False
    }
    indata = {}
    if cfgin:
        indata = json.load(sys.stdin)
    elif os.environ.get("REFLEX_MONITOR_CONFIG"):
        indata = os.environ.get("REFLEX_MONITOR_CONFIG")
        if indata[0] != "{":
            # not raw JSON -- assume base64-encoded JSON
            indata = base64.b64decode(indata)
        # BUG FIX: the env-var payload was previously handed to
        # dictlib.union() as a raw str/bytes; parse it into a dict first
        # (json.loads accepts both str and bytes).
        indata = json.loads(indata)
    else:
        self.NOTIFY("Using default configuration")

    conf = dictlib.union(default_conf, indata)
    conf['threads']['result']['function'] = self.handler_thread
    conf['threads']['worker']['function'] = self.worker_thread
    self.NOTIFY("Starting monitor Agent")
    try:
        self.configure(conf).start()
    except KeyboardInterrupt:
        # signal every worker/housekeeping thread to stop cleanly
        self.thread_stopper.set()
        if self.refresh_stopper:
            self.refresh_stopper.set()
        if self.heartbeat_stopper:
            self.heartbeat_stopper.set()
        if self.reporting_stopper:
            self.reporting_stopper.set()
def rest_update(self, *args, **kwargs):
    """ update

    REST handler for instance pings: upsert an instance identified by
    args[0] (all-digits => numeric id, otherwise name), recording the
    caller's source IP. Missing objects are created from a skeleton.
    """
    attrs = self.abac_gather()
    if not attrs.token_nbr:  # check policy instead
        self.auth_fail("Unauthorized")
    if not args:
        return self.respond({"status": "failed"}, status=404)
    body = get_json_body()
    target = args[0]
    # hostnames can be all digits
    if target and re.sub('[^\\d]', '', target) == target:
        body['id'] = int(target)
    else:
        body['name'] = target

    # record where we saw them come in from
    if not body.get('address'):
        body['address'] = {}
    body['address']['ping-from-ip'] = attrs['ip']

    obj = self.obj(master=self.server.dbm, reqid=self.reqid)
    try:
        # merge the new info with the current object
        data = obj.get(target, attrs).dump()
    except dbo.ObjectNotFound as err:
        # first ping from this instance: start from an empty skeleton
        data = obj.skeleton()
    try:
        obj.load(dictlib.union(data, body))
        warnings = obj.update(attrs)
    except dbo.ObjectNotFound as err:
        self.respond_failure({
            "status": "failed",
            "message": str(err)
        }, status=404)
    except dbo.InvalidParameter as err:
        self.respond_failure({
            "status": "failed",
            "message": str(err)
        }, status=400)
    except dbo.NoChanges as err:
        self.respond_failure({
            "status": "unknown",
            "message": str(err)
        }, status=202)
    # NOTE(review): assumes respond_failure() aborts the request;
    # otherwise `warnings` would be unbound here -- confirm.
    if warnings:
        return self.respond(
            {
                "status": "updated",
                "warning": "; ".join(warnings)
            }, status=201)
    return self.respond({"status": "updated"}, status=201)
def dmerge_inner(conf, new, key):
    """Merge new[key] over conf[key] when new carries a truthy value for key."""
    if key not in new or not new[key]:
        return
    vals_are_same(key, new, conf)
    conf[key] = dictlib.union(new[key], conf[key])
def start(self, test=True):
    """ Startup script for webhook routing. Called from agent start

    Configures cherrypy/stdlib logging, loads configuration from docker
    secrets or the SERVER_CONFIG environment variable (base64 or raw
    JSON) over built-in defaults, mounts endpoints, starts the
    housekeeper and heartbeat monitor, and blocks in the cherrypy
    engine loop.

    :param test: True enables test mode; False sets cherrypy to
        production environment.
    """
    cherrypy.log = logger.CherryLog()
    cherrypy.config.update({
        'log.screen': False,
        'log.access_file': '',
        'log.error_file': ''
    })
    cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
    # route all logging (including cherrypy access/error) through our logger
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'custom': {
                '()': 'server.logger.Logger'
            }
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'class': 'server.logger.Logger',
                'formatter': 'custom',
                'stream': 'ext://sys.stdout'
            }
        },
        'loggers': {
            '': {
                'handlers': ['console'],
                'level': 'INFO'
            },
            'cherrypy.access': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
            'cherrypy.error': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
        }
    })

    # lots of legacy stuff here which isn't used
    defaults = {
        'deploy_ver': 0,  # usable for deployment tools
        'server': {
            'route_base': '/api/v1',
            'port': 64000,
            'host': '0.0.0.0'
        },
        'heartbeat': 10,
        'status_report': 3600,  # every hour
        'requestid': True,
        'refresh_maps': 300,
        'cache': {
            'housekeeper': 60,
            'policies': 300,
            'sessions': 300,
            'groups': 300
        },
        'auth': {
            'expires': 300
        }
    }
    cfgin = None

    # try docker secrets
    if os.path.exists("/run/secrets/SERVER_CONFIG"):
        with open("/run/secrets/SERVER_CONFIG") as infile:
            cfgin = infile.read()

    # try environ
    if not cfgin:
        cfgin = os.environ.get('SERVER_CONFIG')

    if cfgin:
        # accept base64-wrapped JSON first, then raw JSON
        try:
            cfgin = json2data(base64.b64decode(cfgin))
        except:  # pylint: disable=bare-except
            try:
                cfgin = json2data(cfgin)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                logger.abort("Cannot process SERVER_CONFIG: " + str(err) + " from " + cfgin)
        conf = Dict(dictlib.union(defaults, cfgin))
    else:
        logger.log("Unable to find configuration, using defaults!")
        conf = Dict(defaults)

    # cherry py global
    cherry_conf = {
        'server.socket_port': 64000,
        'server.socket_host': '0.0.0.0'
    }

    if dictlib.dig_get(conf, 'server.port'):  # .get('port'):
        cherry_conf['server.socket_port'] = int(conf.server.port)
    if dictlib.dig_get(conf, 'server.host'):  # .get('host'):
        cherry_conf['server.socket_host'] = conf.server.host

    # if production mode
    if test:
        logger.log("Test mode enabled", type="notice")
        conf['test_mode'] = True
    else:
        cherry_conf['environment'] = 'production'
        conf['test_mode'] = False

    sys.stdout.flush()

    cherrypy.config.update(cherry_conf)
    cherrypy.config.update({'engine.autoreload.on': False})
    self.conf = conf

    sys.path.append('.')

    # # eventually
    # for mod in self.endpoint_names:
    #     self.add_endpoint(mod)

    # hack for now
    # from . import polyform as polyform
    from server.endpoints import polyform
    self.add_endpoint('polyform', polyform)

    # startup cleaning interval
    def housekeeper(server):
        # call each endpoint's housekeeper; failures are logged, not fatal
        for endpoint in server.endpoints:
            try:
                endpoint.handler.housekeeper(server)
            except:  # pylint: disable=bare-except
                traceback.print_exc()

    timeinterval.start(conf.auth.expires * 1000, housekeeper, self)

    # mount routes
    cherrypy.tree.mount(http.Health(server=self),
                        conf.server.route_base + "/health",
                        self.endpoint_conf)

    int_mon = cherrypy.process.plugins.Monitor(cherrypy.engine,
                                               self.monitor,
                                               frequency=conf.heartbeat/2)
    int_mon.start()

    # whew, now start the server
    logger.log("Base path={}".format(conf.server.route_base), type="notice")
    cherrypy.engine.start()
    cherrypy.engine.block()
def start(self, test=True):
    """ Startup script for webhook routing. Called from agent start

    Reflex Engine server startup: configures logging, loads config from
    docker secrets or REFLEX_ENGINE_CONFIG (base64 or raw JSON) over
    defaults, connects the database and cache, initializes the schema,
    starts periodic key-cleaning and policy-remap jobs, mounts all REST
    endpoints, and blocks in the cherrypy engine loop.

    :param test: True enables test mode; False sets cherrypy to
        production environment.
    """
    cherrypy.log = CherryLog()
    cherrypy.config.update({
        'log.screen': False,
        'log.access_file': '',
        'log.error_file': ''
    })
    cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
    # route all logging (including cherrypy access/error) through our logger
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'custom': {
                '()': 'rfxengine.server.cherry.Logger'
            }
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'class': 'rfxengine.server.cherry.Logger',  # logging.StreamHandler',
                'formatter': 'custom',
                'stream': 'ext://sys.stdout'
            }
        },
        'loggers': {
            '': {
                'handlers': ['console'],
                'level': 'INFO'
            },
            'cherrypy.access': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
            'cherrypy.error': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
        }
    })

    defaults = {
        'deploy_ver': 0,  # usable for deployment tools
        'server': {
            'route_base': '/api/v1',
            'port': 54000,
            'host': '0.0.0.0'
        },
        'heartbeat': 10,
        'status_report': 3600,  # every hour
        'requestid': False,
        'refresh_maps': 300,
        'cache': {
            'housekeeper': 60,
            'policies': 300,
            'sessions': 300,
            'groups': 300
        },
        'crypto': {
            # pylint: disable=bad-continuation
            # '000': { # dd if=/dev...
            #     'key': "",
            #     'default': True,
            # }
        },
        'db': {
            'database': 'reflex_engine',
            'user': '******'
        },
        'auth': {
            'expires': 300
        }
    }
    cfgin = None

    # try docker secrets
    if os.path.exists("/run/secrets/REFLEX_ENGINE_CONFIG"):
        with open("/run/secrets/REFLEX_ENGINE_CONFIG") as infile:
            cfgin = infile.read()

    # try environ
    if not cfgin:
        cfgin = os.environ.get('REFLEX_ENGINE_CONFIG')

    if cfgin:
        # accept base64-wrapped JSON first, then raw JSON
        try:
            cfgin = json2data(base64.b64decode(cfgin))
        except:  # pylint: disable=bare-except
            try:
                cfgin = json2data(cfgin)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                self.ABORT("Cannot process REFLEX_ENGINE_CONFIG: " +
                           str(err) + " from " + cfgin)
        conf = dictlib.Obj(dictlib.union(defaults, cfgin))
    else:
        self.NOTIFY("Unable to find configuration, using defaults!")
        conf = dictlib.Obj(defaults)

    # cherry py global
    cherry_conf = {
        'server.socket_port': 9000,
        'server.socket_host': '0.0.0.0'
    }

    if dictlib.dig_get(conf, 'server.port'):  # .get('port'):
        cherry_conf['server.socket_port'] = int(conf.server.port)
    if dictlib.dig_get(conf, 'server.host'):  # .get('host'):
        cherry_conf['server.socket_host'] = conf.server.host

    # if production mode
    if test:
        log("Test mode enabled", type="notice")
        conf['test_mode'] = True
    else:
        cherry_conf['environment'] = 'production'
        conf['test_mode'] = False

    # db connection
    self.dbm = mxsql.Master(config=conf.db,
                            base=self,
                            crypto=conf.get('crypto'))

    # configure the cache
    self.dbm.cache = rfxengine.memstate.Cache(**conf.cache.__export__())
    self.dbm.cache.start_housekeeper(conf.cache.housekeeper)

    # schema
    schema = dbo.Schema(master=self.dbm)
    schema.initialize(verbose=False, reset=False)

    sys.stdout.flush()
    cherrypy.config.update(cherry_conf)

    endpoint_conf = {
        '/': {
            'response.headers.server': "stack",
            'tools.secureheaders.on': True,
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'request.method_with_bodies': ('PUT', 'POST', 'PATCH'),
        }
    }
    cherrypy.config.update({'engine.autoreload.on': False})
    self.conf = conf

    # startup cleaning interval
    def clean_keys(dbm):
        """periodically called to purge expired auth keys from db"""
        dbo.AuthSession(master=dbm).clean_keys()

    timeinterval.start(conf.auth.expires * 1000, clean_keys, self.dbm)

    # recheck policymaps every so often
    def check_policymaps(dbm):
        """
        periodically remap policy maps, in case somebody was fidgeting
        where they shouldn't be
        """
        dbo.Policyscope(master=dbm).remap_all()

    timeinterval.start(conf.refresh_maps * 1000, check_policymaps, self.dbm)

    # mount routes
    cherrypy.tree.mount(endpoints.Health(conf, server=self),
                        conf.server.route_base + "/health",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Token(conf, server=self),
                        conf.server.route_base + "/token",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="config"),
                        conf.server.route_base + "/config",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="service"),
                        conf.server.route_base + "/service",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="pipeline"),
                        conf.server.route_base + "/pipeline",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="instance"),
                        conf.server.route_base + "/instance",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="build"),
                        conf.server.route_base + "/build",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="group"),
                        conf.server.route_base + "/group",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="apikey"),
                        conf.server.route_base + "/apikey",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="policy"),
                        conf.server.route_base + "/policy",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="policyscope"),
                        conf.server.route_base + "/policyscope",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="state"),
                        conf.server.route_base + "/state",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.InstancePing(conf, server=self),
                        conf.server.route_base + "/instance-ping",
                        endpoint_conf)
    # cherrypy.tree.mount(endpoints.Compose(conf, server=self),
    #                     conf.server.route_base + "/compose",
    #                     endpoint_conf)

    # setup our heartbeat monitor
    int_mon = cherrypy.process.plugins.Monitor(cherrypy.engine,
                                               self.monitor,
                                               frequency=conf.heartbeat/2)
    int_mon.start()

    log("Base path={}".format(conf.server.route_base), type="notice")
    cherrypy.engine.start()
    cherrypy.engine.block()