def aconf(app):
    """Return a Config for the most recent generated config directory.

    Globs "<prefix>-*", keeps only entries whose suffix is purely numeric,
    and loads the highest-numbered one; falls back to the bare prefix when
    none qualify. Also fires a Scout report, throttled to once per hour.
    """
    # We update the module-level throttle timestamp below.
    global last_scout_update

    configs = glob.glob("%s-*" % app.config_dir_prefix)

    # Keep only entries whose suffix is purely numeric: the glob can also
    # match things like "<prefix>-87-envoy.json", where int() would raise
    # ValueError during the sort.
    configs = [c for c in configs if c.split("-")[-1].isdigit()]

    if configs:
        configs.sort(key=lambda x: int(x.split("-")[-1]))
        latest = configs[-1]
    else:
        latest = app.config_dir_prefix

    aconf = Config(latest)

    # How long since the last Scout update? If it's been more than an hour,
    # check Scout again.
    now = datetime.datetime.now()

    if (now - last_scout_update) > datetime.timedelta(hours=1):
        # Record this check. Without this, the throttle never reset and every
        # call after the first hour reported to Scout again.
        last_scout_update = now

        uptime = now - boot_time
        hr_uptime = td_format(uptime)

        result = Config.scout_report(mode="diagd", runtime=Config.runtime,
                                     uptime=int(uptime.total_seconds()),
                                     hr_uptime=hr_uptime)

        app.logger.debug("Scout reports %s" % json.dumps(result))

    return aconf
def generate_config(self, output):
    """Write the accumulated configs into `output`, generate an Envoy config
    from them, and validate it with the envoy binary.

    Returns the path to the validated Envoy config file; raises ValueError
    if generation or validation fails.
    """
    # Start from a clean output directory.
    if os.path.exists(output):
        shutil.rmtree(output)

    os.makedirs(output)

    for filename, config in self.configs.items():
        path = os.path.join(output, filename)

        with open(path, "w") as fd:
            fd.write(config)

        print("Wrote %s to %s" % (filename, path))

    aconf = Config(output)
    rc = aconf.generate_envoy_config(mode="kubewatch")

    print("Scout reports %s" % json.dumps(rc.scout_result))

    if rc:
        envoy_config = "%s-%s" % (output, "envoy.json")

        # Use a context manager so the file is flushed and closed before the
        # envoy binary reads it (the handle was previously never closed).
        with open(envoy_config, "w") as out:
            aconf.pretty(rc.envoy_config, out=out)

        try:
            result = subprocess.check_output(["/usr/local/bin/envoy",
                                              "--base-id", "1",
                                              "--mode", "validate",
                                              "-c", envoy_config])

            # envoy's validate mode ends its output with "... OK" on success.
            if result.strip().endswith(b" OK"):
                print("Configuration %s valid" % envoy_config)
                return envoy_config
        except subprocess.CalledProcessError:
            # Validation failed: dump the offending config for debugging.
            print("Invalid envoy config")

            with open(envoy_config) as fd:
                print(fd.read())
    else:
        print("Could not generate new Envoy configuration: %s" % rc.error)
        print("Raw template output:")
        print("%s" % rc.raw)

    raise ValueError("Unable to generate config")
def aconf(app):
    """Return a Config for the most recent generated config directory.

    Globs "<prefix>-*", keeps only entries whose suffix is a pure integer
    (the glob can also pick up e.g. "<prefix>-87-envoy.json"), and loads the
    highest-numbered one; falls back to the bare prefix when none qualify.
    Also fires a Scout report.
    """
    configs = glob.glob("%s-*" % app.config_dir_prefix)

    keyfunc = lambda x: x.split("-")[-1]
    # Raw string: '\d' in a plain literal is a deprecated invalid escape.
    key_match = lambda x: re.match(r'^\d+$', keyfunc(x))
    key_as_int = lambda x: int(keyfunc(x))

    configs = sorted(filter(key_match, configs), key=key_as_int)

    # Check the FILTERED list: the previous code tested the raw glob results,
    # so a match list with no numeric suffixes crashed on configs[-1].
    if configs:
        latest = configs[-1]
    else:
        latest = app.config_dir_prefix

    aconf = Config(latest)

    uptime = datetime.datetime.now() - boot_time
    hr_uptime = td_format(uptime)

    result = Config.scout_report(mode="diagd", runtime=Config.runtime,
                                 uptime=int(uptime.total_seconds()),
                                 hr_uptime=hr_uptime)

    app.logger.info("Scout reports %s" % json.dumps(result))

    return aconf
def generate_config(self, output):
    """Write the accumulated configs into `output`, generate an Envoy config
    from them (tagged with the current restart generation), and validate it
    with the envoy binary.

    Returns the path to the validated Envoy config file; raises ValueError
    if generation or validation fails.
    """
    # Start from a clean output directory.
    if os.path.exists(output):
        shutil.rmtree(output)

    os.makedirs(output)

    for filename, config in self.configs.items():
        path = os.path.join(output, filename)

        with open(path, "w") as fd:
            fd.write(config)

        logger.debug("Wrote %s to %s" % (filename, path))

    changes = self.changes()
    plural = "" if (changes == 1) else "s"

    logger.info("generating config with gencount %d (%d change%s)" %
                (self.restart_count, changes, plural))

    aconf = Config(output)
    rc = aconf.generate_envoy_config(mode="kubewatch",
                                     generation_count=self.restart_count)

    logger.info("Scout reports %s" % json.dumps(rc.scout_result))

    if rc:
        envoy_config = "%s-%s" % (output, "envoy.json")

        # Use a context manager so the file is flushed and closed before the
        # envoy binary reads it (the handle was previously never closed).
        with open(envoy_config, "w") as out:
            aconf.pretty(rc.envoy_config, out=out)

        try:
            result = subprocess.check_output(["/usr/local/bin/envoy",
                                              "--base-id", "1",
                                              "--mode", "validate",
                                              "-c", envoy_config])

            # envoy's validate mode ends its output with "... OK" on success.
            if result.strip().endswith(b" OK"):
                logger.debug("Configuration %s valid" % envoy_config)
                return envoy_config
        except subprocess.CalledProcessError:
            # Validation failed: dump the offending config for debugging.
            logger.info("Invalid envoy config")

            with open(envoy_config) as fd:
                logger.info(fd.read())
    else:
        logger.info("Could not generate new Envoy configuration: %s" % rc.error)
        logger.info("Raw template output:")
        logger.info("%s" % rc.raw)

    raise ValueError("Unable to generate config")
def aconf(app):
    """Return a Config for the most recent generated config directory.

    Picks the highest-numbered "<prefix>-N" directory if any exist, falling
    back to the bare prefix. Also fires a Scout report.
    """
    configs = glob.glob("%s-*" % app.config_dir_prefix)

    # Keep only entries whose suffix is purely numeric: the glob can also
    # match things like "<prefix>-87-envoy.json", where int() would raise
    # ValueError during the sort.
    configs = [c for c in configs if c.split("-")[-1].isdigit()]

    if configs:
        configs.sort(key=lambda x: int(x.split("-")[-1]))
        latest = configs[-1]
    else:
        latest = app.config_dir_prefix

    aconf = Config(latest)

    uptime = datetime.datetime.now() - boot_time
    hr_uptime = td_format(uptime)

    result = Config.scout_report(mode="diagd", runtime=Config.runtime,
                                 uptime=int(uptime.total_seconds()),
                                 hr_uptime=hr_uptime)

    app.logger.info("Scout reports %s" % json.dumps(result))

    return aconf
def diag_paranoia(configdir, outputdir):
    """Cross-check diagd's diagnostic overview against per-source intermediates.

    Loads the config in `configdir`, rebuilds ("reconstitutes") the overview
    from each source's intermediate config, and diffs the two renderings.
    Returns a dict with 'errors', 'warnings', and the prettified 'overview'
    and 'reconstituted' texts.
    """
    # NOTE(review): `outputdir` is accepted but never used in this body —
    # confirm whether callers still rely on passing it.
    aconf = Config(configdir)
    ov = aconf.diagnostic_overview()

    reconstituted = {}
    errors = []
    warnings = []
    missing_uniqifiers = {}

    # One entry per source file from the overview: its filename plus the
    # object keys it defines.
    source_info = [
        {
            "filename": x['filename'],
            "sources": [ key for key in x['objects'].keys() ]
        }
        for x in ov['sources']
    ]

    # The synthetic "--internal--" source isn't in the overview's source list,
    # so prepend it by hand.
    source_info.insert(0, {
        "filename": "--internal--",
        "sources": [ "--internal--" ]
    })

    for si in source_info:
        source_filename = si['filename']

        for source_key in si['sources']:
            intermediate = aconf.get_intermediate_for(source_key)

            # print("==== %s" % source_key)
            # print(prettify(intermediate))

            for key in intermediate.keys():
                if key == 'clusters':
                    # Clusters get special treatment: the same cluster may be
                    # produced by several sources, so merge by cluster name
                    # and track which sources referenced it.
                    rclusters = reconstituted.setdefault('clusters', {})

                    for cluster in intermediate[key]:
                        cname = cluster['name']
                        csource = cluster['_source']

                        if cname not in rclusters:
                            # First sighting: copy it and start the reference list.
                            rclusters[cname] = dict(**cluster)
                            rclusters[cname]['_referenced_by'] = [ source_key ]
                            # print("%s: new cluster %s" % (source_key, prettify(rclusters[cname])))
                        else:
                            rcluster = rclusters[cname]
                            # print("%s: extant cluster %s" % (source_key, prettify(rclusters[cname])))

                            if not mark_referenced_by(rcluster, source_key) and (source_key != "--internal--"):
                                errors.append('%s: already appears in cluster %s?' % (source_key, rcluster['name']))

                            # Every non-bookkeeping field must agree with the
                            # first definition we saw...
                            for ckey in sorted(cluster.keys()):
                                if ckey == '_referenced_by':
                                    continue

                                if cluster[ckey] != rcluster[ckey]:
                                    errors.append("%s: cluster %s doesn't match %s for %s" %
                                                  (source_key, cname, rcluster['_source'], ckey))

                            # ...and no field from the first definition may be
                            # missing here either.
                            for rkey in sorted(rcluster.keys()):
                                if rkey not in cluster:
                                    errors.append('%s: cluster %s is missing key %s from source %s' %
                                                  (source_key, cname, rkey, rcluster['_source']))
                else:
                    # Other things are a touch more straightforward, just need to work out a unique
                    # key for them.
                    uniqifier = Uniqifiers.get(key, None)

                    if not uniqifier:
                        # Warn only once per missing key kind.
                        if not key in missing_uniqifiers:
                            warnings.append("missing uniqifier for %s" % key)
                            missing_uniqifiers[key] = True

                        continue

                    for obj in intermediate[key]:
                        # print(obj)
                        u = uniqifier(obj)

                        rcon = reconstituted.setdefault(key, {})

                        if u in rcon:
                            # Seen before: a different _source redefining the
                            # same unique key is an error; the same _source is
                            # just another reference.
                            if obj['_source'] != rcon[u]['_source']:
                                errors.append('%s: %s %s already defined by %s' % (source_key, key, u, prettify(rcon[u])))
                            else:
                                mark_referenced_by(rcon[u], obj['_source'])
                        else:
                            rcon[u] = obj

                        # Keep reference lists in deterministic order so the
                        # later diff is stable.
                        if '_referenced_by' in rcon[u]:
                            rcon[u]['_referenced_by'].sort()

    # OK. After all that, flip the dictionaries in reconstituted back into lists...
    reconstituted_lists = {}

    for key in reconstituted:
        if key == 'sources':
            # Special work here: reassemble source files from objects.
            sources = {}

            for source_key, obj in reconstituted['sources'].items():
                # print(obj)
                s = sources.setdefault(obj['filename'], {
                    'count': 0,
                    'error_count': 0,
                    'filename': obj['filename'],
                    'objects': {}
                })

                s['count'] += 1
                s['objects'][source_key] = {
                    'errors': obj['errors'],
                    'key': source_key,
                    'kind': obj['kind']
                }
                s['error_count'] += len(obj['errors'])

            # Presentation helpers for the diagnostics UI.
            for s in sources.values():
                s['error_plural'] = "error" if (s['error_count'] == 1) else "errors"
                s['plural'] = "object" if (s['count'] == 1) else "objects"

            # Finally, sort 'em all.
            reconstituted_lists['sources'] = sorted(sources.values(), key=lambda x: x['filename'])
        else:
            # Not the list of sources. Grab the uniqifier...
            uniqifier = Uniqifiers.get(key, lambda x: x.get('name', None))

            reconstituted_lists[key] = sorted(reconstituted[key].values(), key=uniqifier)

    # # If there's no listener block in the reconstituted set, that implies that
    # # the configuration doesn't override the listener state. Go ahead and add the
    # # default in.

    # l = reconstituted_lists.get('listeners', [])

    # if not l:
    #     reconstituted_lists['listeners'] = []

    # If there're no 'filters' in the reconstituted set, uh, there were no filters
    # defined. Create an empty list.
    if 'filters' not in reconstituted_lists:
        reconstituted_lists['filters'] = []

    # OK. Next, filter out the '--internal--' stuff from our overview, and sort
    # _referenced_by.
    filtered = filtered_overview(ov)

    pretty_filtered_overview = prettify(filtered)
    pretty_reconstituted_lists = prettify(reconstituted_lists)

    # Any textual difference between the two renderings is itself an error.
    udiff = list(difflib.unified_diff(pretty_filtered_overview.split("\n"),
                                      pretty_reconstituted_lists.split("\n"),
                                      fromfile="from overview",
                                      tofile="from intermediates",
                                      lineterm=""))

    if udiff:
        errors.append("%s\n-- DIFF --\n%s\n" %
                      ("mismatch between overview and reconstituted diagnostics",
                       "\n".join(udiff)))

    return {
        'errors': errors,
        'warnings': warnings,
        'overview': pretty_filtered_overview,
        'reconstituted': pretty_reconstituted_lists
    }
def diag_paranoia(configdir, outputdir):
    """Cross-check diagd's diagnostic overview against per-source intermediates.

    Loads the config in `configdir`, rebuilds ("reconstitutes") the overview
    from each source's intermediate config, and diffs the two renderings.
    Returns a dict with 'errors', 'warnings', and the prettified 'overview'
    and 'reconstituted' texts.
    """
    # NOTE(review): `outputdir` is accepted but never used in this body —
    # confirm whether callers still rely on passing it.
    aconf = Config(configdir)
    ov = aconf.diagnostic_overview()

    reconstituted = {}
    errors = []
    warnings = []
    missing_uniqifiers = {}

    # One entry per source file from the overview: its filename plus the
    # object keys it defines.
    source_info = [
        {
            "filename": x['filename'],
            "sources": [ key for key in x['objects'].keys() ]
        }
        for x in ov['sources']
    ]

    # The synthetic "--internal--" source isn't in the overview's source list,
    # so prepend it by hand.
    source_info.insert(0, {
        "filename": "--internal--",
        "sources": [ "--internal--" ]
    })

    for si in source_info:
        source_filename = si['filename']

        for source_key in si['sources']:
            intermediate = aconf.get_intermediate_for(source_key)

            # print("==== %s" % source_key)
            # print(prettify(intermediate))

            for key in intermediate.keys():
                if key == 'clusters':
                    # Clusters get special treatment: the same cluster may be
                    # produced by several sources, so merge by cluster name
                    # and track which sources referenced it.
                    rclusters = reconstituted.setdefault('clusters', {})

                    for cluster in intermediate[key]:
                        cname = cluster['name']
                        csource = cluster['_source']

                        if cname not in rclusters:
                            # First sighting: copy it and start the reference list.
                            rclusters[cname] = dict(**cluster)
                            rclusters[cname]['_referenced_by'] = [ source_key ]
                            # print("%s: new cluster %s" % (source_key, prettify(rclusters[cname])))
                        else:
                            rcluster = rclusters[cname]
                            # print("%s: extant cluster %s" % (source_key, prettify(rclusters[cname])))

                            if not mark_referenced_by(rcluster, source_key) and (source_key != "--internal--"):
                                errors.append('%s: already appears in cluster %s?' % (source_key, rcluster['name']))

                            # Every non-bookkeeping field must agree with the
                            # first definition we saw...
                            for ckey in sorted(cluster.keys()):
                                if ckey == '_referenced_by':
                                    continue

                                if cluster[ckey] != rcluster[ckey]:
                                    errors.append("%s: cluster %s doesn't match %s for %s" %
                                                  (source_key, cname, rcluster['_source'], ckey))

                            # ...and no field from the first definition may be
                            # missing here either.
                            for rkey in sorted(rcluster.keys()):
                                if rkey not in cluster:
                                    errors.append('%s: cluster %s is missing key %s from source %s' %
                                                  (source_key, cname, rkey, rcluster['_source']))
                else:
                    # Other things are a touch more straightforward, just need to work out a unique
                    # key for them.
                    uniqifier = Uniqifiers.get(key, None)

                    if not uniqifier:
                        # Warn only once per missing key kind.
                        if not key in missing_uniqifiers:
                            warnings.append("missing uniqifier for %s" % key)
                            missing_uniqifiers[key] = True

                        continue

                    for obj in intermediate[key]:
                        # print(obj)
                        u = uniqifier(obj)

                        rcon = reconstituted.setdefault(key, {})

                        if u in rcon:
                            # Seen before: a different _source redefining the
                            # same unique key is an error; the same _source is
                            # just another reference.
                            if obj['_source'] != rcon[u]['_source']:
                                errors.append('%s: %s %s already defined by %s' % (source_key, key, u, prettify(rcon[u])))
                            else:
                                mark_referenced_by(rcon[u], obj['_source'])
                        else:
                            rcon[u] = obj

                        # Keep reference lists in deterministic order so the
                        # later diff is stable.
                        if '_referenced_by' in rcon[u]:
                            rcon[u]['_referenced_by'].sort()

    # OK. After all that, flip the dictionaries in reconstituted back into lists...
    reconstituted_lists = {}

    for key in reconstituted:
        if key == 'sources':
            # Special work here: reassemble source files from objects.
            sources = {}

            for source_key, obj in reconstituted['sources'].items():
                # print(obj)
                s = sources.setdefault(obj['filename'], {
                    'count': 0,
                    'error_count': 0,
                    'filename': obj['filename'],
                    'objects': {}
                })

                s['count'] += 1
                s['objects'][source_key] = {
                    'errors': obj['errors'],
                    'key': source_key,
                    'kind': obj['kind']
                }
                s['error_count'] += len(obj['errors'])

            # Presentation helpers for the diagnostics UI.
            for s in sources.values():
                s['error_plural'] = "error" if (s['error_count'] == 1) else "errors"
                s['plural'] = "object" if (s['count'] == 1) else "objects"

            # Finally, sort 'em all.
            reconstituted_lists['sources'] = sorted(sources.values(), key=lambda x: x['filename'])
        else:
            # Not the list of sources. Grab the uniqifier...
            uniqifier = Uniqifiers.get(key, lambda x: x.get('name', None))

            reconstituted_lists[key] = sorted(reconstituted[key].values(), key=uniqifier)

    # # If there's no listener block in the reconstituted set, that implies that
    # # the configuration doesn't override the listener state. Go ahead and add the
    # # default in.

    # l = reconstituted_lists.get('listeners', [])

    # if not l:
    #     reconstituted_lists['listeners'] = []

    # If there're no 'filters' in the reconstituted set, uh, there were no filters
    # defined. Create an empty list.
    if 'filters' not in reconstituted_lists:
        reconstituted_lists['filters'] = []

    # Copy any 'extauth' block from the original into the reconstituted list.
    if ('extauth' in ov) and ('extauth' not in reconstituted_lists):
        reconstituted_lists['extauth'] = [ ov['extauth'] ]

    # OK. Next, filter out the '--internal--' stuff from our overview, and sort
    # _referenced_by.
    filtered = filtered_overview(ov)

    pretty_filtered_overview = prettify(filtered)
    pretty_reconstituted_lists = prettify(reconstituted_lists)

    # Any textual difference between the two renderings is itself an error.
    udiff = list(difflib.unified_diff(pretty_filtered_overview.split("\n"),
                                      pretty_reconstituted_lists.split("\n"),
                                      fromfile="from overview",
                                      tofile="from reconstituted",
                                      lineterm=""))

    if udiff:
        errors.append("%s\n-- DIFF --\n%s\n" %
                      ("mismatch between overview and reconstituted diagnostics",
                       "\n".join(udiff)))

    return {
        'errors': errors,
        'warnings': warnings,
        'overview': pretty_filtered_overview,
        'reconstituted': pretty_reconstituted_lists
    }