Example #1
    def load(self, manifest: KubeList) -> WattDict:
        collected: Dict[str, Dict[str, KubeResource]] = {}
        watt_k8s: WattDict = {}

        self.logger.info("LOADING:")

        for spec in self.watch_specs.values():
            self.logger.debug(f"{repr(spec)}")

        for obj in manifest:
            metadata = obj.get('metadata') or {}
            name = metadata.get('name')

            if not name:
                self.logger.debug(f"skipping unnamed object {obj}")
                continue

            # self.logger.debug(f"consider {obj}")

            for w in self.watch_specs.values():
                m = w.match(obj)

                if m:
                    by_type = collected.setdefault(m.kind, {})

                    # Keep only the first object we see with a given name;
                    # a later match with the same name is a duplicate find.
                    if name not in by_type:
                        by_type[name] = obj

        # Once that's all done, flatten everything.
        for kind in collected.keys():
            watt_k8s[kind] = list(collected[kind].values())

        self.snapshot = dump_json({
            'Consul': {},
            'Kubernetes': watt_k8s
        }, pretty=True)

        return watt_k8s
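
A minimal usage sketch for the load() above. The instance name mockery and the parsed manifest are illustrative assumptions; the sketch just walks the flattened WattDict that load() returns:

# Hypothetical sketch: `mockery` is an instance of the class defining load(),
# `manifest` is an already-parsed KubeList. Both names are assumptions.
watt_k8s = mockery.load(manifest)

# Each key is a Kubernetes kind matched by some watch spec; each value is the
# de-duplicated list of objects of that kind.
for kind, objects in watt_k8s.items():
    names = [obj.get('metadata', {}).get('name') for obj in objects]
    print(f"{kind}: {len(objects)} object(s): {', '.join(n for n in names if n)}")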
Example #2
def main(k8s_yaml_paths: List[str],
         debug: bool,
         force_pod_labels: bool,
         update: bool,
         source: List[str],
         labels: List[str],
         namespace: Optional[str],
         watch: str,
         include_ir: bool,
         include_aconf: bool,
         diff_path: Optional[str] = None,
         kat_name: Optional[str] = None) -> None:
    loglevel = logging.DEBUG if debug else logging.INFO

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s mockery %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")

    logger = logging.getLogger('mockery')

    logger.debug(f"reading from {k8s_yaml_paths}")

    if not source:
        source = [
            "Host", "service", "ingresses", "AuthService", "LogService",
            "Mapping", "Module", "RateLimitService", "TCPMapping",
            "TLSContext", "TracingService", "ConsulResolver",
            "KubernetesEndpointResolver", "KubernetesServiceResolver"
        ]

    if namespace:
        os.environ['AMBASSADOR_NAMESPACE'] = namespace

    # Make labels a list, instead of a tuple.
    labels = list(labels)
    labels_to_force = {l: True for l in labels or []}

    if kat_name:
        logger.debug(f"KAT name {kat_name}")

        # First set up some labels to force.

        labels_to_force["scope=AmbassadorTest"] = True
        labels_to_force[f"service={kat_name}"] = True

        kat_amb_id_label = f"kat-ambassador-id={kat_name}"

        if kat_amb_id_label not in labels_to_force:
            labels_to_force[kat_amb_id_label] = True
            labels.append(kat_amb_id_label)

        os.environ['AMBASSADOR_ID'] = kat_name

        # Forcibly override the cached ambassador_id.
        Config.ambassador_id = kat_name

    logger.debug(f"namespace {namespace or '*'}")
    logger.debug(f"labels to watch {', '.join(labels)}")
    logger.debug(
        f"labels to force {', '.join(sorted(labels_to_force.keys()))}")
    logger.debug(f"watch hook {watch}")
    logger.debug(f"sources {', '.join(source)}")

    for key in sorted(os.environ.keys()):
        if key.startswith('AMBASSADOR'):
            logger.debug(f"${key}={os.environ[key]}")

    if force_pod_labels:
        try:
            os.makedirs("/tmp/ambassador-pod-info")
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        with open("/tmp/ambassador-pod-info/labels", "w",
                  encoding="utf-8") as outfile:
            for l in labels_to_force:
                outfile.write(l)
                outfile.write("\n")

    # Pull in the YAML.
    input_yaml = ''.join([open(x, "r").read() for x in k8s_yaml_paths])
    manifest = parse_yaml(input_yaml)

    w = Mockery(logger, debug, source, ",".join(labels), namespace, watch)

    iteration = 0

    # Re-run the watch hook against the same manifest until the set of
    # watches it produces stops changing.
    while True:
        iteration += 1

        if iteration > 10:
            print("!!!! Not stable after 10 iterations, failing")
            logger.error("Not stable after 10 iterations, failing")
            sys.exit(1)

        logger.info(f"======== START ITERATION {iteration}")

        w.load(manifest)

        logger.info(f"WATT_K8S: {w.snapshot}")

        hook_ok, any_changes = w.run_hook()

        if not hook_ok:
            raise Exception("hook failed")

        if any_changes:
            logger.info(
                f"======== END ITERATION {iteration}: watches changed!")
        else:
            logger.info(f"======== END ITERATION {iteration}: stable!")
            break

    # Once here, we should be good to go.
    try:
        os.makedirs("/tmp/ambassador/snapshots")
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    scc = MockSecretHandler(logger, "mockery", "/tmp/ambassador/snapshots",
                            f"v{iteration}")

    aconf = Config()

    logger.debug(f"Config.ambassador_id {Config.ambassador_id}")
    logger.debug(f"Config.ambassador_namespace {Config.ambassador_namespace}")

    logger.info(f"STABLE WATT_K8S: {w.snapshot}")

    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_watt(w.snapshot)
    aconf.load_all(fetcher.sorted())

    with open("/tmp/ambassador/snapshots/aconf.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(aconf.as_json())

    ir = IR(aconf, secret_handler=scc)

    with open("/tmp/ambassador/snapshots/ir.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(ir.as_json())

    econf = EnvoyConfig.generate(ir, "V2")
    bootstrap_config, ads_config, clustermap = econf.split_config()

    ads_config.pop('@type', None)
    with open("/tmp/ambassador/snapshots/econf.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(ads_config, pretty=True))

    with open("/tmp/ambassador/snapshots/bootstrap.json",
              "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(bootstrap_config, pretty=True))

    diag = Diagnostics(ir, econf)

    with open("/tmp/ambassador/snapshots/diag.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(diag.as_dict(), pretty=True))

    if diff_path:
        diffs = False

        pairs_to_check = [(os.path.join(diff_path, 'snapshots', 'econf.json'),
                           '/tmp/ambassador/snapshots/econf.json'),
                          (os.path.join(diff_path, 'bootstrap-ads.json'),
                           '/tmp/ambassador/snapshots/bootstrap.json')]

        if include_ir:
            pairs_to_check.append(
                (os.path.join(diff_path, 'snapshots',
                              'ir.json'), '/tmp/ambassador/snapshots/ir.json'))

        if include_aconf:
            pairs_to_check.append((os.path.join(diff_path, 'snapshots',
                                                'aconf.json'),
                                   '/tmp/ambassador/snapshots/aconf.json'))

        for gold_path, check_path in pairs_to_check:
            if update:
                logger.info(f"mv {check_path} {gold_path}")
                shutil.move(check_path, gold_path)
            elif not filecmp.cmp(gold_path, check_path):
                diffs = True

                gold_lines = open(gold_path, "r", encoding="utf-8").readlines()
                check_lines = open(check_path, "r",
                                   encoding="utf-8").readlines()

                for line in difflib.unified_diff(gold_lines,
                                                 check_lines,
                                                 fromfile=gold_path,
                                                 tofile=check_path):
                    sys.stdout.write(line)

        if diffs:
            sys.exit(1)
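
A hedged sketch of calling main() directly as a plain function. Every literal value below (paths, label, namespace, watch-hook command) is an illustrative assumption, not taken from the source:

# Hypothetical invocation sketch for the main() above; diff_path and kat_name
# keep their defaults of None.
main(
    k8s_yaml_paths=["./k8s/ambassador.yaml", "./k8s/backends.yaml"],
    debug=True,
    force_pod_labels=False,
    update=False,
    source=[],                      # empty: fall back to the built-in source list
    labels=["scope=AmbassadorTest"],
    namespace="default",
    watch="python watch_hook.py",
    include_ir=True,
    include_aconf=False,
)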
Example #3
def dump(config_dir_path: Parameter.REQUIRED,
         *,
         secret_dir_path=None,
         watt=False,
         debug=False,
         debug_scout=False,
         k8s=False,
         recurse=False,
         stats=False,
         nopretty=False,
         everything=False,
         aconf=False,
         ir=False,
         v2=False,
         v3=False,
         diag=False,
         features=False,
         profile=False):
    """
    Dump various forms of an Ambassador configuration for debugging

    Use --aconf, --ir, --v2, --v3, --diag, and --features to control what gets dumped.
    If none are requested, the aconf, the IR, and the Envoy V2 config will be dumped.

    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    :param secret_dir_path: Directory into which to save secrets
    :param watt: If set, input must be a WATT snapshot
    :param debug: If set, generate debugging output
    :param debug_scout: If set, generate debugging output for Scout
    :param k8s: If set, assume configuration files are annotated K8s manifests
    :param recurse: If set, recurse into directories below config_dir_path
    :param stats: If set, dump statistics to stderr
    :param nopretty: If set, do not pretty print the dumped JSON
    :param aconf: If set, dump the Ambassador config
    :param ir: If set, dump the IR
    :param v2: If set, dump the Envoy V2 config
    :param v3: If set, dump the Envoy V3 config
    :param diag: If set, dump the Diagnostics overview
    :param everything: If set, dump everything
    :param features: If set, dump the feature set
    :param profile: If set, profile with the cProfile module
    """

    if not secret_dir_path:
        secret_dir_path = "/tmp/cli-secrets"

        if not os.path.isdir(secret_dir_path):
            secret_dir_path = os.path.dirname(secret_dir_path)

    if debug:
        logger.setLevel(logging.DEBUG)

    if debug_scout:
        logging.getLogger('ambassador.scout').setLevel(logging.DEBUG)

    if everything:
        aconf = True
        ir = True
        v2 = True
        v3 = True
        diag = True
        features = True
    elif not (aconf or ir or v2 or v3 or diag or features):
        # Nothing was requested explicitly: default to dumping the aconf,
        # the IR, and the Envoy V2 config.
        aconf = True
        ir = True
        v2 = True
        v3 = False
        diag = False
        features = False

    dump_aconf = aconf
    dump_ir = ir
    dump_v2 = v2
    dump_v3 = v3
    dump_diag = diag
    dump_features = features

    od = {}
    diagconfig: Optional[EnvoyConfig] = None

    _profile: Optional[cProfile.Profile] = None
    _rc = 0

    if profile:
        _profile = cProfile.Profile()
        _profile.enable()

    try:
        total_timer = Timer("total")
        total_timer.start()

        fetch_timer = Timer("fetch resources")
        with fetch_timer:
            aconf = Config()

            fetcher = ResourceFetcher(logger, aconf)

            if watt:
                fetcher.parse_watt(open(config_dir_path, "r").read())
            else:
                fetcher.load_from_filesystem(config_dir_path,
                                             k8s=k8s,
                                             recurse=recurse)

        load_timer = Timer("load fetched resources")
        with load_timer:
            aconf.load_all(fetcher.sorted())

        # aconf.post_error("Error from string, boo yah")
        # aconf.post_error(RichStatus.fromError("Error from RichStatus"))

        irgen_timer = Timer("ir generation")
        with irgen_timer:
            secret_handler = NullSecretHandler(logger, config_dir_path,
                                               secret_dir_path, "0")

            ir = IR(aconf,
                    file_checker=file_checker,
                    secret_handler=secret_handler)

        aconf_timer = Timer("aconf")
        with aconf_timer:
            if dump_aconf:
                od['aconf'] = aconf.as_dict()

        ir_timer = Timer("ir")
        with ir_timer:
            if dump_ir:
                od['ir'] = ir.as_dict()

        v2_timer = Timer("v2")
        with v2_timer:
            if dump_v2:
                v2config = V2Config(ir)
                diagconfig = v2config
                od['v2'] = v2config.as_dict()
        v3_timer = Timer("v3")
        with v3_timer:
            if dump_v3:
                v3config = V3Config(ir)
                diagconfig = v3config
                od['v3'] = v3config.as_dict()
        diag_timer = Timer("diag")
        with diag_timer:
            if dump_diag:
                if not diagconfig:
                    diagconfig = V2Config(ir)
                # Always build a V3 config for the V3 diagnostics; diagconfig
                # may already hold the Envoy config generated above.
                diagconfigv3 = V3Config(ir)
                econf = typecast(EnvoyConfig, diagconfig)
                econfv3 = typecast(EnvoyConfig, diagconfigv3)
                diag = Diagnostics(ir, econf)
                diagv3 = Diagnostics(ir, econfv3)
                od['diag'] = diag.as_dict()
                od['elements'] = econf.elements
                od['diagv3'] = diagv3.as_dict()
                od['elementsv3'] = econfv3.elements

        features_timer = Timer("features")
        with features_timer:
            if dump_features:
                od['features'] = ir.features()

        # scout = Scout()
        # scout_args = {}
        #
        # if ir and not os.environ.get("AMBASSADOR_DISABLE_FEATURES", None):
        #     scout_args["features"] = ir.features()
        #
        # result = scout.report(action="dump", mode="cli", **scout_args)
        # show_notices(result)

        dump_timer = Timer("dump JSON")

        with dump_timer:
            js = dump_json(od, pretty=not nopretty)
            jslen = len(js)

        write_timer = Timer("write JSON")
        with write_timer:
            sys.stdout.write(js)
            sys.stdout.write("\n")

        total_timer.stop()

        route_count = 0
        vhost_count = 0
        filter_chain_count = 0
        filter_count = 0
        apiversion = 'v2' if v2 else 'v3'
        if apiversion in od:
            for listener in od[apiversion]['static_resources']['listeners']:
                for fc in listener['filter_chains']:
                    filter_chain_count += 1
                    for f in fc['filters']:
                        filter_count += 1
                        for vh in f['typed_config']['route_config'][
                                'virtual_hosts']:
                            vhost_count += 1
                            route_count += len(vh['routes'])

        if stats:
            sys.stderr.write("STATS:\n")
            sys.stderr.write("  config bytes:  %d\n" % jslen)
            sys.stderr.write("  vhosts:        %d\n" % vhost_count)
            sys.stderr.write("  filter chains: %d\n" % filter_chain_count)
            sys.stderr.write("  filters:       %d\n" % filter_count)
            sys.stderr.write("  routes:        %d\n" % route_count)
            sys.stderr.write("  routes/vhosts: %.3f\n" %
                             (route_count / vhost_count if vhost_count else 0.0))
            sys.stderr.write("TIMERS:\n")
            sys.stderr.write("  fetch resources:  %.3fs\n" %
                             fetch_timer.average)
            sys.stderr.write("  load resources:   %.3fs\n" %
                             load_timer.average)
            sys.stderr.write("  ir generation:    %.3fs\n" %
                             irgen_timer.average)
            sys.stderr.write("  aconf:            %.3fs\n" %
                             aconf_timer.average)
            sys.stderr.write("  envoy v2:         %.3fs\n" % v2_timer.average)
            sys.stderr.write("  envoy v3:         %.3fs\n" % v3_timer.average)
            sys.stderr.write("  diag:             %.3fs\n" %
                             diag_timer.average)
            sys.stderr.write("  features:         %.3fs\n" %
                             features_timer.average)
            sys.stderr.write("  dump json:        %.3fs\n" %
                             dump_timer.average)
            sys.stderr.write("  write json:       %.3fs\n" %
                             write_timer.average)
            sys.stderr.write("  ----------------------\n")
            sys.stderr.write("  total: %.3fs\n" % total_timer.average)
    except Exception as e:
        handle_exception("EXCEPTION from dump",
                         e,
                         config_dir_path=config_dir_path)
        _rc = 1

    if _profile:
        _profile.disable()
        _profile.dump_stats("ambassador.profile")

    sys.exit(_rc)
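
The stats walk near the end of dump() is easy to lift out on its own. Below is a self-contained sketch of the same traversal over an already-generated Envoy config dict; the helper name count_envoy_objects is hypothetical, and it assumes HTTP connection manager filters whose typed_config carries an inline route_config, exactly as the walk above does:

from typing import Any, Dict, Tuple


def count_envoy_objects(envoy_config: Dict[str, Any]) -> Tuple[int, int, int, int]:
    # Count filter chains, filters, virtual hosts, and routes in a dumped
    # Envoy config, mirroring the stats traversal in dump() above.
    filter_chain_count = filter_count = vhost_count = route_count = 0

    for listener in envoy_config['static_resources']['listeners']:
        for fc in listener['filter_chains']:
            filter_chain_count += 1
            for f in fc['filters']:
                filter_count += 1
                for vh in f['typed_config']['route_config']['virtual_hosts']:
                    vhost_count += 1
                    route_count += len(vh['routes'])

    return filter_chain_count, filter_count, vhost_count, route_count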
Example #4
    def load_yaml(self, yaml_stream):
        self.aconf = Config()

        fetcher = ResourceFetcher(self.logger, self.aconf, watch_only=True)
        fetcher.parse_watt(yaml_stream.read())

        self.aconf.load_all(fetcher.sorted())

        # We can lift mappings straight from the aconf...
        mappings = self.aconf.get_config('mappings') or {}

        # ...but we need the fake IR to deal with resolvers and TLS contexts.
        self.fake = FakeIR(self.aconf, logger=self.logger)

        self.logger.debug("IR: %s" % self.fake.as_json())

        resolvers = self.fake.resolvers
        contexts = self.fake.tls_contexts

        self.logger.debug(f'mappings: {len(mappings)}')
        self.logger.debug(f'resolvers: {len(resolvers)}')
        self.logger.debug(f'contexts: {len(contexts)}')

        global_resolver = self.fake.ambassador_module.get('resolver', None)

        global_label_selector = os.environ.get('AMBASSADOR_LABEL_SELECTOR', '')
        self.logger.debug('label-selector: %s' % global_label_selector)

        # watch the AES Secret if the edge stack is running
        if self.fake.edge_stack_allowed:
            aes_secret_name = os.getenv(ENV_AES_SECRET_NAME, DEFAULT_AES_SECRET_NAME)
            aes_secret_namespace = os.getenv(ENV_AES_SECRET_NAMESPACE, Config.ambassador_namespace)
            self.logger.debug(f'edge stack detected: need secret {aes_secret_name}.{aes_secret_namespace}')
            self.add_kube_watch(f'Secret {aes_secret_name}', 'secret', namespace=aes_secret_namespace,
                                field_selector=f"metadata.name={aes_secret_name}")

        # Walk hosts.
        for host in self.fake.get_hosts():
            sel = host.get('selector') or {}
            match_labels = sel.get('matchLabels') or {}

            label_selectors: List[str] = []

            if global_label_selector:
                label_selectors.append(global_label_selector)

            if match_labels:
                label_selectors += [ f"{l}={v}" for l, v in match_labels.items() ]

            label_selector = ','.join(label_selectors) if label_selectors else None

            for wanted_kind in ['service', 'secret']:
                self.add_kube_watch(f"Host {host.name}", wanted_kind, host.namespace,
                                    label_selector=label_selector)

        for mname, mapping in mappings.items():
            res_name = mapping.get('resolver', None)
            res_source = 'mapping'

            if not res_name:
                res_name = global_resolver
                res_source = 'defaults'

            ctx_name = mapping.get('tls', None)

            self.logger.debug(
                f'Mapping {mname}: resolver {res_name} from {res_source}, service {mapping.service}, tls {ctx_name}')

            if res_name:
                resolver = resolvers.get(res_name, None)
                self.logger.debug(f'-> resolver {resolver}')

                if resolver:
                    svc = Service(self.logger, mapping.service, ctx_name)

                    if resolver.kind == 'ConsulResolver':
                        self.logger.debug(f'Mapping {mname} uses Consul resolver {res_name}')

                        # At the moment, we stuff the resolver's datacenter into the association
                        # ID for this watch. The ResourceFetcher relies on that.

                        self.consul_watches.append(
                            {
                                "id": resolver.datacenter,
                                "consul-address": resolver.address,
                                "datacenter": resolver.datacenter,
                                "service-name": svc.hostname
                            }
                        )
                    elif resolver.kind == 'KubernetesEndpointResolver':
                        host = svc.hostname
                        namespace = Config.ambassador_namespace

                        if not host:
                            # This is really kind of impossible.
                            self.logger.error(f"Mapping {mname} has no service hostname for resolver {res_name}")
                            continue

                        if "." in host:
                            (host, namespace) = host.split(".", 2)[0:2]

                        self.logger.debug(f'...kube endpoints: svc {svc.hostname} -> host {host} namespace {namespace}')

                        self.add_kube_watch("endpoint", "endpoints", namespace,
                                            label_selector=global_label_selector,
                                            field_selector=f"metadata.name={host}")

        for secret_key, secret_info in self.fake.secret_recorder.needed.items():
            self.logger.debug(f'need secret {secret_info.name}.{secret_info.namespace}')

            self.add_kube_watch("needed secret", "secret", secret_info.namespace,
                                label_selector=global_label_selector,
                                field_selector=f"metadata.name={secret_info.name}")

        if self.fake.edge_stack_allowed:
            # If the edge stack is allowed, make sure we watch for our fallback context.
            self.add_kube_watch("Fallback TLSContext", "TLSContext", namespace=Config.ambassador_namespace)

        ambassador_basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')

        if os.path.exists(os.path.join(ambassador_basedir, '.ambassadorinstallations_ok')):
            self.add_kube_watch("AmbassadorInstallations", "ambassadorinstallations.getambassador.io", Config.ambassador_namespace)

        ambassador_knative_requested = (os.environ.get("AMBASSADOR_KNATIVE_SUPPORT", "-unset-").lower() == 'true')

        if ambassador_knative_requested:
            self.logger.debug('Looking for Knative support...')

            if os.path.exists(os.path.join(ambassador_basedir, '.knative_clusteringress_ok')):
                # Watch for clusteringresses.networking.internal.knative.dev in any namespace and with any labels.

                self.logger.debug('watching for clusteringresses.networking.internal.knative.dev')
                self.add_kube_watch("Knative clusteringresses", "clusteringresses.networking.internal.knative.dev",
                                    None)

            if os.path.exists(os.path.join(ambassador_basedir, '.knative_ingress_ok')):
                # Watch for ingresses.networking.internal.knative.dev in any namespace and
                # with any labels.

                self.add_kube_watch("Knative ingresses", "ingresses.networking.internal.knative.dev", None)

        self.watchset = {
            "kubernetes-watches": self.kube_watches,
            "consul-watches": self.consul_watches
        }

        save_dir = os.environ.get('AMBASSADOR_WATCH_DIR', '/tmp')

        if save_dir:
            watchset = dump_json(self.watchset)
            with open(os.path.join(save_dir, 'watch.json'), "w") as output:
                output.write(watchset)
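
For reference, a small sketch of reading back the watch.json written at the end of load_yaml(); the /tmp path matches the default AMBASSADOR_WATCH_DIR above, and the printed fields follow the structures built in the method:

import json

# Hypothetical inspection sketch for the watch.json produced above.
with open('/tmp/watch.json', 'r', encoding='utf-8') as f:
    watchset = json.load(f)

print(f"kubernetes watches: {len(watchset['kubernetes-watches'])}")
print(f"consul watches:     {len(watchset['consul-watches'])}")

# Each Consul watch carries the resolver's datacenter as its association id
# (see the comment in load_yaml() above).
for w in watchset['consul-watches']:
    print(f"  {w['service-name']} @ {w['consul-address']} (datacenter {w['datacenter']})")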
Example #5
    loglevel = logging.INFO

    args = sys.argv[1:]

    if args:
        if args[0] == '--debug':
            loglevel = logging.DEBUG
            args.pop(0)
        elif args[0].startswith('--'):
            raise Exception(f'Usage: {os.path.basename(sys.argv[0])} [--debug] [path]')

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s watch-hook %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )

    alogger = logging.getLogger('ambassador')
    alogger.setLevel(logging.INFO)

    logger = logging.getLogger('watch_hook')
    logger.setLevel(loglevel)

    yaml_stream = sys.stdin

    if args:
        yaml_stream = open(args[0], "r")

    wh = WatchHook(logger, yaml_stream)

    watchset = dump_json(wh.watchset)
    sys.stdout.write(watchset)
Example #6
def helper_snapshot(path: str) -> str:
    snapshot = json.loads(open(path, "r").read())

    return dump_json(sanitize_snapshot(snapshot))
Example #7
def helper_problems(path: str) -> str:
    bad_dict = json.loads(open(path, "r").read())

    bad_dict["snapshot"] = sanitize_snapshot(bad_dict["snapshot"])

    return dump_json(bad_dict)
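
A hypothetical sketch of how these two helpers might be used in a snapshot-comparison test; the gold path and the assertion style are assumptions, not part of the source:

# Hypothetical test sketch: compare a freshly sanitized snapshot against a
# stored "gold" copy. All paths here are illustrative.
def assert_snapshot_matches_gold(snapshot_path: str, gold_path: str) -> None:
    with open(gold_path, "r", encoding="utf-8") as f:
        gold = f.read()

    assert helper_snapshot(snapshot_path) == gold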