Code example #1
    def test_mapping_v1(self):
        aconf = Config()
        mgr = ResourceManager(logger, aconf)

        assert AmbassadorProcessor(mgr).try_process(valid_mapping_v1)
        assert len(mgr.elements) == 1

        aconf.load_all(mgr.elements)
        assert len(aconf.errors) == 0

        mappings = aconf.get_config('mappings')
        assert len(mappings) == 1

        mapping = next(iter(mappings.values()))
        assert mapping.apiVersion == valid_mapping_v1.gvk.api_version
        assert mapping.name == valid_mapping_v1.name
        assert mapping.namespace == valid_mapping_v1.namespace
        assert mapping.prefix == valid_mapping_v1.spec['prefix']
        assert mapping.service == valid_mapping_v1.spec['service']
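The test above reads only a handful of attributes off the valid_mapping_v1 fixture: gvk.api_version, name, namespace, and the 'prefix' and 'service' keys of spec. As a purely illustrative stand-in (the class and values below are invented; the real fixture in the Ambassador test suite is presumably built from an actual Kubernetes Mapping object), something of this shape would satisfy those reads:

# Hypothetical stand-in for the valid_mapping_v1 fixture used above. Only the
# attributes the test touches are modeled; names and values are assumptions,
# not the project's real fixture.
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class FakeGVK:
    api_version: str = "getambassador.io/v1"
    kind: str = "Mapping"


@dataclass
class FakeMappingResource:
    name: str = "test-mapping"
    namespace: str = "default"
    gvk: FakeGVK = field(default_factory=FakeGVK)
    spec: Dict[str, Any] = field(default_factory=lambda: {
        "prefix": "/test/",
        "service": "test-service",
    })


valid_mapping_v1 = FakeMappingResource()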
Code example #2
File: watch_hook.py  Project: jfrabaute/ambassador
class WatchHook:
    def __init__(self, logger, yaml_stream) -> None:
        # Watch management

        self.logger = logger

        self.consul_watches: List[Dict[str, str]] = []
        self.kube_watches: List[Dict[str, str]] = []

        self.load_yaml(yaml_stream)

    def add_kube_watch(self, what: str, kind: str, namespace: Optional[str],
                       field_selector: Optional[str]=None, label_selector: Optional[str]=None) -> None:
        watch = { "kind": kind }

        if namespace:
            watch["namespace"] = namespace

        if field_selector:
            watch["field-selector"] = field_selector

        if label_selector:
            watch["label-selector"] = label_selector

        self.logger.debug(f"{what}: add watch {watch}")
        self.kube_watches.append(watch)

    def load_yaml(self, yaml_stream):
        self.aconf = Config()

        fetcher = ResourceFetcher(self.logger, self.aconf, watch_only=True)
        fetcher.parse_watt(yaml_stream.read())

        self.aconf.load_all(fetcher.sorted())

        # We can lift mappings straight from the aconf...
        mappings = self.aconf.get_config('mappings') or {}

        # ...but we need the fake IR to deal with resolvers and TLS contexts.
        self.fake = FakeIR(self.aconf, logger=self.logger)

        self.logger.debug("IR: %s" % self.fake.as_json())

        resolvers = self.fake.resolvers
        contexts = self.fake.tls_contexts

        self.logger.debug(f'mappings: {len(mappings)}')
        self.logger.debug(f'resolvers: {len(resolvers)}')
        self.logger.debug(f'contexts: {len(contexts)}')

        global_resolver = self.fake.ambassador_module.get('resolver', None)

        global_label_selector = os.environ.get('AMBASSADOR_LABEL_SELECTOR', '')
        self.logger.debug('label-selector: %s' % global_label_selector)

        # watch the AES Secret if the edge stack is running
        if self.fake.edge_stack_allowed:
            aes_secret_name = os.getenv(ENV_AES_SECRET_NAME, DEFAULT_AES_SECRET_NAME)
            aes_secret_namespace = os.getenv(ENV_AES_SECRET_NAMESPACE, Config.ambassador_namespace)
            self.logger.debug(f'edge stack detected: need secret {aes_secret_name}.{aes_secret_namespace}')
            self.add_kube_watch(f'Secret {aes_secret_name}', 'secret', namespace=aes_secret_namespace,
                                field_selector=f"metadata.name={aes_secret_name}")

        # Walk hosts.
        for host in self.fake.get_hosts():
            sel = host.get('selector') or {}
            match_labels = sel.get('matchLabels') or {}

            label_selectors: List[str] = []

            if global_label_selector:
                label_selectors.append(global_label_selector)

            if match_labels:
                label_selectors += [ f"{l}={v}" for l, v in match_labels.items() ]

            label_selector = ','.join(label_selectors) if label_selectors else None

            for wanted_kind in ['service', 'secret']:
                self.add_kube_watch(f"Host {host.name}", wanted_kind, host.namespace,
                                    label_selector=label_selector)

        for mname, mapping in mappings.items():
            res_name = mapping.get('resolver', None)
            res_source = 'mapping'

            if not res_name:
                res_name = global_resolver
                res_source = 'defaults'

            ctx_name = mapping.get('tls', None)

            self.logger.debug(
                f'Mapping {mname}: resolver {res_name} from {res_source}, service {mapping.service}, tls {ctx_name}')

            if res_name:
                resolver = resolvers.get(res_name, None)
                self.logger.debug(f'-> resolver {resolver}')

                if resolver:
                    svc = Service(self.logger, mapping.service, ctx_name)

                    if resolver.kind == 'ConsulResolver':
                        self.logger.debug(f'Mapping {mname} uses Consul resolver {res_name}')

                        # At the moment, we stuff the resolver's datacenter into the association
                        # ID for this watch. The ResourceFetcher relies on that.

                        self.consul_watches.append(
                            {
                                "id": resolver.datacenter,
                                "consul-address": resolver.address,
                                "datacenter": resolver.datacenter,
                                "service-name": svc.hostname
                            }
                        )
                    elif resolver.kind == 'KubernetesEndpointResolver':
                        host = svc.hostname
                        namespace = Config.ambassador_namespace

                        if not host:
                            # This is really kind of impossible.
                            self.logger.error(f"KubernetesEndpointResolver {res_name} has no 'hostname'")
                            continue

                        if "." in host:
                            (host, namespace) = host.split(".", 2)[0:2]

                        self.logger.debug(f'...kube endpoints: svc {svc.hostname} -> host {host} namespace {namespace}')

                        self.add_kube_watch(f"endpoint", "endpoints", namespace,
                                            label_selector=global_label_selector,
                                            field_selector=f"metadata.name={host}")

        for secret_key, secret_info in self.fake.secret_recorder.needed.items():
            self.logger.debug(f'need secret {secret_info.name}.{secret_info.namespace}')

            self.add_kube_watch(f"needed secret", "secret", secret_info.namespace,
                                label_selector=global_label_selector,
                                field_selector=f"metadata.name={secret_info.name}")

        if self.fake.edge_stack_allowed:
            # If the edge stack is allowed, make sure we watch for our fallback context.
            self.add_kube_watch("Fallback TLSContext", "TLSContext", namespace=Config.ambassador_namespace)

        ambassador_basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')

        if os.path.exists(os.path.join(ambassador_basedir, '.ambassadorinstallations_ok')):
            self.add_kube_watch("AmbassadorInstallations", "ambassadorinstallations.getambassador.io", Config.ambassador_namespace)

        ambassador_knative_requested = (os.environ.get("AMBASSADOR_KNATIVE_SUPPORT", "-unset-").lower() == 'true')

        if ambassador_knative_requested:
            self.logger.debug('Looking for Knative support...')

            if os.path.exists(os.path.join(ambassador_basedir, '.knative_clusteringress_ok')):
                # Watch for clusteringresses.networking.internal.knative.dev in any namespace and with any labels.

                self.logger.debug('watching for clusteringresses.networking.internal.knative.dev')
                self.add_kube_watch("Knative clusteringresses", "clusteringresses.networking.internal.knative.dev",
                                    None)

            if os.path.exists(os.path.join(ambassador_basedir, '.knative_ingress_ok')):
                # Watch for ingresses.networking.internal.knative.dev in any namespace and
                # with any labels.

                self.add_kube_watch("Knative ingresses", "ingresses.networking.internal.knative.dev", None)

        self.watchset = {
            "kubernetes-watches": self.kube_watches,
            "consul-watches": self.consul_watches
        }

        save_dir = os.environ.get('AMBASSADOR_WATCH_DIR', '/tmp')

        if save_dir:
            watchset = dump_json(self.watchset)
            with open(os.path.join(save_dir, 'watch.json'), "w") as output:
                output.write(watchset)
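WatchHook only needs a logger and a readable stream containing a watt snapshot; after construction, its watchset attribute holds the computed Kubernetes and Consul watches. A rough usage sketch follows (the snapshot path and logging setup are made up, and the import assumes watch_hook.py is importable as a module):

# Rough usage sketch for the WatchHook class above. The snapshot path is
# illustrative; the project's own mainline (examples #3 and #4) reads the
# snapshot from stdin or from a file given on the command line instead.
import logging

from watch_hook import WatchHook

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("watch_hook")

with open("/tmp/snapshot.yaml", "r") as yaml_stream:
    hook = WatchHook(logger, yaml_stream)

# hook.watchset has the shape:
# {"kubernetes-watches": [...], "consul-watches": [...]}
print(hook.watchset)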
Code example #3
#### Mainline.

yaml_stream = sys.stdin

# 'args' comes from command-line parsing above this excerpt; a file argument,
# if given, overrides stdin.
if args:
    yaml_stream = open(args[0], "r")

aconf = Config()
fetcher = ResourceFetcher(logger, aconf)
fetcher.parse_watt(yaml_stream.read())

aconf.load_all(fetcher.sorted())

# We can lift mappings straight from the aconf...
mappings = aconf.get_config('mappings') or {}

# ...but we need the fake IR to deal with resolvers and TLS contexts.
fake = FakeIR(logger, aconf)

logger.debug("FakeIR: %s" % fake.as_json())

resolvers = fake.resolvers
contexts = fake.tls_contexts

logger.debug(f'mappings: {len(mappings)}')
logger.debug(f'resolvers: {len(resolvers)}')
logger.debug(f'contexts: {len(contexts)}')

consul_watches = []
kube_watches = []
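In this older, module-level variant, these two lists get filled with the same dictionaries that WatchHook.add_kube_watch and the ConsulResolver branch build in example #2. A sketch of the entry shapes, using the field names from the code above with invented values:

# Shapes of the entries that end up in the two watch lists, mirroring
# example #2. The concrete values below are invented for illustration.
kube_watches.append({
    "kind": "secret",
    "namespace": "default",
    "field-selector": "metadata.name=my-tls-secret",
    "label-selector": "app=example",
})

consul_watches.append({
    "id": "dc1",                        # the resolver's datacenter doubles as the watch id
    "consul-address": "consul.example.com:8500",
    "datacenter": "dc1",
    "service-name": "my-service",
})

# The final watchset handed to the watcher:
watchset = {
    "kubernetes-watches": kube_watches,
    "consul-watches": consul_watches,
}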
Code example #4
        # If the port is unset, fix it up.
        if not self.port:
            self.port = 443 if originate_tls else 80

yaml_stream = sys.stdin

if len(sys.argv) > 1:
    yaml_stream = open(sys.argv[1], "r")

aconf = Config()
fetcher = ResourceFetcher(logger, aconf)
fetcher.parse_watt(yaml_stream.read())

aconf.load_all(fetcher.sorted())

mappings = aconf.get_config('mappings') or {}
resolvers = aconf.get_config('resolvers') or {}
contexts = aconf.get_config('tls_contexts') or {}
secrets = aconf.get_config('secret') or {}  # 'secret', singular, is not a typo

consul_watches = []

for mname, mapping in mappings.items():
    res_name = mapping.get('resolver', None)
    ctx_name = mapping.get('tls', None)

    if res_name:
        resolver = resolvers.get(res_name, None)

        if resolver:
            if resolver.kind == 'ConsulResolver':