Example #1
    def resolve_resolver(self, cluster: IRCluster,
                         resolver_name: Optional[str]) -> IRServiceResolver:
        # Which resolver should we use?
        if not resolver_name:
            resolver_name = self.ambassador_module.get('resolver',
                                                       'kubernetes-service')

        # Casting to str is OK because the Ambassador module's resolver must be a string,
        # so all the paths for resolver_name land with it being a string.
        resolver = self.get_resolver(typecast(str, resolver_name))
        assert resolver is not None
        return resolver
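A note on typecast, which recurs throughout these examples: in this codebase it reads as an alias for typing.cast (from typing import cast as typecast), so it does nothing at runtime and exists only to narrow types for the checker. A minimal sketch of the same narrowing pattern, with the alias and the default resolver name treated as assumptions drawn from the snippet above:

from typing import Optional, cast as typecast

def pick_resolver(resolver_name: Optional[str]) -> str:
    # Fall back to a default when no resolver was named, mirroring the
    # ambassador_module.get('resolver', 'kubernetes-service') lookup above.
    if not resolver_name:
        resolver_name = 'kubernetes-service'

    # typecast (typing.cast) is a runtime no-op; it only tells the type
    # checker that resolver_name is a str on every path that reaches here.
    return typecast(str, resolver_name)

print(pick_resolver(None))        # kubernetes-service
print(pick_resolver('endpoint'))  # endpoint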
Example #2
    def good_ambassador_id(self, resource: dict):
        # Is an ambassador_id present in this object?
        allowed_ids: StringOrList = resource.get('ambassador_id', 'default')

        if allowed_ids:
            # Make sure it's a list. Yes, this is Draconian,
            # but the jsonschema will allow only a string or a list,
            # and guess what? Strings are Iterables.
            if type(allowed_ids) != list:
                allowed_ids = typecast(StringOrList, [allowed_ids])

            return Config.ambassador_id in allowed_ids
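The "Strings are Iterables" comment is the whole point of the list check: without wrapping a bare string, the membership test below degenerates into substring matching. A small self-contained sketch of the normalization, assuming StringOrList is Union[str, List[str]] as the annotation suggests:

from typing import List, Union, cast as typecast

StringOrList = Union[str, List[str]]

def id_allowed(ambassador_id: str, allowed_ids: StringOrList) -> bool:
    # Normalize a bare string to a one-element list; otherwise the 'in' test
    # below would be a substring check rather than list membership.
    if type(allowed_ids) != list:
        allowed_ids = typecast(StringOrList, [allowed_ids])

    return ambassador_id in allowed_ids

print('default' in 'default-cluster')            # True -- the substring trap
print(id_allowed('default', 'default-cluster'))  # False -- list membership
print(id_allowed('default', ['default', 'x']))   # True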
Example #3
    def add_mappings(self, ir: 'IR', aconf: Config):
        # Default host entry: (weight, grpc, ctx_name, location).
        cluster_hosts = self.get('hosts',
                                 {'127.0.0.1:5000': (100, None, None, '-internal-')})

        self.cluster = None
        cluster_good = False

        for service, params in cluster_hosts.items():
            weight, grpc, ctx_name, location = params

            self.logger.debug(
                "IRAuth: svc %s, weight %s, grpc %s, ctx_name %s, location %s"
                % (service, weight, grpc, ctx_name, location))

            cluster = IRCluster(ir=ir,
                                aconf=aconf,
                                location=location,
                                service=service,
                                host_rewrite=self.get('host_rewrite', False),
                                ctx_name=ctx_name,
                                grpc=grpc,
                                marker='extauth')

            cluster.referenced_by(self)

            cluster_good = True

            if self.cluster:
                if not self.cluster.merge(cluster):
                    self.post_error(
                        RichStatus.fromError(
                            "auth canary %s can only change service!" %
                            cluster.name))
                    cluster_good = False
            else:
                self.cluster = cluster

        if cluster_good:
            ir.add_cluster(typecast(IRCluster, self.cluster))
            self.referenced_by(typecast(IRCluster, self.cluster))
Example #4
    def __init__(self, config: 'V2Config') -> None:
        # We should never be instantiated unless there is, in fact, defined ratelimit stuff.
        assert config.ir.ratelimit

        super().__init__()

        ratelimit = typecast(IRRateLimit, config.ir.ratelimit)

        # self['use_data_plane_proto'] = ratelimit.data_plane_proto
        self['grpc_service'] = {
            'envoy_grpc': {
                'cluster_name': ratelimit.cluster.name
            }
        }
Example #5
    def validate_object(self, resource: ACResource) -> RichStatus:
        # This is basically "impossible"
        if not (("apiVersion" in resource) and ("kind" in resource) and
                ("name" in resource)):
            return RichStatus.fromError("must have apiVersion, kind, and name")

        apiVersion = resource.apiVersion

        # Ditch the leading ambassador/ that really needs to be there.
        if apiVersion.startswith("ambassador/"):
            apiVersion = apiVersion.split('/')[1]
        else:
            return RichStatus.fromError("apiVersion %s unsupported" %
                                        apiVersion)

        # Do we already have this schema loaded?
        schema_key = "%s-%s" % (apiVersion, resource.kind)
        schema = self.schemas.get(schema_key, None)

        if not schema:
            # Not loaded. Go find it on disk.
            schema_path = os.path.join(self.schema_dir_path, apiVersion,
                                       "%s.schema" % resource.kind)

            try:
                # Load it up...
                schema = json.load(open(schema_path, "r"))

                # ...and then cache it, if it exists. Note that we'll never
                # get here if we find something that doesn't parse.
                if schema:
                    self.schemas[schema_key] = typecast(Dict[Any, Any], schema)
            except OSError:
                self.logger.debug("no schema at %s, not validating" %
                                  schema_path)
            except json.decoder.JSONDecodeError as e:
                self.logger.warning("corrupt schema at %s, skipping (%s)" %
                                    (schema_path, e))

        if schema:
            # We have a schema. Does the object validate OK?
            try:
                jsonschema.validate(resource.as_dict(), schema)
            except jsonschema.exceptions.ValidationError as e:
                # Nope. Bzzzzt.
                return RichStatus.fromError("not a valid %s: %s" %
                                            (resource.kind, e))

        # All good. Return an OK.
        return RichStatus.OK(msg="valid %s" % resource.kind)
Example #6
    def __init__(self, config: 'V1Config') -> None:
        # We should never be instantiated unless there is, in fact, defined tracing stuff.
        assert config.ir.tracing

        super().__init__()

        tracing = typecast(IRTracing, config.ir.tracing)

        self['http'] = {
            "driver": {
                "type": tracing['driver'],
                "config": tracing['driver_config']
            }
        }
Example #7
    def __init__(self, config: 'V2Config') -> None:
        # We should never be instantiated unless there is, in fact, defined ratelimit stuff.
        assert config.ir.ratelimit

        super().__init__()

        ratelimit = typecast(IRRateLimit, config.ir.ratelimit)

        assert (ratelimit.cluster.envoy_name)

        self['use_alpha'] = True
        self['grpc_service'] = {
            'envoy_grpc': {
                'cluster_name': ratelimit.cluster.envoy_name
            }
        }
Example #8
    def __init__(self, config: 'V3Config') -> None:
        # We should never be instantiated unless there is, in fact, defined tracing stuff.
        assert config.ir.tracing

        super().__init__()

        tracing = typecast(IRTracing, config.ir.tracing)

        name = tracing['driver']

        if not name.startswith('envoy.'):
            name = 'envoy.%s' % (name.lower())

        driver_config = tracing['driver_config']

        # We check for the full 'envoy.tracers.datadog' below because that's how it's set in the
        # IR code. The other tracers are configured by their short name and then 'envoy.' is
        # appended above.
        if name.lower() == 'envoy.zipkin':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v3.ZipkinConfig'
            # The collector_endpoint is mandatory now.
            if not driver_config.get('collector_endpoint'):
                driver_config['collector_endpoint'] = '/api/v2/spans'

            # This is also now required if you don't want your listener rejected
            # https://www.envoyproxy.io/docs/envoy/latest/version_history/v1.12.0.html?highlight=http_json_v1
            # https://github.com/envoyproxy/envoy/blob/ae1ed1fa74f096dabe8dd5b19fc70333621b0309/api/envoy/config/trace/v3/zipkin.proto#L27
            if 'collector_endpoint_version' not in driver_config:
                driver_config['collector_endpoint_version'] = 'HTTP_JSON'
            # Make 128-bit traceid the default
            if 'trace_id_128bit' not in driver_config:
                driver_config['trace_id_128bit'] = True
        elif name.lower() == 'envoy.tracers.datadog':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v3.DatadogConfig'
            if not driver_config.get('service_name'):
                driver_config['service_name'] = 'ambassador'
        elif name.lower() == 'envoy.lightstep':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v3.LightstepConfig'
        else:
            # This should be impossible, because we ought to have validated the input driver
            # in ambassador/pkg/api/getambassador.io/v2/tracingservice_types.go:47
            raise Exception("Unsupported tracing driver \"%s\"" % name.lower())

        self['http'] = {"name": name, "typed_config": driver_config}
Example #9
def v2filter_authv1(auth: IRAuth):
    assert auth.cluster
    cluster = typecast(IRCluster, auth.cluster)

    if auth.api_version != "ambassador/v1":
        auth.ir.logger.warning("IRAuth_v1 working on %s, mismatched at %s" % (auth.name, auth.api_version))

    assert auth.proto

    if auth.proto == "http":
        allowed_authorization_headers = list(set(auth.allowed_authorization_headers).union(AllowedAuthorizationHeaders))
        allowed_request_headers = list(set(auth.allowed_request_headers).union(AllowedRequestHeaders))

        return {
            'name': 'envoy.ext_authz',
            'config': {
                'http_service': {
                    'server_uri': {
                        'uri': auth_cluster_uri(auth, cluster),
                        'cluster': cluster.name,
                        'timeout': "%0.3fs" % (float(auth.timeout_ms) / 1000.0)
                    },
                    'path_prefix': auth.path_prefix,
                    'allowed_authorization_headers': allowed_authorization_headers,
                    'allowed_request_headers': allowed_request_headers,
                },
                'send_request_data': auth.allow_request_body
            }
        }

    if auth.proto == "grpc":
        return {
            'name': 'envoy.ext_authz',
            'config': {
                'grpc_service': {
                    'envoy_grpc': {
                        'cluster_name': cluster.name
                    },
                    'timeout': "%0.3fs" % (float(auth.timeout_ms) / 1000.0)
                },
                'send_request_data': auth.allow_request_body
            }
        }

    # If here, something's gone horribly wrong.
    auth.post_error("Protocol '%s' is not supported, auth not enabled" % auth.proto)
    return None
Example #10
    def __init__(self, config: 'V2Config') -> None:
        # We should never be instantiated unless there is, in fact, defined tracing stuff.
        assert config.ir.tracing

        super().__init__()

        tracing = typecast(IRTracing, config.ir.tracing)

        name = tracing['driver']

        if not name.startswith('envoy.'):
            name = 'envoy.%s' % (name.lower())

        self['http'] = {
            "name": name,
            "config": tracing['driver_config'],
        }
Example #11
    def __init__(self, config: 'V3Config') -> None:
        # We should never be instantiated unless there is, in fact, defined ratelimit stuff.
        assert config.ir.ratelimit

        super().__init__()

        ratelimit = typecast(IRRateLimit, config.ir.ratelimit)

        assert (ratelimit.cluster.envoy_name)

        protocol_version = ratelimit.protocol_version
        self['transport_api_version'] = protocol_version.replace("alpha",
                                                                 "").upper()
        self['grpc_service'] = {
            'envoy_grpc': {
                'cluster_name': ratelimit.cluster.envoy_name
            }
        }
Example #12
    def load_all(self, resources: Iterable[ACResource]) -> None:
        """
        Loads all of a set of ACResources. It is the caller's responsibility to arrange for
        the set of ACResources to be sorted in some way that makes sense.
        """

        rcount = 0

        for resource in resources:
            rcount += 1

            # Is an ambassador_id present in this object?
            allowed_ids: StringOrList = resource.get('ambassador_id', 'default')

            if allowed_ids:
                # Make sure it's a list. Yes, this is Draconian,
                # but the jsonschema will allow only a string or a list,
                # and guess what? Strings are Iterables.
                if type(allowed_ids) != list:
                    allowed_ids = typecast(StringOrList, [ allowed_ids ])

                if Config.ambassador_id not in allowed_ids:
                    # self.logger.debug("LOAD_ALL: skip %s; id %s not in %s" %
                    #                   (resource, Config.ambassador_id, allowed_ids))
                    continue

            self.logger.debug("LOAD_ALL: %s @ %s" % (resource, resource.location))

            rc = self.process(resource)

            if not rc:
                # Object error. Not good but we'll allow the system to start.
                self.post_error(rc, resource=resource)

        self.logger.debug("LOAD_ALL: processed %d resource%s" % (rcount, "" if (rcount == 1) else "s"))

        if self.fatal_errors:
            # Kaboom.
            raise Exception("ERROR ERROR ERROR Unparseable configuration; exiting")

        if self.errors:
            self.logger.error("ERROR ERROR ERROR Starting with configuration errors")
Example #13
    def __init__(self,
                 rkey: str,
                 location: str,
                 *,
                 kind: str,
                 name: Optional[str] = None,
                 namespace: Optional[str] = None,
                 metadata_labels: Optional[str] = None,
                 apiVersion: Optional[str] = "getambassador.io/v0",
                 serialization: Optional[str] = None,
                 **kwargs) -> None:

        if not rkey:
            raise Exception("ACResource requires rkey")

        if not kind:
            raise Exception("ACResource requires kind")

        if (kind != "Pragma") and not name:
            raise Exception("ACResource: %s requires name (%s)" %
                            (kind, repr(kwargs)))

        if not apiVersion:
            raise Exception("ACResource requires apiVersion")

        # This next bit is a little odd -- we don't want a metadata_labels of None
        # to appear with a null value in the dict, so we move it to kwargs if present
        # and don't bother passing it as an explicit keyword argument.

        if metadata_labels:
            kwargs["metadata_labels"] = metadata_labels

        # print("ACResource __init__ (%s %s)" % (kind, name))

        super().__init__(rkey=rkey,
                         location=location,
                         kind=kind,
                         name=name,
                         namespace=namespace,
                         apiVersion=typecast(str, apiVersion),
                         serialization=serialization,
                         **kwargs)
Example #14
    def generate(cls, config: 'V3Config') -> None:
        config.routes = []

        for irgroup in config.ir.ordered_groups():
            if not isinstance(irgroup, IRHTTPMappingGroup):
                # We only want HTTP mapping groups here.
                continue

            if irgroup.get('host_redirect') is not None and len(
                    irgroup.get('mappings', [])) == 0:
                # This is a host-redirect-only group, which is weird, but can happen. Do we
                # have a cached route for it?
                key = f"Route-{irgroup.group_id}-hostredirect"

                # Casting an empty dict to an IRBaseMapping may look weird, but in fact IRBaseMapping
                # is (ultimately) a subclass of dict, so it's the cleanest way to pass in a completely
                # empty IRBaseMapping to V3Route().
                #
                # (We could also have written V3Route to allow the mapping to be Optional, but that
                # makes a lot of its constructor much uglier.)
                route = config.save_element(
                    'route', irgroup,
                    cls.get_route(config, key, irgroup,
                                  typecast(IRBaseMapping, {})))
                config.routes.append(route)

            # Repeat for our real mappings.
            for mapping in irgroup.mappings:
                key = f"Route-{irgroup.group_id}-{mapping.cache_key}"

                route = cls.get_route(config, key, irgroup, mapping)

                if not route.get('_failed', False):
                    config.routes.append(
                        config.save_element('route', irgroup, route))

        # Once that's done, go build the variants on each route.
        config.route_variants = []

        for route in config.routes:
            # Set up a currently-empty set of variants for this route.
            config.route_variants.append(V3RouteVariants(route))
Example #15
    def good_ambassador_id(self, resource: dict):
        resource_kind = resource.get('kind', '').lower()
        if not self.ambassador_id_required(resource_kind):
            self.logger.debug(
                f"Resource: {resource_kind} does not require an Ambassador ID")
            return True

        # Is an ambassador_id present in this object?
        #
        # NOTE WELL: when we update the status of a Host (or a Mapping?) then reserialization
        # can cause the `ambassador_id` element to turn into an `ambassadorId` element. So
        # treat those as synonymous.
        allowed_ids: StringOrList = resource.get('ambassadorId', None)

        if allowed_ids is None:
            allowed_ids = resource.get('ambassador_id', 'default')

        # If we find the array [ '_automatic_' ] then allow it, so that hardcoded resources
        # can have a useful effect. This is mostly for init-config, but could be used for
        # other things, too.

        if allowed_ids == ["_automatic_"]:
            self.logger.debug(f"ambassador_id {allowed_ids} always accepted")
            return True

        if allowed_ids:
            # Make sure it's a list. Yes, this is Draconian,
            # but the jsonschema will allow only a string or a list,
            # and guess what? Strings are Iterables.
            if type(allowed_ids) != list:
                allowed_ids = typecast(StringOrList, [allowed_ids])

            if Config.ambassador_id in allowed_ids:
                return True
            else:
                rkey = resource.get('rkey', '-anonymous-yaml-')
                name = resource.get('name', '-no-name-')

                self.logger.debug(
                    f"{rkey}: {resource_kind} {name} has IDs {allowed_ids}, no match with {Config.ambassador_id}"
                )
                return False
Example #16
    def get_validator(self, apiVersion: str, kind: str) -> Validator:
        schema_key = "%s-%s" % (apiVersion, kind)

        validator = self.validators.get(schema_key, None)

        if not validator:
            validator = self.get_proto_validator(apiVersion, kind)

        if not validator:
            validator = self.get_jsonschema_validator(apiVersion, kind)

        if not validator:
            # Ew. Early binding for Python lambdas is kinda weird.
            validator = typecast(Validator,
                                 lambda resource, args=(apiVersion, kind): self.cannot_validate(*args))

        if validator:
            self.validators[schema_key] = validator

        return validator
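"Early binding" here refers to a standard Python idiom: a closure captures the variable, not its value, so a lambda sees whatever the name holds when the lambda is finally called. Binding the current value as a default argument pins it at definition time, which is what the args=(apiVersion, kind) default above is doing. A quick sketch of the pitfall and the fix:

# Late binding: every lambda closes over the same loop variable, so all of
# them report its final value by the time they are called.
late = [lambda: kind for kind in ('AuthService', 'Mapping', 'Module')]
print([f() for f in late])   # ['Module', 'Module', 'Module']

# Early binding via a default argument: the current value of kind is
# evaluated and stored when each lambda is defined.
early = [lambda kind=kind: kind for kind in ('AuthService', 'Mapping', 'Module')]
print([f() for f in early])  # ['AuthService', 'Mapping', 'Module']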
Example #17
    def __init__(self, config: 'V2Config') -> None:
        # We should never be instantiated unless there is, in fact, defined tracing stuff.
        assert config.ir.tracing

        super().__init__()

        tracing = typecast(IRTracing, config.ir.tracing)

        name = tracing['driver']

        if not name.startswith('envoy.'):
            name = 'envoy.%s' % (name.lower())

        driver_config = tracing['driver_config']

        # We check for the full 'envoy.tracers.datadog' below because that's how it's set in the
        # IR code. The other tracers are configured by their short name and then 'envoy.' is
        # appended above.
        if name.lower() == 'envoy.zipkin':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v2.ZipkinConfig'
            # The collector_endpoint is mandatory now.
            if not driver_config.get('collector_endpoint'):
                driver_config['collector_endpoint'] = '/api/v1/spans'
            # Make 128-bit traceid the default
            if 'trace_id_128bit' not in driver_config:
                driver_config['trace_id_128bit'] = True
        elif name.lower() == 'envoy.tracers.datadog':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v2.DatadogConfig'
            if not driver_config.get('service_name'):
                driver_config['service_name'] = 'ambassador'
        elif name.lower() == 'envoy.lightstep':
            driver_config[
                '@type'] = 'type.googleapis.com/envoy.config.trace.v2.LightstepConfig'
        else:
            # This should be impossible, because we ought to have validated the input driver
            # in ambassador/pkg/api/getambassador.io/v2/tracingservice_types.go:47
            raise Exception("Unsupported tracing driver \"%s\"" % name.lower())

        self['http'] = {"name": name, "typed_config": driver_config}
Example #18
    def good_ambassador_id(self, resource: dict):
        resource_kind = resource.get('kind')
        if resource_kind in self.KnativeResources:
            self.logger.debug(f"Knative resource: {resource_kind} does not require an Ambassador ID")
            return True

        # Is an ambassador_id present in this object?
        allowed_ids: StringOrList = resource.get('ambassador_id', 'default')

        if allowed_ids:
            # Make sure it's a list. Yes, this is Draconian,
            # but the jsonschema will allow only a string or a list,
            # and guess what? Strings are Iterables.
            if type(allowed_ids) != list:
                allowed_ids = typecast(StringOrList, [ allowed_ids ])

            if Config.ambassador_id in allowed_ids:
                return True
            else:
                self.logger.debug(f"Ambassador ID {Config.ambassador_id} does not exist in allowed IDs {allowed_ids}")
                self.logger.debug(resource)
                return False
Example #19
def v2filter_authv0(auth: IRAuth):
    assert auth.cluster
    cluster = typecast(IRCluster, auth.cluster)

    assert auth.api_version == "ambassador/v0"

    # This preserves almost exactly the same logic prior to ambassador/v1 implementation.
    request_headers = dict(ExtAuthRequestHeaders)

    for hdr in auth.allowed_headers:
        request_headers[hdr] = True

    # Always allow the default set, above. This may be a slight behavior change from the
    # v0 config, but it seems to aid usability.

    hdrs = set(auth.allowed_headers or [])  # turn list into a set
    hdrs.update(AllowedAuthorizationHeaders)  # merge in a frozenset

    allowed_authorization_headers = sorted(
        hdrs)  # sorted() turns the set back into a list

    allowed_request_headers = sorted(request_headers.keys())

    return {
        'name': 'envoy.ext_authz',
        'config': {
            'http_service': {
                'server_uri': {
                    'uri': auth_cluster_uri(auth, cluster),
                    'cluster': cluster.name,
                    'timeout': "%0.3fs" % (float(auth.timeout_ms) / 1000.0)
                },
                'path_prefix': auth.path_prefix,
                'allowed_authorization_headers': allowed_authorization_headers,
                'allowed_request_headers': allowed_request_headers,
            },
            'send_request_data': auth.allow_request_body
        }
    }
Example #20
    def resolve_targets(self, cluster: IRCluster, resolver_name: Optional[str],
                        hostname: str, namespace: str, port: int) -> Optional[SvcEndpointSet]:
        # Is the host already an IP address?
        is_ip_address = False

        try:
            x = ip_address(hostname)
            is_ip_address = True
        except ValueError:
            pass

        if is_ip_address:
            # Already an IP address, great.
            self.logger.debug(f'cluster {cluster.name}: {hostname} is already an IP address')

            return [
                {
                    'ip': hostname,
                    'port': port,
                    'target_kind': 'IPaddr'
                }
            ]

        # Which resolver should we use?
        if not resolver_name:
            resolver_name = self.ambassador_module.get('resolver', 'kubernetes-service')

        # Casting to str is OK because the Ambassador module's resolver must be a string,
        # so all the paths for resolver_name land with it being a string.
        resolver = self.get_resolver(typecast(str, resolver_name))

        # It should not be possible for resolver to be unset here.
        if not resolver:
            self.post_error(f"cluster {cluster.name} has invalid resolver {resolver_name}?", rkey=cluster.rkey)
            return None

        # OK, ask the resolver for the target list. Understanding the mechanics of resolution
        # and the load balancer policy and all that is up to the resolver.
        return resolver.resolve(self, cluster, hostname, namespace, port)
Example #21
    def get_proto_validator(self, apiVersion, kind) -> Optional[Validator]:
        # See if we can import a protoclass...
        proto_modname = f"ambassador.proto.{apiVersion}.{kind}_pb2"
        proto_classname = f"{kind}Spec"
        m = None

        try:
            m = importlib.import_module(proto_modname)
        except ModuleNotFoundError:
            self.logger.debug(f"no proto in {proto_modname}")
            return None

        protoclass = getattr(m, proto_classname, None)

        if not protoclass:
            self.logger.debug(f"no class {proto_classname} in {proto_modname}")
            return None

        self.logger.debug(f"using validate_with_proto for getambassador.io/{apiVersion} {kind}")

        # Ew. Early binding for Python lambdas is kinda weird.
        return typecast(Validator,
                        lambda resource, protoclass=protoclass: self.validate_with_proto(resource, protoclass))
Example #22
    def merge(self, other: 'IRCluster') -> bool:
        # Is this mergeable?

        mismatches = []

        for key in [
                'type', 'lb_type', 'host_rewrite', 'tls_context',
                'originate_tls', 'grpc', 'connect_timeout_ms',
                'cluster_idle_timeout_ms', 'cluster_max_connection_lifetime_ms'
        ]:
            if self.get(key, None) != other.get(key, None):
                mismatches.append(key)

        if mismatches:
            self.post_error(
                RichStatus.fromError(
                    "cannot merge cluster %s: mismatched attributes %s" %
                    (other.name, ", ".join(mismatches))))
            return False

        # All good.
        if other.urls:
            self.referenced_by(other)

            for url in other.urls:
                self.add_url(url)

        if other.targets:
            self.referenced_by(other)
            if self.targets is None:
                self.targets = other.targets
            else:
                self.targets = typecast(List[Dict[str, Union[int, str]]],
                                        self.targets) + other.targets

        return True
Example #23
    def __init__(self, config: 'V2Config') -> None:
        # We should never be instantiated unless there is, in fact, defined tracing stuff.
        assert config.ir.tracing

        super().__init__()

        tracing = typecast(IRTracing, config.ir.tracing)

        name = tracing['driver']

        if not name.startswith('envoy.'):
            name = 'envoy.%s' % (name.lower())

        driver_config = tracing['driver_config']

        if name.lower() == 'envoy.zipkin':
            # The collector_endpoint is mandatory now.
            if not driver_config.get('collector_endpoint'):
                driver_config['collector_endpoint'] = '/api/v1/spans'

        self['http'] = {
            "name": name,
            "config": driver_config
        }
Example #24
    def get_jsonschema_validator(self, apiVersion, kind) -> Optional[Validator]:
        # Do we have a JSONSchema on disk for this?
        schema_path = os.path.join(self.schema_dir_path, apiVersion, f"{kind}.schema")

        try:
            schema = json.load(open(schema_path, "r"))

            # Note that we'll never get here if the schema doesn't parse.
            if schema:
                self.logger.debug(f"using validate_with_jsonschema for getambassador.io/{apiVersion} {kind}")

                # Ew. Early binding for Python lambdas is kinda weird.
                return typecast(Validator,
                                lambda resource, schema=schema: self.validate_with_jsonschema(resource, schema))
        except OSError:
            self.logger.debug(f"no schema at {schema_path}, not validating")
            return None
        except json.decoder.JSONDecodeError as e:
            self.logger.warning(f"corrupt schema at {schema_path}, skipping ({e})")
            return None

        # This can't actually happen -- the only way to get here is to have an uncaught
        # exception. But it shuts up mypy so WTF.
        return None
Example #25
    def ambassador_id(self) -> str:
        if self._ambassador_id is None:
            return self.name.k8s
        else:
            return typecast(str, self._ambassador_id)
Example #26
    def __init__(self, clusters: Optional[List[dict]] = None) -> None:
        self.clusters = {}

        if clusters:
            for cluster in typecast(List[dict], clusters):
                self[cluster['name']] = DiagCluster(cluster)
Example #27
    def __init__(self, aconf: Config, secret_reader=None, file_checker=None) -> None:
        self.ambassador_id = Config.ambassador_id
        self.ambassador_namespace = Config.ambassador_namespace
        self.ambassador_nodename = aconf.ambassador_nodename
        self.statsd = aconf.statsd

        self.logger = logging.getLogger("ambassador.ir")

        # We're using setattr since mypy complains about assigning directly to a method.
        secret_root = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', "/ambassador")
        setattr(self, 'secret_reader', secret_reader or KubeSecretReader(secret_root))
        setattr(self, 'file_checker', file_checker if file_checker is not None else os.path.isfile)

        self.logger.debug("IR __init__:")
        self.logger.debug("IR: Version         %s built from %s on %s" % (Version, Build.git.commit, Build.git.branch))
        self.logger.debug("IR: AMBASSADOR_ID   %s" % self.ambassador_id)
        self.logger.debug("IR: Namespace       %s" % self.ambassador_namespace)
        self.logger.debug("IR: Nodename        %s" % self.ambassador_nodename)

        self.logger.debug("IR: file checker:   %s" % getattr(self, 'file_checker').__name__)
        self.logger.debug("IR: secret reader:  %s" % getattr(self, 'secret_reader').__name__)

        # First up: save the Config object. Its source map may be necessary later.
        self.aconf = aconf

        # Next, we'll want a way to keep track of resources we end up working
        # with. It starts out empty.
        self.saved_resources = {}

        # Next, define the initial IR state -- which is empty.
        #
        # Note that we use a map for clusters, not a list -- the reason is that
        # multiple mappings can use the same service, and we don't want multiple
        # clusters.
        self.clusters = {}
        self.grpc_services = {}
        self.filters = []
        self.tracing = None
        self.tls_contexts = {}
        self.ratelimit = None
        self.listeners = []
        self.groups = {}

        # Set up default TLS stuff.
        #
        # XXX This feels like a hack -- shouldn't it be class-wide initialization
        # in TLSModule or TLSContext? So far it's the only place we need anything like
        # this though.

        self.tls_module = None

        # OK! Start by wrangling TLS-context stuff, both from the TLS module (if any)...
        TLSModuleFactory.load_all(self, aconf)

        # ...and from any TLSContext resources.
        self.save_tls_contexts(aconf)

        # Next, handle the "Ambassador" module. This is last so that the Ambassador module has all
        # the TLS contexts available to it.
        self.ambassador_module = typecast(IRAmbassador, self.save_resource(IRAmbassador(self, aconf)))

        # Save breaker & outlier configs.
        self.breakers = aconf.get_config("CircuitBreaker") or {}
        self.outliers = aconf.get_config("OutlierDetection") or {}

        # Save tracing and ratelimit settings.
        self.tracing = typecast(IRTracing, self.save_resource(IRTracing(self, aconf)))
        self.ratelimit = typecast(IRRateLimit, self.save_resource(IRRateLimit(self, aconf)))

        # After the Ambassador and TLS modules are done, we need to set up the
        # filter chains, which requires checking in on the auth and ratelimit
        # configuration. Note that the order of the filters matters.
        self.save_filter(IRAuth(self, aconf))

        # ...note that ratelimit is a filter too...
        if self.ratelimit:
            self.save_filter(self.ratelimit, already_saved=True)

        # ...then deal with the non-configurable cors filter...
        self.save_filter(IRFilter(ir=self, aconf=aconf,
                                  rkey="ir.cors", kind="ir.cors", name="cors",
                                  config={}))

        # ...and the marginally-configurable router filter.
        router_config = {}

        if self.tracing:
            router_config['start_child_span'] = True

        self.save_filter(IRFilter(ir=self, aconf=aconf,
                                  rkey="ir.router", kind="ir.router", name="router", type="decoder",
                                  config=router_config))

        # We would handle other modules here -- but guess what? There aren't any.
        # At this point ambassador, tls, and the deprecated auth module are all there
        # are, and they're handled above. So. At this point go sort out all the Mappings
        ListenerFactory.load_all(self, aconf)
        MappingFactory.load_all(self, aconf)

        self.walk_saved_resources(aconf, 'add_mappings')

        TLSModuleFactory.finalize(self, aconf)
        ListenerFactory.finalize(self, aconf)
        MappingFactory.finalize(self, aconf)

        # At this point we should know the full set of clusters, so we can normalize
        # any long cluster names.
        collisions: Dict[str, List[str]] = {}

        for name in sorted(self.clusters.keys()):
            if len(name) > 60:
                # Too long.
                short_name = name[0:40]

                collision_list = collisions.setdefault(short_name, [])
                collision_list.append(name)

        for short_name in sorted(collisions.keys()):
            name_list = collisions[short_name]

            i = 0

            for name in sorted(name_list):
                mangled_name = "%s-%d" % (short_name, i)
                i += 1

                self.logger.info("%s => %s" % (name, mangled_name))
                self.clusters[name]['name'] = mangled_name

        # After we have the cluster names fixed up, go finalize filters.
        if self.tracing:
            self.tracing.finalize()

        if self.ratelimit:
            self.ratelimit.finalize()

        for filter in self.filters:
            filter.finalize()
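The cluster-name normalization near the end is compact but worth unpacking: any cluster name longer than 60 characters is bucketed by its first 40 characters, and every name in a bucket is rewritten to that 40-character prefix plus a numeric suffix. A standalone sketch of the same mangling, using made-up cluster names:

from typing import Dict, List

clusters = {
    'cluster_some_very_long_service_name_in_some_namespace_with_tls_a': {},
    'cluster_some_very_long_service_name_in_some_namespace_with_tls_b': {},
    'cluster_short': {},
}

collisions: Dict[str, List[str]] = {}

for name in sorted(clusters.keys()):
    if len(name) > 60:
        # Too long: bucket it by its first 40 characters.
        collisions.setdefault(name[0:40], []).append(name)

for short_name in sorted(collisions.keys()):
    for i, name in enumerate(sorted(collisions[short_name])):
        # Each colliding name becomes prefix-0, prefix-1, ...
        print("%s => %s-%d" % (name, short_name, i))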
Example #28
    def save_filter(self, resource: IRFilter, already_saved=False) -> None:
        if resource.is_active():
            if not already_saved:
                resource = typecast(IRFilter, self.save_resource(resource))

            self.filters.append(resource)
Example #29
def dump(config_dir_path: Parameter.REQUIRED,
         *,
         secret_dir_path=None,
         watt=False,
         debug=False,
         debug_scout=False,
         k8s=False,
         recurse=False,
         aconf=False,
         ir=False,
         v2=False,
         diag=False,
         features=False):
    """
    Dump various forms of an Ambassador configuration for debugging

    Use --aconf, --ir, and --v2 to control what gets dumped. If none are requested, the aconf,
    IR, and V2 config will all be dumped.

    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    :param secret_dir_path: Directory into which to save secrets
    :param watt: If set, input must be a WATT snapshot
    :param debug: If set, generate debugging output
    :param debug_scout: If set, generate debugging output
    :param k8s: If set, assume configuration files are annotated K8s manifests
    :param recurse: If set, recurse into directories below config_dir_path
    :param aconf: If set, dump the Ambassador config
    :param ir: If set, dump the IR
    :param v2: If set, dump the Envoy V2 config
    :param diag: If set, dump the Diagnostics overview
    :param features: If set, dump the feature set
    """

    if not secret_dir_path:
        secret_dir_path = config_dir_path

        if not os.path.isdir(secret_dir_path):
            secret_dir_path = os.path.dirname(secret_dir_path)

    if debug:
        logger.setLevel(logging.DEBUG)

    if debug_scout:
        logging.getLogger('ambassador.scout').setLevel(logging.DEBUG)

    if not (aconf or ir or v2 or diag or features):
        aconf = True
        ir = True
        v2 = True
        diag = False
        features = False

    dump_aconf = aconf
    dump_ir = ir
    dump_v2 = v2
    dump_diag = diag
    dump_features = features

    od = {}
    diagconfig: Optional[EnvoyConfig] = None

    try:
        aconf = Config()
        fetcher = ResourceFetcher(logger, aconf)

        if watt:
            fetcher.parse_watt(open(config_dir_path, "r").read())
        else:
            fetcher.load_from_filesystem(config_dir_path,
                                         k8s=k8s,
                                         recurse=True)

        aconf.load_all(fetcher.sorted())

        # aconf.post_error("Error from string, boo yah")
        # aconf.post_error(RichStatus.fromError("Error from RichStatus"))

        if dump_aconf:
            od['aconf'] = aconf.as_dict()

        secret_handler = CLISecretHandler(logger, config_dir_path,
                                          secret_dir_path, "0")

        ir = IR(aconf,
                file_checker=file_checker,
                secret_handler=secret_handler)

        if dump_ir:
            od['ir'] = ir.as_dict()

        if dump_v2:
            v2config = V2Config(ir)
            diagconfig = v2config
            od['v2'] = v2config.as_dict()

        if dump_diag:
            if not diagconfig:
                diagconfig = V2Config(ir)

            econf = typecast(EnvoyConfig, diagconfig)
            diag = Diagnostics(ir, econf)
            od['diag'] = diag.as_dict()
            od['elements'] = econf.elements

        if dump_features:
            od['features'] = ir.features()

        # scout = Scout()
        # scout_args = {}
        #
        # if ir and not os.environ.get("AMBASSADOR_DISABLE_FEATURES", None):
        #     scout_args["features"] = ir.features()
        #
        # result = scout.report(action="dump", mode="cli", **scout_args)
        # show_notices(result)

        json.dump(od, sys.stdout, sort_keys=True, indent=4)
        sys.stdout.write("\n")
    except Exception as e:
        handle_exception("EXCEPTION from dump",
                         e,
                         config_dir_path=config_dir_path)

        # This is fatal.
        sys.exit(1)
Example #30
    def report(self, force_result: Optional[dict] = None, **kwargs) -> dict:
        _notices: List[ScoutNotice] = []

        env_result = os.environ.get("AMBASSADOR_SCOUT_RESULT", None)
        if env_result:
            force_result = json.loads(env_result)

        result: Optional[dict] = force_result
        result_was_cached: bool = False

        if not result:
            if 'runtime' not in kwargs:
                kwargs['runtime'] = self.runtime

            if 'commit' not in kwargs:
                kwargs['commit'] = Build.git.commit

            if 'branch' not in kwargs:
                kwargs['branch'] = Build.git.branch

            # How long since the last Scout update? If it's been more than an hour,
            # check Scout again.

            now = datetime.datetime.now()

            needs_update = True

            if self._last_update:
                since_last_update = now - typecast(datetime.datetime,
                                                   self._last_update)
                needs_update = (since_last_update > self._update_frequency)

            if needs_update:
                if self.scout:
                    result = self.scout.report(**kwargs)

                    self._last_update = now
                    self._last_result = dict(
                        **typecast(dict, result)) if result else None
                else:
                    result = {"scout": "unavailable: %s" % self._scout_error}
                    _notices.append({
                        "level": "DEBUG",
                        "message": "scout temporarily unavailable: %s" % self._scout_error
                    })

                # Whether we could talk to Scout or not, update the timestamp so we don't
                # try again too soon.
                result_timestamp = datetime.datetime.now()
            else:
                _notices.append({
                    "level": "DEBUG",
                    "message": "Returning cached result"
                })
                result = dict(**typecast(
                    dict, self._last_result)) if self._last_result else None
                result_was_cached = True

                # We can't get here unless self._last_update is set.
                result_timestamp = typecast(datetime.datetime,
                                            self._last_update)
        else:
            _notices.append({
                "level": "INFO",
                "message": "Returning forced Scout result"
            })
            result_timestamp = datetime.datetime.now()

        if not self.semver:
            _notices.append({
                "level": "WARNING",
                "message": "Ambassador has invalid version '%s'??!" % self.version
            })

        if result:
            result['cached'] = result_was_cached
        else:
            result = {'cached': False}

        result['timestamp'] = result_timestamp.timestamp()

        # Do version & notices stuff.
        if 'latest_version' in result:
            latest_version = result['latest_version']
            latest_semver = self.get_semver(latest_version)

            if latest_semver:
                self._latest_version = latest_version
                self._latest_semver = latest_semver
            else:
                _notices.append({
                    "level": "WARNING",
                    "message": "Scout returned invalid version '%s'??!" % latest_version
                })

        if (self._latest_semver and ((not self.semver) or
                                     (self._latest_semver > self.semver))):
            _notices.append({
                "level": "INFO",
                "message": "Upgrade available! to Ambassador version %s" % self._latest_semver
            })

        if 'notices' in result:
            rnotices = typecast(List[Union[str, ScoutNotice]],
                                result['notices'])

            for notice in rnotices:
                if isinstance(notice, str):
                    _notices.append({"level": "WARNING", "message": notice})
                elif isinstance(notice, dict):
                    lvl = notice.get('level', 'WARNING').upper()
                    msg = notice.get('message', None)

                    if msg:
                        _notices.append({"level": lvl, "message": msg})
                else:
                    _notices.append({
                        "level": "WARNING",
                        "message": json.dumps(notice)
                    })

        self._notices = _notices

        if self._notices:
            result['notices'] = self._notices
        else:
            result.pop('notices', None)

        return result