Example #1
    def load_config_watt(self, rqueue: queue.Queue, url: str):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration: watt, %s to %s" % (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger, url, stream2=open(ss_path, "w"))

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        # Weirdly, we don't need a special WattSecretHandler: parse_watt knows how to handle
        # the secrets that watt sends.
        scc = SecretHandler(app.logger, url, app.snapshot_path, snapshot)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)

        if serialization:
            fetcher.parse_watt(serialization)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" % snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #2
    def load_config(self, url):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path,
                               "snapshot-%s.yaml" % snapshot)

        self.logger.info("copying configuration from %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if not serialization:
            self.logger.info("no data loaded from snapshot %s?" % snapshot)
            return

        scc = SecretSaver(app.logger, url, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.info("no configuration found in snapshot %s?" %
                             snapshot)
            return

        self._load_ir(aconf, fetcher, scc.url_reader, snapshot)
Example #3
    def load_config_kubewatch(self, rqueue: queue.Queue, url: str):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration: kubewatch, %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        elements: List[str] = []

        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if serialization:
            elements.append(serialization)
        else:
            self.logger.debug("no services loaded from snapshot %s" % snapshot)

        if Config.enable_endpoints:
            serialization = load_url_contents(self.logger,
                                              "%s/endpoints" % url,
                                              stream2=open(ss_path, "a"))

            if serialization:
                elements.append(serialization)
            else:
                self.logger.debug("no endpoints loaded from snapshot %s" %
                                  snapshot)

        serialization = "---\n".join(elements)

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        scc = KubewatchSecretHandler(app.logger, url, app.snapshot_path,
                                     snapshot)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" %
                              snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #4
    def load_config_fs(self, path: str) -> None:
        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        self.logger.info("loading configuration from disk: %s" % path)

        scc = SecretSaver(app.logger, path, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=False, recurse=True)

        if not fetcher.elements:
            self.logger.info("no configuration found at %s" % path)
            return

        self._load_ir(aconf, fetcher, scc.null_reader, snapshot)
Example #5
    def load_config_fs(self, rqueue: queue.Queue, path: str) -> None:
        self.logger.info("loading configuration from disk: %s" % path)

        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        scc = FSSecretHandler(app.logger, path, app.snapshot_path, 0)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=app.k8s, recurse=True)

        if not fetcher.elements:
            self.logger.debug("no configuration resources found at %s" % path)
            # self._respond(rqueue, 204, 'ignoring empty configuration')
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #6
    def _load_ir(self, aconf: Config, fetcher: ResourceFetcher,
                 secret_reader: Callable[['IRTLSContext', str, str],
                                         SavedSecret], snapshot: str) -> None:

        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path,
                                  "aconf-%s.json" % snapshot)
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_reader=secret_reader)

        ir_path = os.path.join(app.snapshot_path, "ir-%s.json" % snapshot)
        open(ir_path, "w").write(ir.as_json())

        check_scout(app, "update", ir)

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            return

        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        self.logger.info("configuration updated")

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)
Example #7
    def load_config(self, rqueue: queue.Queue, url):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration from %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if os.environ.get('AMBASSADOR_ENABLE_ENDPOINTS'):
            endpoints = load_url_contents(self.logger, "%s/endpoints" % url,
                                          stream2=open(ss_path, "a"))

            # Guard the concatenation: either load may have returned nothing.
            if endpoints:
                serialization = (serialization + '---\n' + endpoints) if serialization else endpoints

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        scc = SecretSaver(app.logger, url, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        if serialization:
            fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" %
                              snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc.url_reader, snapshot)
Example #8
def main(k8s_yaml_path: str, debug: bool, force_pod_labels: bool, update: bool,
         source: List[str], labels: List[str], namespace: Optional[str], watch: str,
         include_ir: bool, include_aconf: bool,
         diff_path: Optional[str]=None, kat_name: Optional[str]=None) -> None:
    loglevel = logging.DEBUG if debug else logging.INFO

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s mockery %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )

    logger = logging.getLogger('mockery')

    logger.debug(f"reading from {k8s_yaml_path}")

    if not source:
        source = [
            "Host", "service", "ingresses",
            "AuthService", "LogService", "Mapping", "Module", "RateLimitService",
            "TCPMapping", "TLSContext", "TracingService",
            "ConsulResolver", "KubernetesEndpointResolver", "KubernetesServiceResolver"
        ]

    if namespace:
        os.environ['AMBASSADOR_NAMESPACE'] = namespace

    # Make labels a list, instead of a tuple.
    labels = list(labels)
    labels_to_force = { l: True for l in labels or [] }

    if kat_name:
        logger.debug(f"KAT name {kat_name}")

        # First set up some labels to force.

        labels_to_force["scope=AmbassadorTest"] = True
        labels_to_force[f"service={kat_name}"] = True

        kat_amb_id_label = f"kat-ambassador-id={kat_name}"

        if kat_amb_id_label not in labels_to_force:
            labels_to_force[kat_amb_id_label] = True
            labels.append(kat_amb_id_label)

        os.environ['AMBASSADOR_ID'] = kat_name

        # Forcibly override the cached ambassador_id.
        Config.ambassador_id = kat_name

    logger.debug(f"namespace {namespace or '*'}")
    logger.debug(f"labels to watch {', '.join(labels)}")
    logger.debug(f"labels to force {', '.join(sorted(labels_to_force.keys()))}")
    logger.debug(f"watch hook {watch}")
    logger.debug(f"sources {', '.join(source)}")

    for key in sorted(os.environ.keys()):
        if key.startswith('AMBASSADOR'):
            logger.debug(f"${key}={os.environ[key]}")

    if force_pod_labels:
        try:
            os.makedirs("/tmp/ambassador-pod-info")
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        with open("/tmp/ambassador-pod-info/labels", "w", encoding="utf-8") as outfile:
            for l in labels_to_force:
                outfile.write(l)
                outfile.write("\n")

    # Pull in the YAML.
    manifest = parse_yaml(open(k8s_yaml_path, "r").read())

    w = Mockery(logger, debug, source, ",".join(labels), namespace, watch)

    iteration = 0

    while True:
        iteration += 1

        if iteration > 10:
            print(f"!!!! Not stable after 10 iterations, failing")
            logger.error("Not stable after 10 iterations, failing")
            sys.exit(1)

        logger.info(f"======== START ITERATION {iteration}")

        w.load(manifest)

        logger.info(f"WATT_K8S: {w.snapshot}")

        hook_ok, any_changes = w.run_hook()
        
        if not hook_ok:
            raise Exception("hook failed")
            
        if any_changes:
            logger.info(f"======== END ITERATION {iteration}: watches changed!")
        else:
            logger.info(f"======== END ITERATION {iteration}: stable!")
            break

    # Once here, we should be good to go.
    try:
        os.makedirs("/tmp/ambassador/snapshots")
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    scc = MockSecretHandler(logger, "mockery", "/tmp/ambassador/snapshots", f"v{iteration}")

    aconf = Config()

    logger.debug(f"Config.ambassador_id {Config.ambassador_id}")
    logger.debug(f"Config.ambassador_namespace {Config.ambassador_namespace}")

    logger.info(f"STABLE WATT_K8S: {w.snapshot}")

    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_watt(w.snapshot)
    aconf.load_all(fetcher.sorted())

    open("/tmp/ambassador/snapshots/aconf.json", "w", encoding="utf-8").write(aconf.as_json())

    ir = IR(aconf, secret_handler=scc)

    open("/tmp/ambassador/snapshots/ir.json", "w", encoding="utf-8").write(ir.as_json())

    econf = EnvoyConfig.generate(ir, "V2")
    bootstrap_config, ads_config = econf.split_config()

    ads_config.pop('@type', None)
    with open("/tmp/ambassador/snapshots/econf.json", "w", encoding="utf-8") as outfile:
        outfile.write(json.dumps(ads_config, sort_keys=True, indent=4))

    with open("/tmp/ambassador/snapshots/bootstrap.json", "w", encoding="utf-8") as outfile:
        outfile.write(json.dumps(bootstrap_config, sort_keys=True, indent=4))

    diag = Diagnostics(ir, econf)

    with open("/tmp/ambassador/snapshots/diag.json", "w", encoding="utf-8") as outfile:
        outfile.write(json.dumps(diag.as_dict(), sort_keys=True, indent=4))

    if diff_path:
        diffs = False

        pairs_to_check = [
            (os.path.join(diff_path, 'snapshots', 'econf.json'), '/tmp/ambassador/snapshots/econf.json'),
            (os.path.join(diff_path, 'bootstrap-ads.json'), '/tmp/ambassador/snapshots/bootstrap.json')
        ]

        if include_ir:
            pairs_to_check.append(
                ( os.path.join(diff_path, 'snapshots', 'ir.json'), '/tmp/ambassador/snapshots/ir.json' )
            )

        if include_aconf:
            pairs_to_check.append(
                ( os.path.join(diff_path, 'snapshots', 'aconf.json'), '/tmp/ambassador/snapshots/aconf.json' )
            )

        for gold_path, check_path in pairs_to_check:
            if update:
                logger.info(f"mv {check_path} {gold_path}")
                shutil.move(check_path, gold_path)
            elif not filecmp.cmp(gold_path, check_path):
                diffs = True

                gold_lines = open(gold_path, "r", encoding="utf-8").readlines()
                check_lines = open(check_path, "r", encoding="utf-8").readlines()

                for line in difflib.unified_diff(gold_lines, check_lines, fromfile=gold_path, tofile=check_path):
                    sys.stdout.write(line)

        if diffs:
            sys.exit(1)
Example #9
    def _load_ir(self, rqueue: queue.Queue, aconf: Config,
                 fetcher: ResourceFetcher, secret_handler: SecretHandler,
                 snapshot: str) -> None:
        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_handler=secret_handler)

        ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
        open(ir_path, "w").write(ir.as_json())

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config,
                                          retries=self.app.validation_retries):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            # Don't use app.check_scout; it will deadlock.
            self.check_scout("attempted bad update")
            self._respond(
                rqueue, 500,
                'ignoring: invalid Envoy configuration in snapshot %s' %
                snapshot)
            return

        snapcount = int(os.environ.get('AMBASSADOR_SNAPSHOT_COUNT', "4"))
        snaplist: List[Tuple[str, str]] = []

        if snapcount > 0:
            self.logger.debug("rotating snapshots for snapshot %s" % snapshot)

            # If snapcount is 4, this range statement becomes range(-4, -1)
            # which gives [ -4, -3, -2 ], which the list comprehension turns
            # into [ ( "-3", "-4" ), ( "-2", "-3" ), ( "-1", "-2" ) ]...
            # which is the list of suffixes to rename to rotate the snapshots.

            snaplist += [(str(x + 1), str(x))
                         for x in range(-1 * snapcount, -1)]

            # After dealing with that, we need to rotate the current file into -1.
            snaplist.append(('', '-1'))

        # Whether or not we do any rotation, we need to cycle in the '-tmp' file.
        snaplist.append(('-tmp', ''))
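        # With the default snapcount of 4, snaplist ends up as
        # [('-3', '-4'), ('-2', '-3'), ('-1', '-2'), ('', '-1'), ('-tmp', '')]:
        # renames run oldest-first, so the old '-4' files are overwritten and
        # '-tmp' ends up as the current snapshot.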

        for from_suffix, to_suffix in snaplist:
            for fmt in [
                    "aconf{}.json", "econf{}.json", "ir{}.json",
                    "snapshot{}.yaml"
            ]:
                from_path = os.path.join(app.snapshot_path,
                                         fmt.format(from_suffix))
                to_path = os.path.join(app.snapshot_path,
                                       fmt.format(to_suffix))

                try:
                    self.logger.debug("rotate: %s -> %s" %
                                      (from_path, to_path))
                    os.rename(from_path, to_path)
                except IOError as e:
                    self.logger.debug("skip %s -> %s: %s" %
                                      (from_path, to_path, e))
                except Exception as e:
                    self.logger.debug("could not rename %s -> %s: %s" %
                                      (from_path, to_path, e))

        app.latest_snapshot = snapshot
        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        if app.ir.k8s_status_updates:
            for name in app.ir.k8s_status_updates.keys():
                kind, update = app.ir.k8s_status_updates[name]

                self.logger.info(
                    f"doing K8s status update for {kind} {name}...")

                text = json.dumps(update)

                with open(f'/tmp/kstat-{kind}-{name}', 'w') as out:
                    out.write(text)

                cmd = [
                    '/ambassador/kubestatus', kind, '-f',
                    f'metadata.name={name}', '-u', '/dev/fd/0'
                ]
                self.logger.info(f"Running command: {cmd}")

                try:
                    rc = subprocess.run(cmd,
                                        input=text.encode('utf-8'),
                                        timeout=5)
                    self.logger.info(f'...update finished, rc {rc.returncode}')
                except subprocess.TimeoutExpired as e:
                    self.logger.error(f'...update timed out, {e}')

        self.logger.info("configuration updated from snapshot %s" % snapshot)
        self._respond(rqueue, 200,
                      'configuration updated from snapshot %s' % snapshot)

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)

        # Check our environment...
        self.check_environment()

        self.chime()
Example #10
    def load_config_fs(self, rqueue: queue.Queue, path: str) -> None:
        self.logger.info("loading configuration from disk: %s" % path)

        # The "path" here can just be a path, but it can also be a command for testing,
        # if the user has chosen to allow that.
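        # For example, with allow_fs_commands enabled, a "path" of "cmd:chime"
        # triggers the CHIME command below, and "cmd:env_bad" forces the
        # environment to be marked bad.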

        if self.app.allow_fs_commands and (':' in path):
            pfx, rest = path.split(':', 1)

            if pfx.lower() == 'cmd':
                fields = rest.split(':', 1)

                cmd = fields[0].upper()

                args = fields[1:] if (len(fields) > 1) else None

                if cmd.upper() == 'CHIME':
                    self.logger.info('CMD: Chiming')

                    self.chime()

                    self._respond(rqueue, 200, 'Chimed')
                elif cmd.upper() == 'CHIME_RESET':
                    self.chimed = False
                    self.last_chime = False
                    self.env_good = False

                    self.app.scout.reset_events()
                    self.app.scout.report(mode="boot",
                                          action="boot1",
                                          no_cache=True)

                    self.logger.info('CMD: Reset chime state')
                    self._respond(rqueue, 200, 'CMD: Reset chime state')
                elif cmd.upper() == 'SCOUT_CACHE_RESET':
                    self.app.scout.reset_cache_time()

                    self.logger.info('CMD: Reset Scout cache time')
                    self._respond(rqueue, 200, 'CMD: Reset Scout cache time')
                elif cmd.upper() == 'ENV_OK':
                    self.env_good = True
                    self.failure_list = None

                    self.logger.info('CMD: Marked environment good')
                    self._respond(rqueue, 200, 'CMD: Marked environment good')
                elif cmd.upper() == 'ENV_BAD':
                    self.env_good = False
                    self.failure_list = ['failure forced']

                    self.logger.info('CMD: Marked environment bad')
                    self._respond(rqueue, 200, 'CMD: Marked environment bad')
                else:
                    self.logger.info(f'CMD: no such command "{cmd}"')
                    self._respond(rqueue, 400, f'CMD: no such command "{cmd}"')

                return
            else:
                self.logger.info(f'CONFIG_FS: invalid prefix "{pfx}"')
                self._respond(rqueue, 400,
                              f'CONFIG_FS: invalid prefix "{pfx}"')

            return

        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        scc = FSSecretHandler(app.logger, path, app.snapshot_path, "0")

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=app.k8s, recurse=True)

        if not fetcher.elements:
            self.logger.debug("no configuration resources found at %s" % path)
            # self._respond(rqueue, 204, 'ignoring empty configuration')
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #11
    def _load_ir(self, rqueue: queue.Queue, aconf: Config,
                 fetcher: ResourceFetcher,
                 secret_reader: Callable[['IRTLSContext', str, str],
                                         SavedSecret], snapshot: str) -> None:
        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_reader=secret_reader)

        ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
        open(ir_path, "w").write(ir.as_json())

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            app.check_scout("attempted bad update")
            self._respond(
                rqueue, 500,
                'ignoring: invalid Envoy configuration in snapshot %s' %
                snapshot)
            return

        self.logger.info("rotating snapshots for snapshot %s" % snapshot)

        for from_suffix, to_suffix in [('-3', '-4'), ('-2', '-3'),
                                       ('-1', '-2'), ('', '-1'), ('-tmp', '')]:
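            # Renames run oldest-first: '-3' overwrites '-4', '-2' takes over '-3',
            # and so on, until '-tmp' finally becomes the current snapshot.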
            for fmt in [
                    "aconf{}.json", "econf{}.json", "ir{}.json",
                    "snapshot{}.yaml"
            ]:
                try:
                    from_path = os.path.join(app.snapshot_path,
                                             fmt.format(from_suffix))
                    to_path = os.path.join(app.snapshot_path,
                                           fmt.format(to_suffix))

                    self.logger.debug("rotate: %s -> %s" %
                                      (from_path, to_path))
                    os.rename(from_path, to_path)
                except IOError as e:
                    self.logger.debug("skip %s -> %s: %s" %
                                      (from_path, to_path, e))
                except Exception as e:
                    self.logger.debug("could not rename %s -> %s: %s" %
                                      (from_path, to_path, e))

        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        self.logger.info("configuration updated from snapshot %s" % snapshot)
        self._respond(rqueue, 200,
                      'configuration updated from snapshot %s' % snapshot)

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)

        # Don't use app.check_scout; it will deadlock. And don't bother doing the Scout
        # update until after we've taken care of Envoy.
        self.check_scout("update")
Example #12
    # Don't bother actually saving resources that come up when working with
    # the faked modules.
    def save_resource(self, resource: 'IRResource') -> 'IRResource':
        return resource


#### Mainline.

yaml_stream = sys.stdin

if args:
    yaml_stream = open(args[0], "r")

aconf = Config()
fetcher = ResourceFetcher(logger, aconf)
fetcher.parse_watt(yaml_stream.read())

aconf.load_all(fetcher.sorted())

# We can lift mappings straight from the aconf...
mappings = aconf.get_config('mappings') or {}

# ...but we need the fake IR to deal with resolvers and TLS contexts.
fake = FakeIR(logger, aconf)

logger.debug("FakeIR: %s" % fake.as_json())

resolvers = fake.resolvers
contexts = fake.tls_contexts
Example #13
    def load_yaml(self, yaml_stream):
        self.aconf = Config()

        fetcher = ResourceFetcher(self.logger, self.aconf, watch_only=True)
        fetcher.parse_watt(yaml_stream.read())

        self.aconf.load_all(fetcher.sorted())

        # We can lift mappings straight from the aconf...
        mappings = self.aconf.get_config('mappings') or {}

        # ...but we need the fake IR to deal with resolvers and TLS contexts.
        self.fake = FakeIR(self.aconf, logger=self.logger)

        self.logger.debug("IR: %s" % self.fake.as_json())

        resolvers = self.fake.resolvers
        contexts = self.fake.tls_contexts

        self.logger.debug(f'mappings: {len(mappings)}')
        self.logger.debug(f'resolvers: {len(resolvers)}')
        self.logger.debug(f'contexts: {len(contexts)}')

        global_resolver = self.fake.ambassador_module.get('resolver', None)

        global_label_selector = os.environ.get('AMBASSADOR_LABEL_SELECTOR', '')
        self.logger.debug('label-selector: %s' % global_label_selector)

        # watch the AES Secret if the edge stack is running
        if self.fake.edge_stack_allowed:
            aes_secret_name = os.getenv(ENV_AES_SECRET_NAME,
                                        DEFAULT_AES_SECRET_NAME)
            aes_secret_namespace = os.getenv(ENV_AES_SECRET_NAMESPACE,
                                             Config.ambassador_namespace)
            self.logger.debug(
                f'edge stack detected: need secret {aes_secret_name}.{aes_secret_namespace}'
            )
            self.add_kube_watch(
                f'Secret {aes_secret_name}',
                'secret',
                namespace=aes_secret_namespace,
                field_selector=f"metadata.name={aes_secret_name}")

        # Walk hosts.
        for host in self.fake.get_hosts():
            sel = host.get('selector') or {}
            match_labels = sel.get('matchLabels') or {}

            label_selector = None

            if match_labels:
                label_selector = ','.join(
                    [f"{l}={v}" for l, v in match_labels.items()])

            for wanted_kind in ['service', 'secret']:
                self.add_kube_watch(f"Host {host.name}",
                                    wanted_kind,
                                    host.namespace,
                                    label_selector=label_selector)

        for mname, mapping in mappings.items():
            res_name = mapping.get('resolver', None)
            res_source = 'mapping'

            if not res_name:
                res_name = global_resolver
                res_source = 'defaults'

            ctx_name = mapping.get('tls', None)

            self.logger.debug(
                f'Mapping {mname}: resolver {res_name} from {res_source}, service {mapping.service}, tls {ctx_name}'
            )

            if res_name:
                resolver = resolvers.get(res_name, None)
                self.logger.debug(f'-> resolver {resolver}')

                if resolver:
                    svc = Service(logger, mapping.service, ctx_name)

                    if resolver.kind == 'ConsulResolver':
                        self.logger.debug(
                            f'Mapping {mname} uses Consul resolver {res_name}')

                        # At the moment, we stuff the resolver's datacenter into the association
                        # ID for this watch. The ResourceFetcher relies on that.

                        self.consul_watches.append({
                            "id": resolver.datacenter,
                            "consul-address": resolver.address,
                            "datacenter": resolver.datacenter,
                            "service-name": svc.hostname
                        })
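                        # e.g. a ConsulResolver with datacenter "dc1" at address
                        # "consul-server:8500" (illustrative values) yields a watch
                        # whose "id" and "datacenter" are both "dc1", keyed by the
                        # mapping's service hostname.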
                    elif resolver.kind == 'KubernetesEndpointResolver':
                        host = svc.hostname
                        namespace = Config.ambassador_namespace

                        if not host:
                            # This is really kind of impossible.
                            self.logger.error(
                                f"KubernetesEndpointResolver {res_name} has no 'hostname'"
                            )
                            continue

                        if "." in host:
                            (host, namespace) = host.split(".", 2)[0:2]
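                            # e.g. "usersvc.prod" -> host "usersvc", namespace "prod";
                            # "usersvc.prod.svc.cluster.local" likewise yields "usersvc"/"prod".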

                        self.logger.debug(
                            f'...kube endpoints: svc {svc.hostname} -> host {host} namespace {namespace}'
                        )

                        self.add_kube_watch(
                            f"endpoint",
                            "endpoints",
                            namespace,
                            label_selector=global_label_selector,
                            field_selector=f"metadata.name={host}")

        for secret_key, secret_info in self.fake.secret_recorder.needed.items():
            self.logger.debug(
                f'need secret {secret_info.name}.{secret_info.namespace}')

            self.add_kube_watch(
                f"needed secret",
                "secret",
                secret_info.namespace,
                field_selector=f"metadata.name={secret_info.name}")

        if self.fake.edge_stack_allowed:
            # If the edge stack is allowed, make sure we watch for our fallback context.
            self.add_kube_watch("Fallback TLSContext",
                                "TLSContext",
                                namespace=Config.ambassador_namespace)

        ambassador_basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR',
                                            '/ambassador')

        if os.path.exists(
                os.path.join(ambassador_basedir,
                             '.ambassadorinstallations_ok')):
            self.add_kube_watch("AmbassadorInstallations",
                                "ambassadorinstallations.getambassador.io",
                                Config.ambassador_namespace)

        ambassador_knative_requested = (os.environ.get(
            "AMBASSADOR_KNATIVE_SUPPORT", "-unset-").lower() == 'true')

        if ambassador_knative_requested:
            self.logger.debug('Looking for Knative support...')

            if os.path.exists(
                    os.path.join(ambassador_basedir,
                                 '.knative_clusteringress_ok')):
                # Watch for clusteringresses.networking.internal.knative.dev in any namespace and with any labels.

                self.logger.debug(
                    'watching for clusteringresses.networking.internal.knative.dev'
                )
                self.add_kube_watch(
                    "Knative clusteringresses",
                    "clusteringresses.networking.internal.knative.dev", None)

            if os.path.exists(
                    os.path.join(ambassador_basedir, '.knative_ingress_ok')):
                # Watch for ingresses.networking.internal.knative.dev in any namespace and
                # with any labels.

                self.add_kube_watch(
                    "Knative ingresses",
                    "ingresses.networking.internal.knative.dev", None)

        self.watchset = {
            "kubernetes-watches": self.kube_watches,
            "consul-watches": self.consul_watches
        }

        save_dir = os.environ.get('AMBASSADOR_WATCH_DIR', '/tmp')

        if save_dir:
            json.dump(self.watchset,
                      open(os.path.join(save_dir, 'watch.json'), "w"))
Example #14
def test_config(testname, dirpath, configdir):
    # pytest.xfail("old V1 tests are disabled")
    # return
    
    global logger 
    errors = []

    if not os.path.isdir(configdir):
        errors.append("configdir %s is not a directory" % configdir)

    print("==== loading resources")

    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.load_from_filesystem(configdir, recurse=True)
    aconf.load_all(fetcher.sorted())

    ir = IR(aconf, file_checker=file_always_exists, secret_reader=atest_secret_reader)
    v1config = V1Config(ir)

    print("==== checking IR")

    current = get_old_intermediate(aconf, ir, v1config)
    current['envoy_config'] = filtered_overview(current['envoy_config'])
    current = sanitize_errors(current)

    current_path = os.path.join(dirpath, "intermediate.json")
    json.dump(current, open(current_path, "w"), sort_keys=True, indent=4)

    # Check the IR against its gold file, if that gold file exists.
    gold_path = os.path.join(dirpath, "gold.intermediate.json")

    if os.path.exists(gold_path):
        gold_parsed = None

        try:
            gold_parsed = json.load(open(gold_path, "r"))
        except json.decoder.JSONDecodeError as e:
            errors.append("%s was unparseable?" % gold_path)

        if gold_parsed is not None:
            gold_no_yaml = normalize_gold(gold_parsed)
            gold_no_yaml_path = os.path.join(dirpath, "gold.no_yaml.json")
            json.dump(gold_no_yaml, open(gold_no_yaml_path, "w"), sort_keys=True, indent=4)

            udiff = unified_diff(gold_no_yaml_path, current_path)

            if udiff:
                errors.append("gold.intermediate.json and intermediate.json do not match!\n\n%s" % "\n".join(udiff))

    print("==== checking V1")

    # Check the V1 config against its gold file, if it exists (and it should).
    gold_path = os.path.join(dirpath, "gold.json")

    if os.path.exists(gold_path):
        v1path = os.path.join(dirpath, "v1.json")
        json.dump(v1config.as_dict(), open(v1path, "w"), sort_keys=True, indent=4)

        udiff = unified_diff(gold_path, v1path)

        if udiff:
            errors.append("gold.json and v1.json do not match!\n\n%s" % "\n".join(udiff))

    # if ambassador.code != 0:
    #     errors.append('ambassador failed! %s' % ambassador.code)
    # else:
    #     envoy = shell([ 'docker', 'run',
    #                         '--rm',
    #                         '-v', '%s:/etc/ambassador-config' % dirpath,
    #                         VALIDATOR_IMAGE,
    #                         '/usr/local/bin/envoy',
    #                            '--base-id', '1',
    #                            '--mode', 'validate',
    #                            '--service-cluster', 'test',
    #                            '-c', '/etc/ambassador-config/envoy.json' ],
    #                   verbose=True)
    #
    #     envoy_succeeded = (envoy.code == 0)
    #
    #     if not envoy_succeeded:
    #         errors.append('envoy failed! %s' % envoy.code)
    #
    #     envoy_output = list(envoy.output())
    #
    #     if envoy_succeeded:
    #         if not envoy_output[-1].strip().endswith(' OK'):
    #             errors.append('envoy validation failed!')
    #
    # print("==== checking short-circuit with existing config")
    #
    # ambassador = shell([ 'ambassador', 'config', '--check', configdir, envoy_json_out ])
    #
    # print(ambassador.errors(raw=True))
    #
    # if ambassador.code != 0:
    #     errors.append('ambassador repeat check failed! %s' % ambassador.code)
    #
    # if 'Output file exists' not in ambassador.errors(raw=True):
    #     errors.append('ambassador repeat check did not short circuit??')

    if errors:
        print("---- ERRORS")
        print("%s" % "\n".join(errors))

    assert not errors, ("failing, errors: %d" % len(errors))
Example #15
def config(config_dir_path: Parameter.REQUIRED, output_json_path: Parameter.REQUIRED, *,
           debug=False, debug_scout=False, check=False, k8s=False, ir=None, aconf=None,
           exit_on_error=False):
    """
    Generate an Envoy configuration

    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    :param output_json_path: Path to output envoy.json
    :param debug: If set, generate debugging output
    :param debug_scout: If set, generate debugging output when talking to Scout
    :param check: If set, generate configuration only if it doesn't already exist
    :param k8s: If set, assume configuration files are annotated K8s manifests
    :param exit_on_error: If set, will exit with status 1 on any configuration error
    :param ir: Pathname to which to dump the IR (not dumped if not present)
    :param aconf: Pathname to which to dump the aconf (not dumped if not present)
    """

    if debug:
        logger.setLevel(logging.DEBUG)

    if debug_scout:
        logging.getLogger('ambassador.scout').setLevel(logging.DEBUG)

    try:
        logger.debug("CHECK MODE  %s" % check)
        logger.debug("CONFIG DIR  %s" % config_dir_path)
        logger.debug("OUTPUT PATH %s" % output_json_path)

        dump_aconf: Optional[str] = aconf
        dump_ir: Optional[str] = ir

        # Bypass the existence check...
        output_exists = False

        if check:
            # ...oh no wait, they explicitly asked for the existence check!
            # Assume that the file exists (ie, we'll do nothing) unless we
            # determine otherwise.
            output_exists = True

            try:
                json.loads(open(output_json_path, "r").read())
            except FileNotFoundError:
                logger.debug("output file does not exist")
                output_exists = False
            except OSError:
                logger.warning("output file is not sane?")
                output_exists = False
            except json.decoder.JSONDecodeError:
                logger.warning("output file is not valid JSON")
                output_exists = False

            logger.info("Output file %s" % ("exists" if output_exists else "does not exist"))

        rc = RichStatus.fromError("impossible error")

        if not output_exists:
            # Either we didn't need to check, or the check didn't turn up
            # a valid config. Regenerate.
            logger.info("Generating new Envoy configuration...")

            aconf = Config()
            fetcher = ResourceFetcher(logger, aconf)
            fetcher.load_from_filesystem(config_dir_path, k8s=k8s)
            aconf.load_all(fetcher.sorted())

            if dump_aconf:
                with open(dump_aconf, "w") as output:
                    output.write(aconf.as_json())
                    output.write("\n")

            # If exit_on_error is set, log errors and exit with status 1
            if exit_on_error and aconf.errors:
                raise Exception("errors in: {0}".format(', '.join(aconf.errors.keys())))

            secret_handler = NullSecretHandler(logger, config_dir_path, config_dir_path, "0")

            ir = IR(aconf, file_checker=file_checker, secret_handler=secret_handler)

            if dump_ir:
                with open(dump_ir, "w") as output:
                    output.write(ir.as_json())
                    output.write("\n")

            # clize considers kwargs with False for default value as flags,
            # resulting in the logic below.
            # https://clize.readthedocs.io/en/stable/basics.html#accepting-flags
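            # In practice that means booleans like `check` and `k8s` show up on
            # the command line as bare flags (`--check`, `--k8s`) rather than as
            # `--check=true`-style options.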

            logger.info("Writing envoy V2 configuration")
            v2config = V2Config(ir)
            rc = RichStatus.OK(msg="huh_v2")

            if rc:
                with open(output_json_path, "w") as output:
                    output.write(v2config.as_json())
                    output.write("\n")
            else:
                logger.error("Could not generate new Envoy configuration: %s" % rc.error)

        scout = Scout()
        result = scout.report(action="config", mode="cli")
        show_notices(result)
    except Exception as e:
        handle_exception("EXCEPTION from config", e,
                         config_dir_path=config_dir_path, output_json_path=output_json_path)

        # This is fatal.
        sys.exit(1)
Example #16
def dump(config_dir_path: Parameter.REQUIRED, *,
         secret_dir_path=None, watt=False, debug=False, debug_scout=False, k8s=False, recurse=False,
         aconf=False, ir=False, v2=False, diag=False, features=False):
    """
    Dump various forms of an Ambassador configuration for debugging

    Use --aconf, --ir, and --v2 to control what gets dumped. If none are requested, the
    Ambassador config, IR, and Envoy V2 config will all be dumped.

    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    :param secret_dir_path: Directory into which to save secrets
    :param watt: If set, input must be a WATT snapshot
    :param debug: If set, generate debugging output
    :param debug_scout: If set, generate debugging output when talking to Scout
    :param k8s: If set, assume configuration files are annotated K8s manifests
    :param recurse: If set, recurse into directories below config_dir_path
    :param aconf: If set, dump the Ambassador config
    :param ir: If set, dump the IR
    :param v2: If set, dump the Envoy V2 config
    :param diag: If set, dump the Diagnostics overview
    :param features: If set, dump the feature set
    """

    if not secret_dir_path:
        secret_dir_path = config_dir_path

        if not os.path.isdir(secret_dir_path):
            secret_dir_path = os.path.dirname(secret_dir_path)

    if debug:
        logger.setLevel(logging.DEBUG)

    if debug_scout:
        logging.getLogger('ambassador.scout').setLevel(logging.DEBUG)

    if not (aconf or ir or v2 or diag or features):
        aconf = True
        ir = True
        v2 = True
        diag = False
        features = False

    dump_aconf = aconf
    dump_ir = ir
    dump_v2 = v2
    dump_diag = diag
    dump_features = features

    od = {}
    diagconfig: Optional[EnvoyConfig] = None

    try:
        aconf = Config()
        fetcher = ResourceFetcher(logger, aconf)

        if watt:
            fetcher.parse_watt(open(config_dir_path, "r").read())
        else:
            fetcher.load_from_filesystem(config_dir_path, k8s=k8s, recurse=recurse)

        aconf.load_all(fetcher.sorted())

        # aconf.post_error("Error from string, boo yah")
        # aconf.post_error(RichStatus.fromError("Error from RichStatus"))

        if dump_aconf:
            od['aconf'] = aconf.as_dict()

        secret_handler = NullSecretHandler(logger, config_dir_path, secret_dir_path, "0")

        ir = IR(aconf, file_checker=file_checker, secret_handler=secret_handler)

        if dump_ir:
            od['ir'] = ir.as_dict()

        if dump_v2:
            v2config = V2Config(ir)
            diagconfig = v2config
            od['v2'] = v2config.as_dict()

        if dump_diag:
            if not diagconfig:
                diagconfig = V2Config(ir)

            econf = typecast(EnvoyConfig, diagconfig)
            diag = Diagnostics(ir, econf)
            od['diag'] = diag.as_dict()
            od['elements'] = econf.elements

        if dump_features:
            od['features'] = ir.features()

        # scout = Scout()
        # scout_args = {}
        #
        # if ir and not os.environ.get("AMBASSADOR_DISABLE_FEATURES", None):
        #     scout_args["features"] = ir.features()
        #
        # result = scout.report(action="dump", mode="cli", **scout_args)
        # show_notices(result)

        json.dump(od, sys.stdout, sort_keys=True, indent=4)
        sys.stdout.write("\n")
    except Exception as e:
        handle_exception("EXCEPTION from dump", e,
                         config_dir_path=config_dir_path)

        # This is fatal.
        sys.exit(1)
Example #17
    def _load_ir(self, rqueue: queue.Queue, aconf: Config,
                 fetcher: ResourceFetcher, secret_handler: SecretHandler,
                 snapshot: str) -> None:
        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_handler=secret_handler)

        ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
        open(ir_path, "w").write(ir.as_json())

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            app.check_scout("attempted bad update")
            self._respond(
                rqueue, 500,
                'ignoring: invalid Envoy configuration in snapshot %s' %
                snapshot)
            return

        snapcount = int(os.environ.get('AMBASSADOR_SNAPSHOT_COUNT', "4"))
        snaplist: List[Tuple[str, str]] = []

        if snapcount > 0:
            self.logger.debug("rotating snapshots for snapshot %s" % snapshot)

            # If snapcount is 4, this range statement becomes range(-4, -1)
            # which gives [ -4, -3, -2 ], which the list comprehension turns
            # into [ ( "-3", "-4" ), ( "-2", "-3" ), ( "-1", "-2" ) ]...
            # which is the list of suffixes to rename to rotate the snapshots.

            snaplist += [(str(x + 1), str(x))
                         for x in range(-1 * snapcount, -1)]

            # After dealing with that, we need to rotate the current file into -1.
            snaplist.append(('', '-1'))

        # Whether or not we do any rotation, we need to cycle in the '-tmp' file.
        snaplist.append(('-tmp', ''))
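        # With the default snapcount of 4, snaplist ends up as
        # [('-3', '-4'), ('-2', '-3'), ('-1', '-2'), ('', '-1'), ('-tmp', '')]:
        # renames run oldest-first, so the old '-4' files are overwritten and
        # '-tmp' ends up as the current snapshot.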

        for from_suffix, to_suffix in snaplist:
            for fmt in [
                    "aconf{}.json", "econf{}.json", "ir{}.json",
                    "snapshot{}.yaml"
            ]:
                from_path = os.path.join(app.snapshot_path,
                                         fmt.format(from_suffix))
                to_path = os.path.join(app.snapshot_path,
                                       fmt.format(to_suffix))

                try:
                    self.logger.debug("rotate: %s -> %s" %
                                      (from_path, to_path))
                    os.rename(from_path, to_path)
                except IOError as e:
                    self.logger.debug("skip %s -> %s: %s" %
                                      (from_path, to_path, e))
                except Exception as e:
                    self.logger.debug("could not rename %s -> %s: %s" %
                                      (from_path, to_path, e))

        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        self.logger.info("configuration updated from snapshot %s" % snapshot)
        self._respond(rqueue, 200,
                      'configuration updated from snapshot %s' % snapshot)

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)
            # app.scout_updater = PeriodicTrigger(lambda: app.watcher.check_scout("30s"), period=30)

        # Don't use app.check_scout; it will deadlock. And don't bother doing the Scout
        # update until after we've taken care of Envoy.
        self.check_scout("update")
Example #18
    # Never cache anything.
    def cache_secret(self, context: 'IRTLSContext', secret_info: SecretInfo):
        return SavedSecret(secret_info.name, secret_info.namespace,
                           '-crt-path-', '-key-path-', {
                               'tls_crt': '-crt-',
                               'tls_key': '-key-'
                           })


scc = SecretHandler(logger, "test-dump", "ss")

yamlpath = sys.argv[1] if len(sys.argv) > 1 else "consul-3.yaml"

aconf = Config()
fetcher = ResourceFetcher(logger, aconf)
fetcher.parse_watt(open(yamlpath, "r").read())

aconf.load_all(fetcher.sorted())

open("test-aconf.json", "w").write(aconf.as_json())

# sys.exit(0)

ir = IR(aconf, secret_handler=scc)

open("test-ir.json", "w").write(ir.as_json())

econf = V2Config(ir)

open("test-v2.json", "w").write(econf.as_json())