Example #1
    def build(self) -> Tuple[IR, EnvoyConfig]:
        # Do a build, return IR & econf, but also stash them in self.builds.

        yaml_data = yaml.safe_dump_all(self.resources.values())

        aconf = Config()

        fetcher = ResourceFetcher(logger, aconf)
        fetcher.parse_yaml(yaml_data, k8s=True)

        aconf.load_all(fetcher.sorted())

        ir = IR(aconf,
                cache=self.cache,
                file_checker=lambda path: True,
                secret_handler=self.secret_handler)

        assert ir, "could not create an IR"

        econf = EnvoyConfig.generate(ir, "V2", cache=self.cache)

        assert econf, "could not create an econf"

        self.builds.append((ir, econf))

        return ir, econf
Example #2
def get_aconf(app) -> Config:
    # We need to find the sync-# directory with the highest number...
    sync_dirs = []
    latest = app.config_dir_prefix

    for subdir in os.listdir(app.config_dir_prefix):
        if subdir.startswith("sync-"):
            try:
                sync_dirs.append(int(subdir.replace("sync-", "")))
            except ValueError:
                pass

    if sync_dirs:
        latest_generation = sorted(sync_dirs, reverse=True)[0]
        latest = os.path.join(app.config_dir_prefix, "sync-%d" % latest_generation)

    app.logger.debug("Fetching resources from %s" % latest)

    app.scc = SplitConfigChecker(app.logger, latest)

    aconf = Config()
    aconf.load_from_directory(latest, k8s=app.k8s, recurse=True)

    app.notices = Notices(app.notice_path)
    app.notices.reset()

    return aconf
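
get_aconf picks whichever sync-<N> subdirectory has the largest N, falling back to config_dir_prefix itself when none exist; non-integer suffixes are skipped by the ValueError handler. An illustrative layout (directory names are placeholders):

    config_dir_prefix/
        sync-10/
        sync-11/
        sync-12/        <- chosen as "latest"
        sync-broken/    <- ignored: suffix is not an integer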
Example #3
    def _load_ir(self, aconf: Config, fetcher: ResourceFetcher,
                 secret_reader: Callable[['IRTLSContext', str, str],
                                         SavedSecret], snapshot: str) -> None:

        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path,
                                  "aconf-%s.json" % snapshot)
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_reader=secret_reader)

        ir_path = os.path.join(app.snapshot_path, "ir-%s.json" % snapshot)
        open(ir_path, "w").write(ir.as_json())

        check_scout(app, "update", ir)

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            return

        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        self.logger.info("configuration updated")

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)
Example #4
    def test_deduplication(self):
        aconf = Config()

        p = DeduplicatingKubernetesProcessor(CountingKubernetesProcessor(aconf, valid_mapping.gvk, 'test'))

        assert p.try_process(valid_mapping)
        assert p.try_process(valid_mapping)
        assert p.try_process(valid_mapping)

        assert aconf.get_count('test') == 1
Example #5
    def test_count(self):
        aconf = Config()

        p = CountingKubernetesProcessor(aconf, valid_mapping.gvk, 'test')

        assert p.try_process(valid_mapping), 'Processor rejected matching resource'
        assert p.try_process(valid_mapping), 'Processor rejected matching resource (again)'
        assert not p.try_process(valid_knative_ingress), 'Processor accepted non-matching resource'

        assert aconf.get_count('test') == 2, 'Processor did not increment counter'
Example #6
def test_knative_counters():
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(knative_ingress_example, k8s=True)
    aconf.load_all(fetcher.sorted())

    secret_handler = NullSecretHandler(logger, None, None, "0")
    ir = IR(aconf, secret_handler=secret_handler)
    feats = ir.features()

    assert feats['knative_ingress_count'] == 1, f"Expected a Knative ingress, did not find one"
    assert feats['cluster_ingress_count'] == 0, f"Expected no Knative cluster ingresses, found at least one"
Example #7
def _get_envoy_config(yaml, version='V3'):
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(default_listener_manifests() + yaml, k8s=True)

    aconf.load_all(fetcher.sorted())

    secret_handler = NullSecretHandler(logger, None, None, "0")

    ir = IR(aconf, file_checker=lambda path: True, secret_handler=secret_handler)

    assert ir

    return EnvoyConfig.generate(ir, version)
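
A minimal sketch of driving this helper from a test. The Mapping manifest is illustrative (written in CRD form, since the helper parses with k8s=True), and the three-value split_config() unpacking follows the newer examples in this collection; older snippets here unpack only two values:

httpbin_yaml = '''
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: httpbin-mapping
  namespace: default
spec:
  prefix: /httpbin/
  service: httpbin
'''

econf = _get_envoy_config(httpbin_yaml)                # V3 by default
bootstrap_config, ads_config, _ = econf.split_config()
ads_config.pop('@type', None)
assert bootstrap_config and ads_config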
Example #8
def test_envvar_expansion():
    os.environ["TEST_SERVICE"] = "foo"

    aconf = Config()

    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml)

    aconf.load_all(fetcher.sorted())

    mappings = aconf.config["mappings"]
    test_mapping = mappings["test_mapping"]

    assert test_mapping.service == "foo:9999"
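
The module-level yaml manifest used by this test is not part of the excerpt. A plausible shape, assuming Ambassador's ${VAR} environment-variable interpolation and the values the assertions expect, would be:

yaml = '''
---
apiVersion: getambassador.io/v2
kind: Mapping
name: test_mapping
prefix: /test/
service: ${TEST_SERVICE}:9999
'''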
Example #9
def _get_envoy_config(yaml):
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml)

    aconf.load_all(fetcher.sorted())

    secret_handler = NullSecretHandler(logger, None, None, "0")

    ir = IR(aconf, file_checker=lambda path: True, secret_handler=secret_handler)

    assert ir

    return EnvoyConfig.generate(ir, "V2")
Example #10
def _test_errorresponse(yaml, expectations, expect_fail=False):
    aconf = Config()

    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml)

    aconf.load_all(fetcher.sorted())

    secret_handler = NullSecretHandler(logger, None, None, "0")

    ir = IR(aconf,
            file_checker=lambda path: True,
            secret_handler=secret_handler)

    error_response = IRErrorResponse(
        ir, aconf, ir.ambassador_module.get('error_response_overrides', None),
        ir.ambassador_module)

    error_response.setup(ir, aconf)
    if aconf.errors:
        print("errors: %s" % repr(aconf.errors))

    ir_conf = error_response.config()
    if expect_fail:
        assert ir_conf is None
        return
    assert ir_conf

    # There should be no default body format override
    body_format = ir_conf.get('body_format', None)
    assert body_format is None

    mappers = ir_conf.get('mappers', None)
    assert mappers
    assert len(mappers) == len(expectations), \
            f"unexpected len(mappers) {len(mappers)} != len(expectations) {len(expectations)}"

    for i in range(len(expectations)):
        expected_filter, expected_body_format_override = expectations[i]
        m = mappers[i]

        print(
            "checking with expected_body_format_override %s and expected_filter %s"
            % (expected_body_format_override, expected_filter))
        print("checking m: ", m)
        actual_filter = m['filter']
        assert m['filter'] == expected_filter
        if expected_body_format_override:
            actual_body_format_override = m['body_format_override']
            assert actual_body_format_override == expected_body_format_override
Example #11
    def load_config(self, url):
        snapshot = url.split('/')[-1]
        aconf_path = os.path.join(app.config_dir_prefix,
                                  "snapshot-%s.yaml" % snapshot)
        ir_path = os.path.join(app.config_dir_prefix, "ir-%s.json" % snapshot)

        self.logger.info("copying configuration from %s to %s" %
                         (url, aconf_path))

        saved = save_url_contents(self.logger, "%s/services" % url, aconf_path)

        if saved:
            scc = SecretSaver(app.logger, url, app.config_dir_prefix)

            aconf = Config()
            # Yeah yeah yeah. It's not really a directory. Whatever.
            aconf.load_from_directory(aconf_path, k8s=app.k8s, recurse=True)

            ir = IR(aconf, secret_reader=scc.url_reader)
            open(ir_path, "w").write(ir.as_json())

            check_scout(app, "update", ir)

            econf = EnvoyConfig.generate(ir, "V2")
            diag = Diagnostics(ir, econf)

            bootstrap_config, ads_config = econf.split_config()

            self.logger.info("saving Envoy configuration for snapshot %s" %
                             snapshot)

            with open(app.bootstrap_path, "w") as output:
                output.write(
                    json.dumps(bootstrap_config, sort_keys=True, indent=4))

            with open(app.ads_path, "w") as output:
                output.write(json.dumps(ads_config, sort_keys=True, indent=4))

            app.aconf = aconf
            app.ir = ir
            app.econf = econf
            app.diag = diag

            if app.ambex_pid != 0:
                self.logger.info("notifying PID %d ambex" % app.ambex_pid)
                os.kill(app.ambex_pid, signal.SIGHUP)

            self.logger.info("configuration updated")
Example #12
def test_shadow_v3():
    aconf = Config()

    yaml = '''
---
apiVersion: getambassador.io/v2
kind: Mapping
name: httpbin-mapping
service: httpbin
prefix: /httpbin/
---
apiVersion: getambassador.io/v2
kind: Mapping
name: httpbin-mapping-shadow
service: httpbin-shadow
prefix: /httpbin/
shadow: true
weight: 10
'''
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml)

    aconf.load_all(fetcher.sorted())

    secret_handler = MockSecretHandler(logger, "mockery",
                                       "/tmp/ambassador/snapshots", "v1")
    ir = IR(aconf,
            file_checker=lambda path: True,
            secret_handler=secret_handler)

    assert ir

    econf = EnvoyConfig.generate(ir, "V3")

    bootstrap_config, ads_config, _ = econf.split_config()
    ads_config.pop('@type', None)

    mirrored_config = get_mirrored_config(ads_config)
    assert 'request_mirror_policies' in mirrored_config['route']
    assert len(mirrored_config['route']['request_mirror_policies']) == 1
    mirror_policy = mirrored_config['route']['request_mirror_policies'][0]
    assert mirror_policy['cluster'] == 'cluster_shadow_httpbin_shadow_default'
    assert mirror_policy['runtime_fraction']['default_value'][
        'numerator'] == 10
    assert mirror_policy['runtime_fraction']['default_value'][
        'denominator'] == 'HUNDRED'
    assert_valid_envoy_config(ads_config)
    assert_valid_envoy_config(bootstrap_config)
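
get_mirrored_config is a local test helper that is not shown in this excerpt. A minimal sketch, assuming all it needs to do is locate the route entry that carries request_mirror_policies somewhere in the generated config:

def get_mirrored_config(ads_config):
    # Recursively search the Envoy config for the first route entry that
    # carries request_mirror_policies, i.e. the shadowed route.
    def walk(node):
        if isinstance(node, dict):
            route = node.get('route')
            if isinstance(route, dict) and 'request_mirror_policies' in route:
                return node
            children = list(node.values())
        elif isinstance(node, list):
            children = node
        else:
            return None
        for child in children:
            found = walk(child)
            if found is not None:
                return found
        return None

    return walk(ads_config)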
Example #13
File: diagd.py Project: jpoley/ambassador
    def load_config(self, url):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path,
                               "snapshot-%s.yaml" % snapshot)

        self.logger.info("copying configuration from %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if not serialization:
            self.logger.info("no data loaded from snapshot %s?" % snapshot)
            return

        scc = SecretSaver(app.logger, url, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.info("no configuration found in snapshot %s?" %
                             snapshot)
            return

        self._load_ir(aconf, fetcher, scc.url_reader, snapshot)
Example #14
    def load_config_watt(self, rqueue: queue.Queue, url: str):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration: watt, %s to %s" % (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger, url, stream2=open(ss_path, "w"))

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        # Weirdly, we don't need a special WattSecretHandler: parse_watt knows how to handle
        # the secrets that watt sends.
        scc = SecretHandler(app.logger, url, app.snapshot_path, snapshot)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)

        if serialization:
            fetcher.parse_watt(serialization)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" % snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #15
def _test_compiler(test_name, envoy_api_version="V2"):
    test_path = os.path.join(testfolder, test_name)
    basename = os.path.basename(test_path)

    with open(os.path.join(test_path, 'bootstrap-ads.json'), 'r') as f:
        expected_bootstrap = json.loads(f.read())
    node_name = expected_bootstrap.get('node', {}).get('cluster', None)
    assert node_name
    namespace = node_name.replace(test_name + '-', '', 1)

    with open(os.path.join(test_path, 'snapshot.yaml'), 'r') as f:
        watt = f.read()

    Config.ambassador_id = basename
    Config.ambassador_namespace = namespace
    os.environ['AMBASSADOR_ID'] = basename
    os.environ['AMBASSADOR_NAMESPACE'] = namespace
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_watt(watt)

    aconf.load_all(fetcher.sorted())

    secret_handler = MockSecretHandler(logger, "mockery", "/tmp/ambassador/snapshots", "v1")

    ir = IR(aconf, file_checker=lambda path: True, secret_handler=secret_handler)
    expected_econf_file = 'econf.json'
    if ir.edge_stack_allowed:
        expected_econf_file = 'econf-aes.json'
    with open(os.path.join(test_path, expected_econf_file), 'r') as f:
        expected_econf = json.loads(f.read())

    assert ir
    econf = EnvoyConfig.generate(ir, version=envoy_api_version)
    bootstrap_config, ads_config, _ = econf.split_config()
    ads_config.pop('@type', None)

    assert bootstrap_config == expected_bootstrap
    assert ads_config == expected_econf
    assert_valid_envoy_config(ads_config)
    assert_valid_envoy_config(bootstrap_config)

# @pytest.mark.compilertest
# @pytest.mark.parametrize("test_name", testdata)
# def test_compiler_v3(test_name):
#     _test_compiler(test_name, envoy_api_version="V3")
Example #16
    def test_mapping_v1(self):
        aconf = Config()
        mgr = ResourceManager(logger, aconf)

        assert AmbassadorProcessor(mgr).try_process(valid_mapping_v1)
        assert len(mgr.elements) == 1

        aconf.load_all(mgr.elements)
        assert len(aconf.errors) == 0

        mappings = aconf.get_config('mappings')
        assert len(mappings) == 1

        mapping = next(iter(mappings.values()))
        assert mapping.apiVersion == valid_mapping_v1.gvk.api_version
        assert mapping.name == valid_mapping_v1.name
        assert mapping.namespace == valid_mapping_v1.namespace
        assert mapping.prefix == valid_mapping_v1.spec['prefix']
        assert mapping.service == valid_mapping_v1.spec['service']
Example #17
    def load_config_kubewatch(self, rqueue: queue.Queue, url: str):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration: kubewatch, %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        elements: List[str] = []

        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if serialization:
            elements.append(serialization)
        else:
            self.logger.debug("no services loaded from snapshot %s" % snapshot)

        if Config.enable_endpoints:
            serialization = load_url_contents(self.logger,
                                              "%s/endpoints" % url,
                                              stream2=open(ss_path, "a"))

            if serialization:
                elements.append(serialization)
            else:
                self.logger.debug("no endpoints loaded from snapshot %s" %
                                  snapshot)

        serialization = "---\n".join(elements)

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        scc = KubewatchSecretHandler(app.logger, url, app.snapshot_path,
                                     snapshot)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" %
                              snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #18
    def generate_config(self, changes, output):
        if os.path.exists(output):
            shutil.rmtree(output)
        os.makedirs(output)
        for filename, config in self.configs.items():
            path = os.path.join(output, filename)
            with open(path, "w") as fd:
                fd.write(config)
            logger.debug("Wrote %s to %s" % (filename, path))

        plural = "" if (changes == 1) else "s"

        logger.info("generating config with gencount %d (%d change%s)" %
                    (self.restart_count, changes, plural))

        aconf = Config()
        aconf.load_from_directory(output)
        ir = IR(aconf, secret_reader=KubeSecretReader(secret_root))
        envoy_config = V2Config(ir)

        bootstrap_config, ads_config = envoy_config.split_config()

        scout = Scout(install_id=self.cluster_id)
        scout_args = {"gencount": self.restart_count}

        if not os.environ.get("AMBASSADOR_DISABLE_FEATURES", None):
            scout_args["features"] = ir.features()

        result = scout.report(mode="kubewatch",
                              action="reconfigure",
                              **scout_args)
        notices = result.pop("notices", [])

        logger.debug("scout result %s" %
                     json.dumps(result, sort_keys=True, indent=4))

        for notice in notices:
            logger.log(logging.getLevelName(notice.get('level', 'WARNING')),
                       notice.get('message', '?????'))

        return bootstrap_config, ads_config
Example #19
    def test_aggregation(self):
        aconf = Config()

        fp = FinalizingKubernetesProcessor()

        p = AggregateKubernetesProcessor([
            CountingKubernetesProcessor(aconf, valid_knative_ingress.gvk, 'test_1'),
            CountingKubernetesProcessor(aconf, valid_mapping.gvk, 'test_2'),
            fp,
        ])

        assert len(p.kinds()) == 2

        assert p.try_process(valid_knative_ingress)
        assert p.try_process(valid_mapping)

        assert aconf.get_count('test_1') == 1
        assert aconf.get_count('test_2') == 1

        p.finalize()
        assert fp.finalized, 'Aggregate processor did not call finalizers'
Example #20
File: diagd.py Project: jpoley/ambassador
    def load_config_fs(self, path: str) -> None:
        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        self.logger.info("loading configuration from disk: %s" % path)

        scc = SecretSaver(app.logger, path, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=False, recurse=True)

        if not fetcher.elements:
            self.logger.info("no configuration found at %s" % path)
            return

        self._load_ir(aconf, fetcher, scc.null_reader, snapshot)
Example #21
    def load_config_fs(self, rqueue: queue.Queue, path: str) -> None:
        self.logger.info("loading configuration from disk: %s" % path)

        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        scc = FSSecretHandler(app.logger, path, app.snapshot_path, 0)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=app.k8s, recurse=True)

        if not fetcher.elements:
            self.logger.debug("no configuration resources found at %s" % path)
            # self._respond(rqueue, 204, 'ignoring empty configuration')
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
Example #22
def test_resourcefetcher_handle_k8s_service():
    aconf = Config()

    fetcher = ResourceFetcher(logger, aconf)

    # Test no metadata key
    svc = {}
    result = fetcher.handle_k8s_service(svc)
    assert result is None

    svc["metadata"] = {
        "name": "testservice",
        "annotations": {
            "foo": "bar"
        }
    }
    # Test no ambassador annotation
    result = fetcher.handle_k8s_service(svc)
    assert result == ('testservice.default', [])

    # Test empty annotation
    svc["metadata"]["annotations"]["getambassador.io/config"] = {}
    result = fetcher.handle_k8s_service(svc)
    assert result == ('testservice.default', [])

    # Test valid annotation
    svc["metadata"]["annotations"]["getambassador.io/config"] = """apiVersion: getambassador.io/v1
kind: Mapping
name: test_mapping
prefix: /test/
service: test:9999"""
    result = fetcher.handle_k8s_service(svc)
    expected = {
        'apiVersion': 'getambassador.io/v1',
        'kind': 'Mapping',
        'name': 'test_mapping',
        'prefix': '/test/',
        'service': 'test:9999',
        'namespace': 'default'
    }
    assert result == ('testservice.default', [expected])
Example #23
    def load_config(self, rqueue: queue.Queue, url):
        snapshot = url.split('/')[-1]
        ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")

        self.logger.info("copying configuration from %s to %s" %
                         (url, ss_path))

        # Grab the serialization, and save it to disk too.
        serialization = load_url_contents(self.logger,
                                          "%s/services" % url,
                                          stream2=open(ss_path, "w"))

        if os.environ.get('AMBASSADOR_ENABLE_ENDPOINTS'):
            serialization += '---\n' + \
                             load_url_contents(self.logger, "%s/endpoints" % url, stream2=open(ss_path, "a"))

        if not serialization:
            self.logger.debug("no data loaded from snapshot %s" % snapshot)
            # We never used to return here. I'm not sure if that's really correct?
            # self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
            # return

        scc = SecretSaver(app.logger, url, app.snapshot_path)

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.parse_yaml(serialization, k8s=True)

        if not fetcher.elements:
            self.logger.debug("no configuration found in snapshot %s" %
                              snapshot)

            # Don't actually bail here. If they send over a valid config that happens
            # to have nothing for us, it's still a legit config.
            # self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
            # return

        self._load_ir(rqueue, aconf, fetcher, scc.url_reader, snapshot)
Example #24
File: diagd.py Project: tsukker/ambassador
    def load_config_fs(self, rqueue: queue.Queue, path: str) -> None:
        self.logger.info("loading configuration from disk: %s" % path)

        # The "path" here can just be a path, but it can also be a command for testing,
        # if the user has chosen to allow that.

        if self.app.allow_fs_commands and (':' in path):
            pfx, rest = path.split(':', 1)

            if pfx.lower() == 'cmd':
                fields = rest.split(':', 1)

                cmd = fields[0].upper()

                args = fields[1:] if (len(fields) > 1) else None

                if cmd.upper() == 'CHIME':
                    self.logger.info('CMD: Chiming')

                    self.chime()

                    self._respond(rqueue, 200, 'Chimed')
                elif cmd.upper() == 'CHIME_RESET':
                    self.chimed = False
                    self.last_chime = False
                    self.env_good = False

                    self.app.scout.reset_events()
                    self.app.scout.report(mode="boot",
                                          action="boot1",
                                          no_cache=True)

                    self.logger.info('CMD: Reset chime state')
                    self._respond(rqueue, 200, 'CMD: Reset chime state')
                elif cmd.upper() == 'SCOUT_CACHE_RESET':
                    self.app.scout.reset_cache_time()

                    self.logger.info('CMD: Reset Scout cache time')
                    self._respond(rqueue, 200, 'CMD: Reset Scout cache time')
                elif cmd.upper() == 'ENV_OK':
                    self.env_good = True
                    self.failure_list = None

                    self.logger.info('CMD: Marked environment good')
                    self._respond(rqueue, 200, 'CMD: Marked environment good')
                elif cmd.upper() == 'ENV_BAD':
                    self.env_good = False
                    self.failure_list = ['failure forced']

                    self.logger.info('CMD: Marked environment bad')
                    self._respond(rqueue, 200, 'CMD: Marked environment bad')
                else:
                    self.logger.info(f'CMD: no such command "{cmd}"')
                    self._respond(rqueue, 400, f'CMD: no such command "{cmd}"')

                return
            else:
                self.logger.info(f'CONFIG_FS: invalid prefix "{pfx}"')
                self._respond(rqueue, 400,
                              f'CONFIG_FS: invalid prefix "{pfx}"')

            return

        snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
        scc = FSSecretHandler(app.logger, path, app.snapshot_path, "0")

        aconf = Config()
        fetcher = ResourceFetcher(app.logger, aconf)
        fetcher.load_from_filesystem(path, k8s=app.k8s, recurse=True)

        if not fetcher.elements:
            self.logger.debug("no configuration resources found at %s" % path)
            # self._respond(rqueue, 204, 'ignoring empty configuration')
            # return

        self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
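
For reference, the command form of the path argument handled above (only meaningful when allow_fs_commands is enabled) looks like this; the filesystem path on the last line is a placeholder:

# cmd:CHIME                 -> chime and respond 200
# cmd:CHIME_RESET           -> reset chime and Scout state
# cmd:SCOUT_CACHE_RESET     -> reset the Scout cache time
# cmd:ENV_OK, cmd:ENV_BAD   -> force the environment-health flag good or bad
# other:whatever            -> 400, invalid prefix
# /ambassador/config        -> no colon, so a normal filesystem load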
Example #25
def main(k8s_yaml_paths: List[str],
         debug: bool,
         force_pod_labels: bool,
         update: bool,
         source: List[str],
         labels: List[str],
         namespace: Optional[str],
         watch: str,
         include_ir: bool,
         include_aconf: bool,
         diff_path: Optional[str] = None,
         kat_name: Optional[str] = None) -> None:
    loglevel = logging.DEBUG if debug else logging.INFO

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s mockery %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")

    logger = logging.getLogger('mockery')

    logger.debug(f"reading from {k8s_yaml_paths}")

    if not source:
        source = [
            "Host", "service", "ingresses", "AuthService", "LogService",
            "Mapping", "Module", "RateLimitService", "TCPMapping",
            "TLSContext", "TracingService", "ConsulResolver",
            "KubernetesEndpointResolver", "KubernetesServiceResolver"
        ]

    if namespace:
        os.environ['AMBASSADOR_NAMESPACE'] = namespace

    # Make labels a list, instead of a tuple.
    labels = list(labels)
    labels_to_force = {l: True for l in labels or []}

    if kat_name:
        logger.debug(f"KAT name {kat_name}")

        # First set up some labels to force.

        labels_to_force["scope=AmbassadorTest"] = True
        labels_to_force[f"service={kat_name}"] = True

        kat_amb_id_label = f"kat-ambassador-id={kat_name}"

        if kat_amb_id_label not in labels_to_force:
            labels_to_force[kat_amb_id_label] = True
            labels.append(kat_amb_id_label)

        os.environ['AMBASSADOR_ID'] = kat_name

        # Forcibly override the cached ambassador_id.
        Config.ambassador_id = kat_name

    logger.debug(f"namespace {namespace or '*'}")
    logger.debug(f"labels to watch {', '.join(labels)}")
    logger.debug(
        f"labels to force {', '.join(sorted(labels_to_force.keys()))}")
    logger.debug(f"watch hook {watch}")
    logger.debug(f"sources {', '.join(source)}")

    for key in sorted(os.environ.keys()):
        if key.startswith('AMBASSADOR'):
            logger.debug(f"${key}={os.environ[key]}")

    if force_pod_labels:
        try:
            os.makedirs("/tmp/ambassador-pod-info")
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        with open("/tmp/ambassador-pod-info/labels", "w",
                  encoding="utf-8") as outfile:
            for l in labels_to_force:
                outfile.write(l)
                outfile.write("\n")

    # Pull in the YAML.
    input_yaml = ''.join([open(x, "r").read() for x in k8s_yaml_paths])
    manifest = parse_yaml(input_yaml)

    w = Mockery(logger, debug, source, ",".join(labels), namespace, watch)

    iteration = 0

    while True:
        iteration += 1

        if iteration > 10:
            print(f"!!!! Not stable after 10 iterations, failing")
            logger.error("Not stable after 10 iterations, failing")
            sys.exit(1)

        logger.info(f"======== START ITERATION {iteration}")

        w.load(manifest)

        logger.info(f"WATT_K8S: {w.snapshot}")

        hook_ok, any_changes = w.run_hook()

        if not hook_ok:
            raise Exception("hook failed")

        if any_changes:
            logger.info(
                f"======== END ITERATION {iteration}: watches changed!")
        else:
            logger.info(f"======== END ITERATION {iteration}: stable!")
            break

    # Once here, we should be good to go.
    try:
        os.makedirs("/tmp/ambassador/snapshots")
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    scc = MockSecretHandler(logger, "mockery", "/tmp/ambassador/snapshots",
                            f"v{iteration}")

    aconf = Config()

    logger.debug(f"Config.ambassador_id {Config.ambassador_id}")
    logger.debug(f"Config.ambassador_namespace {Config.ambassador_namespace}")

    logger.info(f"STABLE WATT_K8S: {w.snapshot}")

    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_watt(w.snapshot)
    aconf.load_all(fetcher.sorted())

    open("/tmp/ambassador/snapshots/aconf.json", "w",
         encoding="utf-8").write(aconf.as_json())

    ir = IR(aconf, secret_handler=scc)

    open("/tmp/ambassador/snapshots/ir.json", "w",
         encoding="utf-8").write(ir.as_json())

    econf = EnvoyConfig.generate(ir, "V2")
    bootstrap_config, ads_config, clustermap = econf.split_config()

    ads_config.pop('@type', None)
    with open("/tmp/ambassador/snapshots/econf.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(ads_config, pretty=True))

    with open("/tmp/ambassador/snapshots/bootstrap.json",
              "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(bootstrap_config, pretty=True))

    diag = Diagnostics(ir, econf)

    with open("/tmp/ambassador/snapshots/diag.json", "w",
              encoding="utf-8") as outfile:
        outfile.write(dump_json(diag.as_dict(), pretty=True))

    if diff_path:
        diffs = False

        pairs_to_check = [(os.path.join(diff_path, 'snapshots', 'econf.json'),
                           '/tmp/ambassador/snapshots/econf.json'),
                          (os.path.join(diff_path, 'bootstrap-ads.json'),
                           '/tmp/ambassador/snapshots/bootstrap.json')]

        if include_ir:
            pairs_to_check.append(
                (os.path.join(diff_path, 'snapshots',
                              'ir.json'), '/tmp/ambassador/snapshots/ir.json'))

        if include_aconf:
            pairs_to_check.append((os.path.join(diff_path, 'snapshots',
                                                'aconf.json'),
                                   '/tmp/ambassador/snapshots/aconf.json'))

        for gold_path, check_path in pairs_to_check:
            if update:
                logger.info(f"mv {check_path} {gold_path}")
                shutil.move(check_path, gold_path)
            elif not filecmp.cmp(gold_path, check_path):
                diffs = True

                gold_lines = open(gold_path, "r", encoding="utf-8").readlines()
                check_lines = open(check_path, "r",
                                   encoding="utf-8").readlines()

                for line in difflib.unified_diff(gold_lines,
                                                 check_lines,
                                                 fromfile=gold_path,
                                                 tofile=check_path):
                    sys.stdout.write(line)

        if diffs:
            sys.exit(1)
Example #26
    def setup(self):
        self.manager = ResourceManager(logger, Config())
        self.processor = ServiceProcessor(self.manager)
Example #27
    def setup(self):
        self.manager = ResourceManager(
            logger, Config(), DependencyManager([
                ServiceDependency(),
            ]))
        self.processor = ServiceProcessor(self.manager)
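
A sketch of a test built on this setup, assuming ServiceProcessor follows the same try_process/finalize protocol as the other processors in these examples; valid_service is a placeholder KubernetesObject wrapping a k8s Service:

    def test_service(self):
        assert self.processor.try_process(valid_service)  # placeholder resource
        self.processor.finalize()   # assumption: services are emitted at finalize time
        assert len(self.manager.elements) >= 1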
Example #28
    def _load_ir(self, rqueue: queue.Queue, aconf: Config,
                 fetcher: ResourceFetcher, secret_handler: SecretHandler,
                 snapshot: str) -> None:
        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_handler=secret_handler)

        ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
        open(ir_path, "w").write(ir.as_json())

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            app.check_scout("attempted bad update")
            self._respond(
                rqueue, 500,
                'ignoring: invalid Envoy configuration in snapshot %s' %
                snapshot)
            return

        snapcount = int(os.environ.get('AMBASSADOR_SNAPSHOT_COUNT', "4"))
        snaplist: List[Tuple[str, str]] = []

        if snapcount > 0:
            self.logger.debug("rotating snapshots for snapshot %s" % snapshot)

            # If snapcount is 4, this range statement becomes range(-4, -1)
            # which gives [ -4, -3, -2 ], which the list comprehension turns
            # into [ ( "-3", "-4" ), ( "-2", "-3" ), ( "-1", "-2" ) ]...
            # which is the list of suffixes to rename to rotate the snapshots.

            snaplist += [(str(x + 1), str(x))
                         for x in range(-1 * snapcount, -1)]

            # After dealing with that, we need to rotate the current file into -1.
            snaplist.append(('', '-1'))

        # Whether or not we do any rotation, we need to cycle in the '-tmp' file.
        snaplist.append(('-tmp', ''))
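        # Worked out for the default snapcount of 4, snaplist is now:
        #   [('-3', '-4'), ('-2', '-3'), ('-1', '-2'), ('', '-1'), ('-tmp', '')]
        # so older snapshots are pushed down one slot (-3 becomes -4, and so
        # on), the live files move to '-1', and the freshly written '-tmp'
        # files become current.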

        for from_suffix, to_suffix in snaplist:
            for fmt in [
                    "aconf{}.json", "econf{}.json", "ir{}.json",
                    "snapshot{}.yaml"
            ]:
                from_path = os.path.join(app.snapshot_path,
                                         fmt.format(from_suffix))
                to_path = os.path.join(app.snapshot_path,
                                       fmt.format(to_suffix))

                try:
                    self.logger.debug("rotate: %s -> %s" %
                                      (from_path, to_path))
                    os.rename(from_path, to_path)
                except IOError as e:
                    self.logger.debug("skip %s -> %s: %s" %
                                      (from_path, to_path, e))
                    pass
                except Exception as e:
                    self.logger.debug("could not rename %s -> %s: %s" %
                                      (from_path, to_path, e))

        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        self.logger.info("configuration updated from snapshot %s" % snapshot)
        self._respond(rqueue, 200,
                      'configuration updated from snapshot %s' % snapshot)

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)
            # app.scout_updater = PeriodicTrigger(lambda: app.watcher.check_scout("30s"), period=30)

        # Don't use app.check_scout; it will deadlock. And don't bother doing the Scout
        # update until after we've taken care of Envoy.
        self.check_scout("update")
Example #29
def config(config_dir_path: Parameter.REQUIRED,
           output_json_path: Parameter.REQUIRED,
           *,
           debug=False,
           debug_scout=False,
           check=False,
           k8s=False,
           ir=None,
           aconf=None,
           exit_on_error=False):
    """
    Generate an Envoy configuration

    :param config_dir_path: Configuration directory to scan for Ambassador YAML files
    :param output_json_path: Path to output envoy.json
    :param debug: If set, generate debugging output
    :param debug_scout: If set, generate debugging output when talking to Scout
    :param check: If set, generate configuration only if it doesn't already exist
    :param k8s: If set, assume configuration files are annotated K8s manifests
    :param exit_on_error: If set, will exit with status 1 on any configuration error
    :param ir: Pathname to which to dump the IR (not dumped if not present)
    :param aconf: Pathname to which to dump the aconf (not dumped if not present)
    """

    if debug:
        logger.setLevel(logging.DEBUG)

    if debug_scout:
        logging.getLogger('ambassador.scout').setLevel(logging.DEBUG)

    try:
        logger.debug("CHECK MODE  %s" % check)
        logger.debug("CONFIG DIR  %s" % config_dir_path)
        logger.debug("OUTPUT PATH %s" % output_json_path)

        dump_aconf: Optional[str] = aconf
        dump_ir: Optional[str] = ir

        # Bypass the existence check...
        output_exists = False

        if check:
            # ...oh no wait, they explicitly asked for the existence check!
            # Assume that the file exists (ie, we'll do nothing) unless we
            # determine otherwise.
            output_exists = True

            try:
                parse_json(open(output_json_path, "r").read())
            except FileNotFoundError:
                logger.debug("output file does not exist")
                output_exists = False
            except OSError:
                logger.warning("output file is not sane?")
                output_exists = False
            except json.decoder.JSONDecodeError:
                logger.warning("output file is not valid JSON")
                output_exists = False

            logger.info("Output file %s" %
                        ("exists" if output_exists else "does not exist"))

        rc = RichStatus.fromError("impossible error")

        if not output_exists:
            # Either we didn't need to check, or the check didn't turn up
            # a valid config. Regenerate.
            logger.info("Generating new Envoy configuration...")

            aconf = Config()
            fetcher = ResourceFetcher(logger, aconf)
            fetcher.load_from_filesystem(config_dir_path, k8s=k8s)
            aconf.load_all(fetcher.sorted())

            if dump_aconf:
                with open(dump_aconf, "w") as output:
                    output.write(aconf.as_json())
                    output.write("\n")

            # If exit_on_error is set, log errors and exit with status 1
            if exit_on_error and aconf.errors:
                raise Exception("errors in: {0}".format(', '.join(
                    aconf.errors.keys())))

            secret_handler = NullSecretHandler(logger, config_dir_path,
                                               config_dir_path, "0")

            ir = IR(aconf,
                    file_checker=file_checker,
                    secret_handler=secret_handler)

            if dump_ir:
                with open(dump_ir, "w") as output:
                    output.write(ir.as_json())
                    output.write("\n")

            # clize considers kwargs with False for default value as flags,
            # resulting in the logic below.
            # https://clize.readthedocs.io/en/stable/basics.html#accepting-flags

            logger.info("Writing envoy V2 configuration")
            v2config = V2Config(ir)
            rc = RichStatus.OK(msg="huh_v2")

            if rc:
                with open(output_json_path, "w") as output:
                    output.write(v2config.as_json())
                    output.write("\n")
            else:
                logger.error("Could not generate new Envoy configuration: %s" %
                             rc.error)

        scout = Scout()
        result = scout.report(action="config", mode="cli")
        show_notices(result)
    except Exception as e:
        handle_exception("EXCEPTION from config",
                         e,
                         config_dir_path=config_dir_path,
                         output_json_path=output_json_path)

        # This is fatal.
        sys.exit(1)
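
A sketch of invoking this entry point directly from Python; clize would normally supply these parameters from the command line, and the paths below are placeholders:

config(
    "/tmp/ambassador-config",    # directory of Ambassador YAML (placeholder)
    "/tmp/envoy.json",           # output envoy.json path (placeholder)
    k8s=True,
    exit_on_error=True,
    ir="/tmp/ir.json",           # optional: dump the IR here
    aconf="/tmp/aconf.json",     # optional: dump the aconf here
)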
Example #30
File: diagd.py Project: tsukker/ambassador
    def _load_ir(self, rqueue: queue.Queue, aconf: Config,
                 fetcher: ResourceFetcher, secret_handler: SecretHandler,
                 snapshot: str) -> None:
        aconf.load_all(fetcher.sorted())

        aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
        open(aconf_path, "w").write(aconf.as_json())

        ir = IR(aconf, secret_handler=secret_handler)

        ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
        open(ir_path, "w").write(ir.as_json())

        econf = EnvoyConfig.generate(ir, "V2")
        diag = Diagnostics(ir, econf)

        bootstrap_config, ads_config = econf.split_config()

        if not self.validate_envoy_config(config=ads_config,
                                          retries=self.app.validation_retries):
            self.logger.info(
                "no updates were performed due to invalid envoy configuration, continuing with current configuration..."
            )
            # Don't use app.check_scout; it will deadlock.
            self.check_scout("attempted bad update")
            self._respond(
                rqueue, 500,
                'ignoring: invalid Envoy configuration in snapshot %s' %
                snapshot)
            return

        snapcount = int(os.environ.get('AMBASSADOR_SNAPSHOT_COUNT', "4"))
        snaplist: List[Tuple[str, str]] = []

        if snapcount > 0:
            self.logger.debug("rotating snapshots for snapshot %s" % snapshot)

            # If snapcount is 4, this range statement becomes range(-4, -1)
            # which gives [ -4, -3, -2 ], which the list comprehension turns
            # into [ ( "-3", "-4" ), ( "-2", "-3" ), ( "-1", "-2" ) ]...
            # which is the list of suffixes to rename to rotate the snapshots.

            snaplist += [(str(x + 1), str(x))
                         for x in range(-1 * snapcount, -1)]

            # After dealing with that, we need to rotate the current file into -1.
            snaplist.append(('', '-1'))

        # Whether or not we do any rotation, we need to cycle in the '-tmp' file.
        snaplist.append(('-tmp', ''))

        for from_suffix, to_suffix in snaplist:
            for fmt in [
                    "aconf{}.json", "econf{}.json", "ir{}.json",
                    "snapshot{}.yaml"
            ]:
                from_path = os.path.join(app.snapshot_path,
                                         fmt.format(from_suffix))
                to_path = os.path.join(app.snapshot_path,
                                       fmt.format(to_suffix))

                try:
                    self.logger.debug("rotate: %s -> %s" %
                                      (from_path, to_path))
                    os.rename(from_path, to_path)
                except IOError as e:
                    self.logger.debug("skip %s -> %s: %s" %
                                      (from_path, to_path, e))
                    pass
                except Exception as e:
                    self.logger.debug("could not rename %s -> %s: %s" %
                                      (from_path, to_path, e))

        app.latest_snapshot = snapshot
        self.logger.info("saving Envoy configuration for snapshot %s" %
                         snapshot)

        with open(app.bootstrap_path, "w") as output:
            output.write(json.dumps(bootstrap_config, sort_keys=True,
                                    indent=4))

        with open(app.ads_path, "w") as output:
            output.write(json.dumps(ads_config, sort_keys=True, indent=4))

        app.aconf = aconf
        app.ir = ir
        app.econf = econf
        app.diag = diag

        if app.kick:
            self.logger.info("running '%s'" % app.kick)
            os.system(app.kick)
        elif app.ambex_pid != 0:
            self.logger.info("notifying PID %d ambex" % app.ambex_pid)
            os.kill(app.ambex_pid, signal.SIGHUP)

        if app.ir.k8s_status_updates:
            for name in app.ir.k8s_status_updates.keys():
                kind, update = app.ir.k8s_status_updates[name]

                self.logger.info(
                    f"doing K8s status update for {kind} {name}...")

                text = json.dumps(update)

                with open(f'/tmp/kstat-{kind}-{name}', 'w') as out:
                    out.write(text)

                cmd = [
                    '/ambassador/kubestatus', kind, '-f',
                    f'metadata.name={name}', '-u', '/dev/fd/0'
                ]
                self.logger.info(f"Running command: {cmd}")

                try:
                    rc = subprocess.run(cmd,
                                        input=text.encode('utf-8'),
                                        timeout=5)
                    self.logger.info(f'...update finished, rc {rc.returncode}')
                except subprocess.TimeoutExpired as e:
                    self.logger.error(f'...update timed out, {e}')

        self.logger.info("configuration updated from snapshot %s" % snapshot)
        self._respond(rqueue, 200,
                      'configuration updated from snapshot %s' % snapshot)

        if app.health_checks and not app.stats_updater:
            app.logger.info("starting Envoy status updater")
            app.stats_updater = PeriodicTrigger(app.watcher.update_estats,
                                                period=5)

        # Check our environment...
        self.check_environment()

        self.chime()