def test_digests(caplog):
    """Deploy the digest-ensemble example and verify the recorded digest
    keys/values, the git-based FilePath digests, and that a second deploy
    detects no changes (the configure task is skipped)."""
    ensemble_path = __file__ + "/../examples/digest-ensemble.yaml"
    manifest = LocalEnv(ensemble_path).get_manifest()
    buf = six.StringIO()  # keep the updated ensemble in memory instead of saving it
    job = Runner(manifest).run(JobOptions(startTime=1, out=buf))  # deploy
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    # print(job.out.getvalue())

    # the first recorded change should carry the expected digest keys and value
    first_change = job.manifest.manifest.config["changes"][0]
    assert (
        first_change["digestKeys"]
        == "run,cleartext_input,::anInstance::cleartext_prop"
    )
    assert first_change["digestValue"] == "961216502983569ae51c5c8f96b106ccecca0d3e"

    # a FilePath inside a git repo digests to a stable "git:<sha>" value
    repo_path = FilePath(__file__ + "/../fixtures/helmrepo")
    repo_digest = repo_path.__digestable__(dict(manifest=manifest))
    assert repo_digest == "git:800472c7b1b2ea128464b9144c1440ca7289a5fa"

    root_path = FilePath(__file__ + "/../..")  # root of repo
    root_digest = root_path.__digestable__(dict(manifest=manifest))
    assert root_digest.startswith("git:"), root_digest

    with caplog.at_level(logging.DEBUG):
        # reload the saved ensemble and deploy again -- nothing should change
        reloaded = YamlManifest(
            job.out.getvalue(),
            path=os.path.dirname(ensemble_path),
            localEnv=manifest.localEnv,
        )
        buf2 = six.StringIO()  # keep the updated ensemble in memory instead of saving it
        job2 = Runner(reloaded).run(JobOptions(startTime=2, out=buf2))
        assert not job2.unexpectedAbort, job2.unexpectedAbort.get_stack_trace()
        # print(job2.out.getvalue())
        # print(json.dumps(job2.json_summary(), indent=2))
        assert job2.json_summary() == {
            "job": {
                "id": "A01120000000",
                "status": "ok",
                "total": 0,
                "ok": 0,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 0,
            },
            "outputs": {},
            "tasks": [],
        }
        # the no-op must have been logged with the reason the task was skipped
        logMsg = "skipping task configure for instance nodeA with state NodeState.configured and status Status.ok: no change detected"
        assert logMsg in caplog.text
def test_import(self):
    """Import a node from an external manifest via an abstract node template
    with the "select" directive.

    Verifies that: the foreign instance's sensitive attribute is
    vault-encrypted in the saved ensemble, the output resolves to the foreign
    attribute, and reloading the ensemble creates a shadow instance for the
    imported resource.

    Fix: the ``finally`` block previously restored UNFURL_HOME only when it
    had been set; when it was originally unset, the ``""`` placeholder written
    during the test leaked into ``os.environ``. It is now removed instead.
    """
    foreign = (
        """
  apiVersion: %s
  kind: Manifest
  spec:
    service_template:
      node_types:
        test.nodes.AbstractTest:
          derived_from: tosca.nodes.Root
          properties:
            private_address:
              type: string
              required: false
              metadata:
                sensitive: true
          interfaces:
            Install:
              operations:
                check:
                  implementation: SetAttribute
    instances:
      anInstance:
        template:
          type: test.nodes.AbstractTest
  """
        % API_VERSION
    )

    localConfig = """
  apiVersion: unfurl/v1alpha1
  kind: Project
  environments:
    defaults:
      repositories:
        in_context:
          url: file:.
      secrets:
        vault_secrets:
          default: a_password
      external:
        foreign:
          manifest:
            file: foreignmanifest.yaml
          instance: "*"  # this is the default
  """

    # import a node from an external manifest and have an abstract node
    # template select it; check will be run on it each time
    mainManifest = (
        """
  apiVersion: %s
  kind: Manifest
  spec:
    service_template:
      imports:
        - file: foreignmanifest.yaml#/spec/service_template
          repository: in_context
      topology_template:
        outputs:
          server_ip:
            value: {eval: "::foreign:anInstance::private_address"}
        node_templates:
          anInstance:
            type: test.nodes.AbstractTest
            directives:
              - select
  """
        % API_VERSION
    )

    runner = CliRunner()
    # delete UNFURL_HOME so the test is isolated from the local configuration
    try:
        UNFURL_HOME = os.environ.get("UNFURL_HOME")
        with runner.isolated_filesystem():
            os.environ["UNFURL_HOME"] = ""
            with open("foreignmanifest.yaml", "w") as f:
                f.write(foreign)
            with open("unfurl.yaml", "w") as f:
                f.write(localConfig)
            with open("manifest.yaml", "w") as f:
                f.write(mainManifest)

            manifest = LocalEnv("manifest.yaml").get_manifest()
            # the vault secret from unfurl.yaml must be active
            assert manifest.manifest.vault and manifest.manifest.vault.secrets
            job = Runner(manifest).run(
                JobOptions(add=True, startTime="time-to-test")
            )
            # print(job.out.getvalue())
            # print(job.jsonSummary(True))
            assert job.status == Status.ok, job.summary()
            # only the foreign instance's check operation should have run
            self.assertEqual(
                [
                    {
                        "operation": "check",
                        "configurator": "tests.test_tosca.SetAttributeConfigurator",
                        "changed": True,
                        "priority": "required",
                        "reason": "check",
                        "status": "ok",
                        "target": "foreign:anInstance",
                        "targetStatus": "ok",
                        "targetState": None,  # "started",
                        "template": "anInstance",
                        "type": "test.nodes.AbstractTest",
                    }
                ],
                job.json_summary()["tasks"],
            )
            self.assertEqual(job.get_outputs()["server_ip"], "10.0.0.1")
            # both the main and the foreign manifest should be tracked
            self.assertEqual(
                len(manifest.localEnv._manifests), 2, manifest.localEnv._manifests
            )
            # print("output", job.out.getvalue())
            # the sensitive value must only appear vault-encrypted in the output
            assert "10.0.0.1" not in job.out.getvalue(), job.out.getvalue()
            vaultString1 = "server_ip: !vault |\n $ANSIBLE_VAULT;1.1;AES256"
            assert vaultString1 in job.out.getvalue()
            vaultString2 = (
                "private_address: !vault |\n $ANSIBLE_VAULT;1.1;AES256"
            )
            assert vaultString2 in job.out.getvalue()

            # reload:
            manifest2 = LocalEnv("manifest.yaml").get_manifest()
            assert manifest2.lastJob
            # test that restored manifest creates a shadow instance for the
            # foreign instance
            imported = manifest2.imports["foreign"].resource
            assert imported
            imported2 = manifest2.imports.find_import("foreign:anInstance")
            assert imported2
            assert imported2.shadow
            self.assertIs(imported2.root, manifest2.get_root_resource())
            self.assertEqual(imported2.attributes["private_address"], "10.0.0.1")
            self.assertIsNot(imported2.shadow.root, manifest2.get_root_resource())
    finally:
        if UNFURL_HOME is not None:
            os.environ["UNFURL_HOME"] = UNFURL_HOME
        else:
            # bug fix: remove the "" placeholder instead of leaking it into
            # the environment of subsequent tests
            os.environ.pop("UNFURL_HOME", None)
def test_manifest(self):
    """Deploy the mock helm ensemble and verify the full task summary; then
    check that re-running the saved ensemble is a no-op and that undeploy
    only runs the single task that owns the created resources."""
    path = __file__ + "/../examples/mock-helm-ensemble.yaml"
    manifest = YamlManifest(path=path)
    runner = Runner(manifest)
    assert not manifest.lastJob, "expected new manifest"
    output = six.StringIO()  # so we don't save the file
    job = runner.run(JobOptions(add=True, out=output, startTime=1))
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    # print(manifest.statusSummary())
    # all five tasks (cluster + namespace discovery, helm execute/subtask/discover)
    # should run and report "changed"
    self.assertEqual(
        job.json_summary(),
        {
            "job": {
                "id": "A01110000000",
                "status": "ok",
                "total": 5,
                "ok": 5,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 5,
            },
            "outputs": {},
            "tasks": [
                {
                    "status": "ok",
                    "target": "stagingCluster",
                    "operation": "discover",
                    "template": "stagingCluster",
                    "type": "unfurl.nodes.K8sCluster",
                    "targetStatus": "ok",
                    "targetState": None,
                    "changed": True,
                    "configurator": "unfurl.configurators.k8s.ClusterConfigurator",
                    "priority": "required",
                    "reason": "discover",
                },
                {
                    "status": "ok",
                    "target": "defaultNamespace",
                    "operation": "discover",
                    "template": "defaultNamespace",
                    "type": "unfurl.nodes.K8sNamespace",
                    "targetStatus": "ok",
                    "targetState": None,
                    "changed": True,
                    "configurator": "unfurl.configurators.k8s.ResourceConfigurator",
                    "priority": "required",
                    "reason": "discover",
                },
                {
                    "status": "ok",
                    "target": "gitlab-release",
                    "operation": "execute",
                    "template": "gitlab-release",
                    "type": "unfurl.nodes.HelmRelease",
                    "targetStatus": "pending",
                    "targetState": None,
                    "changed": True,
                    "configurator": "tests.test_examples.HelmConfigurator",
                    "priority": "required",
                    "reason": "step:helm",
                },
                {
                    "status": "ok",
                    "target": "gitlab-release",
                    "operation": "subtaskOperation",
                    "template": "gitlab-release",
                    "type": "unfurl.nodes.HelmRelease",
                    "targetStatus": "pending",
                    "targetState": None,
                    "changed": True,
                    "configurator": "tests.test_examples.DummyShellConfigurator",
                    "priority": "required",
                    "reason": "subtask: for step:helm: unfurl.interfaces.install.Helm.execute",
                },
                {
                    "status": "ok",
                    "target": "gitlab-release",
                    "operation": "discover",
                    "template": "gitlab-release",
                    "type": "unfurl.nodes.HelmRelease",
                    "targetStatus": "ok",
                    "targetState": None,
                    "changed": True,
                    "configurator": "unfurl.configurators.shell.ShellConfigurator",
                    "priority": "required",
                    "reason": "step:helm",
                },
            ],
        },
    )
    # manifest shouldn't have changed
    # print("1", output.getvalue())
    baseDir = __file__ + "/../examples/"
    # reload the saved ensemble and deploy again
    manifest2 = YamlManifest(output.getvalue(), path=baseDir)
    # print(manifest2.statusSummary())
    output2 = six.StringIO()
    job2 = Runner(manifest2).run(
        JobOptions(add=True, out=output2, startTime=2))
    # print("2", output2.getvalue())
    # print("2", job2.json_summary(True))
    # print(job2._json_plan_summary(True))
    assert not job2.unexpectedAbort, job2.unexpectedAbort.get_stack_trace()
    # should not have found any tasks to run:
    assert len(job2.workDone) == 0, job2.workDone
    # self.assertEqual(output.getvalue(), output2.getvalue())

    output3 = six.StringIO()
    manifest3 = YamlManifest(output2.getvalue(), path=baseDir)
    job3 = Runner(manifest3).run(
        JobOptions(workflow="undeploy", out=output3, startTime=2))
    # print(output3.getvalue())
    # only the chart delete task should have ran as it owns the resources it created
    # print(job3.json_summary(True))
    assert len(job3.workDone) == 1, job3.json_summary()
    tasks = list(job3.workDone.values())
    # the release's target should end up absent after undeploy
    assert tasks[0].target.status.name == "absent", tasks[0].target.status
def test_k8s_config(self):
    """Deploy MANIFEST, verify the secret's value is redacted in the saved
    ensemble, then undeploy and check both k8s resources are deleted."""
    os.environ["TEST_SECRET"] = "a secret"
    deploy_job = Runner(YamlManifest(MANIFEST)).run(
        JobOptions(add=True, startTime=1)
    )
    assert not deploy_job.unexpectedAbort
    assert deploy_job.status == Status.ok, deploy_job.summary()
    # print(deploy_job.summary())
    # print(deploy_job.out.getvalue())

    # verify secret contents isn't saved in config -- neither plaintext nor base64
    saved = deploy_job.out.getvalue()
    assert "a secret" not in saved
    assert "YSBzZWNyZXQ" not in saved  # base64 of "a secret"
    # print(saved)
    # only the redaction marker may appear in the saved ensemble
    assert "<<REDACTED>>" in saved
    assert not deploy_job.unexpectedAbort
    assert deploy_job.status == Status.ok, deploy_job.summary()

    # round-trip: load the saved ensemble and tear everything down
    undeploy_job = Runner(YamlManifest(saved)).run(
        JobOptions(workflow="undeploy", startTime=2)
    )
    summary = undeploy_job.json_summary()
    assert not undeploy_job.unexpectedAbort
    assert undeploy_job.status == Status.ok, undeploy_job.summary()
    assert summary == {
        "job": {
            "id": "A01120000000",
            "status": "ok",
            "total": 2,
            "ok": 2,
            "error": 0,
            "unknown": 0,
            "skipped": 0,
            "changed": 2,
        },
        "outputs": {},
        "tasks": [
            {
                "status": "ok",
                "target": "testSecret",
                "operation": "delete",
                "template": "testSecret",
                "type": "unfurl.nodes.K8sSecretResource",
                "targetStatus": "absent",
                "targetState": "deleted",
                "changed": True,
                "configurator": "unfurl.configurators.k8s.ResourceConfigurator",
                "priority": "required",
                "reason": "undeploy",
            },
            {
                "status": "ok",
                "target": "k8sNamespace",
                "operation": "delete",
                "template": "k8sNamespace",
                "type": "unfurl.nodes.K8sNamespace",
                "targetStatus": "absent",
                "targetState": "deleted",
                "changed": True,
                "configurator": "unfurl.configurators.k8s.ResourceConfigurator",
                "priority": "required",
                "reason": "undeploy",
            },
        ],
    }
    assert len(summary["tasks"]) == 2, summary
def test_dependencies(self):
    """
    Don't run a task if it depends on a live attribute on a non-operational instance.

    C is deployed after A and B and
    A depends on a C property (which are static)
    B depends on a C attribute (which are live)
    So B should run after C
    """
    self.maxDiff = None
    manifest = YamlManifest(manifestContent)
    runner = Runner(manifest)
    job = runner.run(JobOptions(startTime=1))  # deploy
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    summary = job.json_summary()
    # print("deployed")
    # print(json.dumps(summary, indent=2))
    # print(job.out.getvalue())

    # dependencies detected during render should be saved
    # (changes[2] is nodeB's task, which depends on nodeC's live attribute)
    dependencies = [
        dict(ref="::nodeC::attr", required=True, expected=None)
    ]
    self.assertEqual(
        dependencies,
        job.manifest.manifest.config["changes"][2]["dependencies"],
    )
    # task order reflects the dependency ordering: A, then C, then B
    self.assertEqual(
        summary,
        {
            "job": {
                "id": "A01110000000",
                "status": "ok",
                "total": 3,
                "ok": 3,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 3,
            },
            "outputs": {},
            "tasks": [
                {
                    "status": "ok",
                    "target": "nodeA",
                    "operation": "configure",
                    "template": "nodeA",
                    "type": "tosca.nodes.Root",
                    "targetStatus": "ok",
                    "targetState": "configured",
                    "changed": True,
                    "configurator": "unfurl.configurators.TemplateConfigurator",
                    "priority": "required",
                    "reason": "add",
                },
                {
                    "status": "ok",
                    "target": "nodeC",
                    "operation": "configure",
                    "template": "nodeC",
                    "type": "nodes.Test",
                    "targetStatus": "ok",
                    "targetState": "configured",
                    "changed": True,
                    "configurator": "unfurl.configurators.TemplateConfigurator",
                    "priority": "required",
                    "reason": "add",
                },
                {
                    "status": "ok",
                    "target": "nodeB",
                    "operation": "configure",
                    "template": "nodeB",
                    "type": "tosca.nodes.Root",
                    "targetStatus": "ok",
                    "targetState": "configured",
                    "changed": True,
                    "configurator": "unfurl.configurators.TemplateConfigurator",
                    "priority": "required",
                    "reason": "add",
                },
            ],
        },
    )
    # Deploy again: everything (including B) was already deployed in the first
    # run, so the second deploy should find no tasks to run
    manifest2 = YamlManifest(job.out.getvalue())
    job = Runner(manifest2).run(JobOptions(startTime=2))
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    summary = job.json_summary()
    # changes = job.manifest.manifest.config["changes"]
    # XXX test that attr: "live" is in changes
    # print(job.out.getvalue())
    self.assertEqual(
        summary,
        {
            "job": {
                "id": "A01120000000",
                "status": "ok",
                "total": 0,
                "ok": 0,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 0,
            },
            "outputs": {},
            "tasks": [],
        },
    )