Example #1
    def test_deploy(self):
        # make sure the local helm repo fixture is being served
        f = urllib.request.urlopen(
            "http://localhost:8010/fixtures/helmrepo/index.yaml")
        f.close()

        runner = Runner(YamlManifest(manifest))
        run1 = runner.run(JobOptions(planOnly=True, verbose=3, startTime=1))
        mysql_release = runner.manifest.rootResource.findResource(
            "mysql_release")
        query = ".::.requirements::[.name=host]::.target::name"
        res = mysql_release.query(query)
        assert res == 'unfurl-helm-unittest'

        runner = Runner(YamlManifest(manifest))
        run1 = runner.run(JobOptions(dryrun=False, verbose=3, startTime=1))
        assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()
        summary = run1.jsonSummary()
        # runner.manifest.statusSummary()
        # print(summary)
        self.assertEqual(
            summary["job"],
            {
                "id": "A01110000000",
                "status": "ok",
                "total": 4,
                "ok": 4,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 4,
            },
        )
        assert all(task["targetStatus"] == "ok"
                   for task in summary["tasks"]), summary["tasks"]
Example #2
    def test_supervisor(self):
        cli_runner = CliRunner()
        with cli_runner.isolated_filesystem():
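            # isolated_filesystem() runs the body in a fresh temporary working directory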
            src_path = Path(
                __file__).parent / "examples" / "supervisor-ensemble.yaml"
            path = shutil.copy(src_path, ".")
            runner = Runner(YamlManifest(path=path))
            try:
                job = runner.run(JobOptions(startTime=1, check=True))  # deploy
                assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace(
                )
                summary = job.json_summary()
                # print(json.dumps(summary, indent=2))
                self.assertEqual(
                    {
                        "id": "A01110000000",
                        "status": "ok",
                        "total": 5,
                        "ok": 5,
                        "error": 0,
                        "unknown": 0,
                        "skipped": 0,
                        "changed": 4,
                    },
                    summary["job"],
                )

                time.sleep(0.25)
                f = urllib.request.urlopen("http://127.0.0.1:8012/")
                expected = b"Directory listing for /"
                self.assertIn(expected, f.read())

                runner = Runner(YamlManifest(path=path))
                job = runner.run(JobOptions(workflow="undeploy", startTime=2))
                assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace(
                )
                summary = job.json_summary()
                # print(json.dumps(summary, indent=2))
                self.assertEqual(
                    {
                        "id": "A01120000000",
                        "status": "ok",
                        "total": 3,
                        "ok": 3,
                        "error": 0,
                        "unknown": 0,
                        "skipped": 0,
                        "changed": 3,
                    },
                    summary["job"],
                )
            finally:
                # NOTE: to manually kill: pkill -lf supervisord
                if os.path.exists("supervisord/local/supervisord.pid"):
                    with open("supervisord/local/supervisord.pid") as f:
                        pid = int(f.read())
                        print("killing", pid)
                        os.kill(pid, signal.SIGINT)
Example #3
    def test_check(self):
        runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
        runner.run(JobOptions(workflow="deploy"))
        job = runner.run(JobOptions(workflow="check"))

        assert job.status == Status.ok
        task = list(job.workDone.values())[0]
        # this means that dns records were correctly set during deploy:
        assert task.target_status == Status.ok
        assert task.result.result == "DNS records in sync"
Example #4
    def test_supervisor(self):
        cliRunner = CliRunner()
        with cliRunner.isolated_filesystem():
            runner = Runner(YamlManifest(manifest))
            try:
                job = runner.run(JobOptions(startTime=1))  # deploy
                assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace(
                )
                summary = job.jsonSummary()
                self.assertEqual(
                    {
                        "id": "A01110000000",
                        "status": "ok",
                        "total": 4,
                        "ok": 4,
                        "error": 0,
                        "unknown": 0,
                        "skipped": 0,
                        "changed": 3,
                    },
                    summary["job"],
                )

                # print(json.dumps(summary, indent=2))
                time.sleep(0.25)
                f = urllib.request.urlopen("http://127.0.0.1:8012/")
                expected = b"Directory listing for /"
                self.assertIn(expected, f.read())

                runner = Runner(YamlManifest(job.out.getvalue()))
                job = runner.run(JobOptions(workflow="undeploy", startTime=2))
                assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace(
                )
                summary = job.jsonSummary()
                # print(json.dumps(summary, indent=2))
                self.assertEqual(
                    {
                        "id": "A01120000000",
                        "status": "ok",
                        "total": 3,
                        "ok": 3,
                        "error": 0,
                        "unknown": 0,
                        "skipped": 0,
                        "changed": 3,
                    },
                    summary["job"],
                )
            finally:
                if os.path.exists("supervisord/local/supervisord.pid"):
                    with open("supervisord/local/supervisord.pid") as f:
                        pid = int(f.read())
                        print("killing", pid)
                        os.kill(pid, signal.SIGINT)
Example #5
    def test_delete(self):
        runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
        job = runner.run(JobOptions(workflow="deploy"))
        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
        node = job.rootResource.find_resource("test_node")
        assert node and len(node.attributes["zone"]) == 2

        job = runner.run(JobOptions(workflow="undeploy"))
        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
        node = job.rootResource.find_resource("test_node")
        assert dict(node.attributes["zone"]) == {}
Example #6
    def test_preConditions(self):
        """test that the configuration only runs if the resource meets the requirements"""
        runner = Runner(YamlManifest(manifest))
        test1 = runner.manifest.get_root_resource().find_resource("test1")
        assert test1

        self.assertEqual(test1.attributes["meetsTheRequirement"], "copy")
        #     notYetProvided = runner.manifest.spec[0].findMissingProvided(test1)
        #     self.assertEqual(str(notYetProvided[1]),
        # """[<ValidationError: "'copyOfMeetsTheRequirement' is a required property">]""")

        jobOptions1 = JobOptions(instance="test1", startTime=1)
        run1 = runner.run(jobOptions1)
        # print(run1.out.getvalue())
        assert not run1.unexpectedAbort, run1.unexpectedAbort.get_stack_trace()
        self.assertEqual(test1.attributes["copyOfMeetsTheRequirement"], "copy")

        # provided = runner.manifest.spec[0].findMissingProvided(test1)
        # assert not provided, provided

        # check that the modifications were recorded
        self.assertEqual(
            runner.manifest.manifest.config["changes"][0]["changes"],
            {"::test1": {
                "copyOfMeetsTheRequirement": "copy"
            }},
        )

        test2 = runner.manifest.get_root_resource().find_resource("test2")
        assert test2
        requiredAttribute = test2.attributes["meetsTheRequirement"]
        assert requiredAttribute is False, requiredAttribute
        # missing = runner.manifest.specs[0].findMissingRequirements(test2)
        # self.assertEqual(str(missing[1]), '''[<ValidationError: "False is not of type 'string'">]''')

        self.verifyRoundtrip(run1.out.getvalue(), jobOptions1)

        jobOptions2 = JobOptions(instance="test2", startTime=2)
        run2 = runner.run(jobOptions2)
        assert not run2.unexpectedAbort, run2.unexpectedAbort.get_stack_trace()
        assert run2.status == Status.error, run2.status
        # XXX better error reporting
        # self.assertEqual(str(run2.problems), "can't run required configuration: resource test2 doesn't meet requirement")

        # print(run2.out.getvalue())
        # don't re-run the failed configurations so nothing will have changed
        jobOptions2.repair = "none"
        jobOptions2.skip_new = True
        self.verifyRoundtrip(run2.out.getvalue(), jobOptions2)
Example #7
    def test_login(self):
        """
        test that runner figures out the proper tasks to run
        """
        import docker

        client = docker.from_env()
        assert client, "docker not installed?"

        runner = Runner(YamlManifest(manifest))
        # pickled = pickle.dumps(runner.manifest, -1)
        # manifest2 = pickle.loads(pickled)

        run1 = runner.run(JobOptions(instance="test1"))
        assert len(run1.workDone) == 1, run1.workDone
        tasks = list(run1.workDone.values())
        # docker login will fail because user doesn't exist:
        assert tasks[0].status.name == "error", tasks[0].status
        self.assertIn("401 Client Error",
                      tasks[0].result.result.get("msg", ""))
        # but the repository and image path will have been created
        self.assertEqual(
            tasks[0].result.outputs.get("image_path"),
            "index.docker.io/repo/image",
        )
        registry = tasks[0].result.outputs.get("registry")
        assert registry and isinstance(registry,
                                       toscaparser.repositories.Repository)
        assert not run1.unexpectedAbort, run1.unexpectedAbort.get_stack_trace()
Example #8
File: test_helm.py Project: mccue/unfurl
    def test_deploy(self):
        # make sure the local helm repo fixture is being served
        f = urllib.request.urlopen(
            "http://localhost:8010/fixtures/helmrepo/index.yaml")
        f.close()

        runner = Runner(YamlManifest(manifest))

        run1 = runner.run(JobOptions(dryrun=False, verbose=3, startTime=1))
        assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()
        summary = run1.jsonSummary()
        # runner.manifest.statusSummary()
        # print(summary)
        self.assertEqual(
            summary["job"],
            {
                "id": "A01110000000",
                "status": "ok",
                "total": 4,
                "ok": 4,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 4,
            },
        )
        assert all(task["targetStatus"] == "ok"
                   for task in summary["tasks"]), summary["tasks"]
Example #9
    def test_discover(self):
        path = __file__ + "/../examples/helm-manifest.yaml"
        manifest = YamlManifest(path=path)
        runner = Runner(manifest)
        assert not manifest.lastJob, "expected new manifest"
        output = six.StringIO()  # so we don't save the file
        job = runner.run(JobOptions(workflow="discover", out=output, startTime=1))
        # print(job.summary())
        # print("discovered", runner.manifest.tosca.discovered)
        # print("discovered manifest", output.getvalue())
        assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace()

        baseDir = __file__ + "/../examples/"
        manifest2 = YamlManifest(output.getvalue(), path=baseDir)
        manifest2.manifest.path = os.path.abspath(
            path
        )  # set the basedir which sets the current working dir
        # manifest2.statusSummary()
        output2 = six.StringIO()
        job2 = Runner(manifest2).run(
            JobOptions(workflow="discover", out=output2, startTime=2)
        )
        # print("2", output2.getvalue())
        # print('job2', job2.summary())
        assert not job2.unexpectedAbort, job2.unexpectedAbort.getStackTrace()
        # print("job", json.dumps(job2.jsonSummary(), indent=2))
        # the second discover run should find the same tasks:
        assert len(job2.workDone) == 8, list(job2.workDone)
Example #10
def test_result_template_errors(caplog):
    manifest = """\
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
  service_template:
    topology_template:
      node_templates:
        testNode:
          type: tosca.nodes.Root
          interfaces:
           Standard:
            operations:
              configure:
                implementation:
                  className: unfurl.configurators.TemplateConfigurator
                inputs:
                  resultTemplate: |
                    - name: .self
                      attributes:
                        outputVar: "{{ SELF.missing }}"
"""
    runner = Runner(YamlManifest(manifest))
    job = runner.run()
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    for record in caplog.records:
        if record.levelname == "WARNING":
            assert (
                record.getMessage() ==
                'error processing resultTemplate for testNode: <<Error rendering template: missing attribute or key: "missing">>'
            )
            break
    else:
        assert False, "log message not found"
Example #11
 def test_fileRef(self):
     simple = ("""
 apiVersion: %s
 kind: Manifest
 spec:
   service_template:
     topology_template:
       node_templates:
         test:
           type: tosca.nodes.Root
           properties:
             file:
               eval:
                 file: foo.txt
           interfaces:
             Standard:
                 create:
                   implementation:  FileTest
                   inputs:
                     path:
                       eval: file::path
                     contents:
                       eval: file::contents
 """ % API_VERSION)
     cliRunner = CliRunner()
     with cliRunner.isolated_filesystem():  # as tmpDir
         manifest = YamlManifest(simple, path=".")
         runner = Runner(manifest)
         output = six.StringIO()
         with open("foo.txt", "w") as f:
             f.write("test")
         job = runner.run(JobOptions(add=True, out=output,
                                     startTime="test"))
     task = list(job.workDone.values())[0]
     self.assertEqual(task.result.result, "foo.txt")
Example #12
    def test_manifest(self):
        path = __file__ + "/../examples/helm-manifest.yaml"
        manifest = YamlManifest(path=path)
        runner = Runner(manifest)

        assert not manifest.lastJob, "expected new manifest"
        output = six.StringIO()  # so we don't save the file
        job = runner.run(JobOptions(add=True, out=output, startTime="test"))
        assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace()

        # manifest shouldn't have changed
        # print("1", output.getvalue())
        manifest2 = YamlManifest(output.getvalue())
        # manifest2.statusSummary()
        output2 = six.StringIO()
        job2 = Runner(manifest2).run(
            JobOptions(add=True, out=output2, startTime=1))
        # print("2", output2.getvalue())
        # print(job2.summary())
        assert not job2.unexpectedAbort, job2.unexpectedAbort.getStackTrace()
        # should not have found any tasks to run:
        assert len(job2.workDone) == 0, job2.workDone
        self.maxDiff = None
        # self.assertEqual(output.getvalue(), output2.getvalue())

        output3 = six.StringIO()
        manifest3 = YamlManifest(output2.getvalue())
        job3 = Runner(manifest3).run(
            JobOptions(workflow="undeploy", out=output3, startTime=2))
        # print(output3.getvalue())
        # only the chart delete task should have run as it owns the resources it created
        # print(job3.jsonSummary())
        assert len(job3.workDone) == 1, job3.jsonSummary()
        tasks = list(job3.workDone.values())
        assert tasks[0].target.status.name == "absent", tasks[0].target.status
Example #13
File: utils.py Project: onecommons/unfurl
def lifecycle(
        manifest: Manifest,
        steps: Optional[Iterable[Step]] = DEFAULT_STEPS) -> Iterable[Job]:
    runner = Runner(manifest)
    for i, step in enumerate(steps, start=1):
        print(f"starting step #{i} - {step.workflow}")
        job = runner.run(JobOptions(workflow=step.workflow, starttime=i))
        yield _check_job(job, i, step)
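
A minimal usage sketch (hypothetical: make_test_manifest stands in for whatever builds the test's Manifest):

def test_lifecycle_sketch():
    # drive the workflows listed in DEFAULT_STEPS in order; each yielded job
    # has already been passed through _check_job() by lifecycle()
    manifest = make_test_manifest()  # hypothetical helper returning a Manifest
    for job in lifecycle(manifest):
        assert job.status == Status.ok, job.json_summary()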
Example #14
    def test_timeout_with_ensemble(self):
        runner = Runner(YamlManifest(ENSEMBLE_TIMEOUT))
        start_time = datetime.now()

        job = runner.run(JobOptions(instance="test_node"))

        delta = datetime.now() - start_time
        assert job.status == Status.error
        assert delta < timedelta(seconds=2), delta - timedelta(seconds=2)
Example #15
    def test_addingResources(self):
        runner = Runner(YamlManifest(manifest))
        jobOptions = JobOptions(instance="test3", startTime=1)
        run = runner.run(jobOptions)
        assert not run.unexpectedAbort, run.unexpectedAbort.get_stack_trace()
        # self.assertEqual(list(run.workDone.keys()), [('test3', 'test'), ('added1', 'config1')])

        # print('config', run.out.getvalue())

        changes = runner.manifest.manifest.config["changes"][0]["changes"]
        added = {".added": {"name": "added1", "template": "test1"}}
        self.assertEqual(changes["::added1"], added)

        # verify modified
        self.assertEqual(changes["::test3"],
                         {"copyOfMeetsTheRequirement": "copy"})

        # print('test3', run.out.getvalue())
        jobOptions.repair = "none"
        self.verifyRoundtrip(run.out.getvalue(), jobOptions)

        jobOptions = JobOptions(instance="test4", startTime=2)
        run = runner.run(jobOptions)
        assert not run.unexpectedAbort, run.unexpectedAbort.get_stack_trace()
        self.assertEqual(
            [(t.name, t.target.name) for t in run.workDone.values()],
            [
                ("for add: Standard.configure", "test4"),
                ("for add: Standard.configure", "added2"),
            ],
        )
        # print('test4', run.out.getvalue())

        # XXX
        # verify dependencies added
        # dependencies = lookupPath(
        #     runner.manifest.manifest.config,
        #     "root.instances.test4.status.configurations.test.dependencies".split("."),
        # )
        # self.assertEqual(dependencies, [{"ref": "::added2"}])

        jobOptions.repair = "none"
        self.verifyRoundtrip(run.out.getvalue(), jobOptions)
Example #16
    def test_run_without_dry_run(self, command, dryrun):
        ensemble = ENSEMBLE_DRY_RUN.format(command=command, dryrun=dryrun)
        runner = Runner(YamlManifest(ensemble))

        job = runner.run(JobOptions(instance="test_node", dryrun=False))

        assert job.status == Status.ok
        task = list(job.workDone.values())[0]
        cmd = task.result.result["cmd"].strip()
        assert cmd == "echo hello world"
Example #17
    def test_error_if_dry_run_not_defined_for_task(self):
        ensemble = ENSEMBLE_DRY_RUN.format(command="command: echo hello world",
                                           dryrun="")
        runner = Runner(YamlManifest(ensemble))

        job = runner.run(JobOptions(instance="test_node", dryrun=True))

        task = list(job.workDone.values())[0]
        assert job.status == Status.error
        assert task.result.result == "could not run: dry run not supported"
Example #18
    def test_exclusive(self, manager_sync):
        runner = Runner(YamlManifest(ENSEMBLE_EXCLUSIVE))

        job = runner.run(JobOptions(workflow="deploy"))

        assert job.status == Status.ok
        node = job.rootResource.find_resource("test_node")
        # records are replaced by instance
        assert len(node.attributes["zone"]) == 1
        assert manager_sync.called
Example #19
    def test_container(self):
        """
        test that runner figures out the proper tasks to run
        """
        import docker

        client = docker.from_env()
        assert client, "docker not installed?"

        runner = Runner(YamlManifest(manifest))
        # pickled = pickle.dumps(runner.manifest, -1)
        # manifest2 = pickle.loads(pickled)

        run1 = runner.run(JobOptions(check=True, template="container1"))
        # configure (start op shouldn't run since docker_container sets state to started)
        assert len(run1.workDone) == 2, run1.workDone
        tasks = list(run1.workDone.values())
        assert not tasks[1].target.attributes.get(
            "container"), "testing that container property isn't required"
        # print([task.result.outputs for task in tasks])
        container = tasks[1].result.outputs.get("container")
        assert container
        self.assertEqual(container["Name"], "/test_docker")
        self.assertEqual(container["State"]["Status"], "exited")
        self.assertEqual(container["Config"]["Image"], "busybox")
        self.assertIn("hello", container["Output"].strip())

        assert tasks[0].status.name == "ok", tasks[0].status
        assert tasks[1].status.name == "ok", tasks[1].status
        assert not run1.unexpectedAbort, run1.unexpectedAbort.get_stack_trace()
        assert tasks[0].target.status.name == "ok", tasks[0].target.status
        assert tasks[1].target.status.name == "ok", tasks[1].target.status

        run2 = runner.run(
            JobOptions(workflow="undeploy", template="container1"))
        # stop op shouldn't be called, just delete
        assert len(run2.workDone) == 1, run2.workDone
        assert not run2.unexpectedAbort, run2.unexpectedAbort.get_stack_trace()
        tasks = list(run2.workDone.values())
        # runner.manifest.dump()
        assert tasks[0].status.name == "ok", tasks[0].status
        assert tasks[0].target.status.name == "absent", tasks[0].target.status
Example #20
 def test_configurator(self):
     """
     test that runner figures out the proper tasks to run
     """
     runner = Runner(YamlManifest(manifest))
     run1 = runner.run(JobOptions(resource="test1"))
     assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()
     assert len(run1.workDone) == 1, run1.workDone
     result = list(run1.workDone.values())[0].result
     self.assertEqual(result.outputs, {"fact1": "test1", "fact2": "test"})
     self.assertEqual(result.result.get("stdout"), sys.executable)
     assert run1.status == Status.ok, run1.summary()
Example #21
    def test_shell(self):
        """
        test that runner figures out the proper tasks to run
        """
        runner = Runner(YamlManifest(self.ensemble))

        job = runner.run(JobOptions(instance="test1"))

        assert len(job.workDone) == 1, job.workDone
        node = runner.manifest.get_root_resource().find_resource("test1")
        assert node.attributes["stdout"] == "helloworld"
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
Example #22
    def test_neededTasks(self):
        """
        test that runner figures out the proper tasks to run
        """
        runner = Runner(YamlManifest(manifest))
        test1 = runner.manifest.get_root_resource().find_resource("test1")
        assert test1
        # missing = runner.manifest.spec[0].findInvalidPreconditions(test1)
        # assert not missing, missing

        run1 = runner.run(JobOptions(instance="test1"))
        assert not run1.unexpectedAbort, run1.unexpectedAbort.get_stack_trace()
        assert len(run1.workDone) == 1, run1.summary()
Example #23
    def test_relationships(self):
        runner = Runner(YamlManifest(ENSEMBLE_WITH_RELATIONSHIPS))
        job = runner.run(JobOptions(workflow="deploy"))

        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
        node = job.rootResource.find_resource("test_zone")
        assert node
        assert node.attributes["zone"]["www"]["type"] == "A"
        assert node.attributes["zone"]["www"]["value"] == "10.10.10.1"
        assert node.attributes["managed_records"]["www"][
            "value"] == "10.10.10.1"

        # if the compute ip address changes (here via check), the zone should be updated
        try:
            os.environ["OCTODNS_TEST_IP"] = "10.10.10.2"
            job = runner.run(JobOptions(workflow="check"))
        finally:
            del os.environ["OCTODNS_TEST_IP"]

        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()

        compute = job.rootResource.find_resource("compute")
        assert compute
        assert compute.attributes["public_address"] == "10.10.10.2"

        node = job.rootResource.find_resource("test_zone")
        assert node.status == Status.error  # it's now out of sync
        assert node.attributes["zone"]["www"]["value"] == "10.10.10.1"
        assert node.attributes["managed_records"]["www"][
            "value"] == "10.10.10.2"

        job = runner.run(JobOptions(workflow="undeploy"))
        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
        node = job.rootResource.find_resource("test_zone")
        assert dict(node.attributes["zone"]) == {}
Example #24
    def test_undeploy(self):
        runner = Runner(YamlManifest(manifest))
        # print('load');  runner.manifest.statusSummary()
        run = runner.run(JobOptions(workflow="check", startTime=2))
        summary = run.jsonSummary()
        assert not run.unexpectedAbort, run.unexpectedAbort.getStackTrace()

        # print('check'); runner.manifest.statusSummary()
        run2 = runner.run(
            JobOptions(workflow="undeploy", startTime=3,
                       destroyunmanaged=True))

        assert not run2.unexpectedAbort, run2.unexpectedAbort.getStackTrace()
        summary = run2.jsonSummary()
        # print('undeploy'); runner.manifest.statusSummary()

        # note! if tests fail you may need to run:
        #      helm uninstall mysql-test -n unfurl-helm-unittest
        #  and kubectl delete namespace unfurl-helm-unittest

        # note: this test relies on stable_repo being placed in the helm cache by test_deploy()
        # comment out the repository requirement to run this test standalone
        assert all(task["targetStatus"] == "absent"
                   for task in summary["tasks"]), summary["tasks"]
        self.assertEqual(
            summary["job"],
            {
                "id": "A01130000000",
                "status": "ok",
                "total": 3,
                "ok": 3,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 3,
            },
        )
Example #25
    def test_shell(self):
        """
    test that runner figures out the proper tasks to run
    """
        runner = Runner(YamlManifest(manifest))

        run1 = runner.run(JobOptions(instance="test1"))
        assert len(run1.workDone) == 1, run1.workDone
        self.assertEqual(
            runner.manifest.getRootResource()
            .findResource("test1")
            .attributes["stdout"],
            "helloworld",
        )
        assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()
Example #26
    def test_configure(self):
        runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
        job = runner.run(JobOptions(workflow="deploy"))

        assert job.status == Status.ok
        assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
        node = job.rootResource.find_resource("test_node")
        assert node.attributes["zone"][""]["type"] == "A"
        assert node.attributes["zone"][""]["values"] == [
            "2.3.4.5",
            "2.3.4.6",
        ]
        assert node.attributes["zone"]["www"]["values"] == [
            "2.3.4.5",
            "2.3.4.6",
        ]
Example #27
def test_digests(caplog):
    path = __file__ + "/../examples/digest-ensemble.yaml"
    manifest = LocalEnv(path).get_manifest()
    runner = Runner(manifest)
    output = six.StringIO()  # so we don't save the file
    job = runner.run(JobOptions(startTime=1, out=output))  # deploy
    assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
    # print(job.out.getvalue())
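    # the change record lists which keys were digested and the resulting digest value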
    digestKeys = job.manifest.manifest.config["changes"][0]["digestKeys"]
    assert digestKeys == "run,cleartext_input,::anInstance::cleartext_prop"
    digest = job.manifest.manifest.config["changes"][0]["digestValue"]
    assert digest == "961216502983569ae51c5c8f96b106ccecca0d3e"

    filepath = FilePath(__file__ + "/../fixtures/helmrepo")
    digestContents = filepath.__digestable__(dict(manifest=manifest))
    assert digestContents == "git:800472c7b1b2ea128464b9144c1440ca7289a5fa"

    filepath = FilePath(__file__ + "/../..")  # root of repo
    digestContents = filepath.__digestable__(dict(manifest=manifest))
    assert digestContents.startswith("git:"), digestContents

    with caplog.at_level(logging.DEBUG):
        manifest2 = YamlManifest(job.out.getvalue(),
                                 path=os.path.dirname(path),
                                 localEnv=manifest.localEnv)
        output2 = six.StringIO()  # so we don't save the file
        job2 = Runner(manifest2).run(JobOptions(startTime=2, out=output2))
        assert not job2.unexpectedAbort, job2.unexpectedAbort.get_stack_trace()
        # print(job2.out.getvalue())
        summary = job2.json_summary()
        # print(json.dumps(summary, indent=2))
        assert summary == {
            "job": {
                "id": "A01120000000",
                "status": "ok",
                "total": 0,
                "ok": 0,
                "error": 0,
                "unknown": 0,
                "skipped": 0,
                "changed": 0,
            },
            "outputs": {},
            "tasks": [],
        }
        logMsg = "skipping task configure for instance nodeA with state NodeState.configured and status Status.ok: no change detected"
        assert logMsg in caplog.text
Example #28
    def test_workflows(self):
        manifest = YamlManifest(
            path=__file__ + "/../examples/test-workflow-manifest.yaml"
        )
        # print(manifest.tosca.template.nested_tosca_tpls)
        self.assertEqual(len(manifest.tosca._workflows), 3)

        runner = Runner(manifest)
        output = six.StringIO()
        job = runner.run(
            JobOptions(add=True, planOnly=True, out=output, startTime="test")
        )
        # print(job.jsonSummary())
        assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace()
        self.assertEqual(job.status.name, "ok")
        self.assertEqual(job.stats()["ok"], 4)
        self.assertEqual(job.stats()["changed"], 4)
Example #29
    def test_manifest(self):
        simple = (
            """
    apiVersion: %s
    kind: Manifest
    spec:
      instances:
        anInstance:
          # template: foo
          interfaces:
            Standard:
              operations:
               configure:
                implementation:    TestSubtask
                inputs: {}
    """
            % API_VERSION
        )
        manifest = YamlManifest(simple)
        runner = Runner(manifest)
        self.assertEqual(runner.taskCount, 0)
        output = six.StringIO()
        job = runner.run(JobOptions(add=True, out=output, startTime="test"))
        assert not job.unexpectedAbort, job.unexpectedAbort.getStackTrace()
        # workDone includes subtasks
        assert len(job.workDone) == 2, job.workDone

        # manifest shouldn't have changed
        manifest2 = YamlManifest(output.getvalue())
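        # the saved ensemble records a "lock" section with the runtime and repositories it used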
        lock = manifest2.manifest.config["lock"]
        assert "runtime" in lock and len(lock["repositories"]) == 3
        self.assertEqual(
            manifest2.lastJob["summary"],
            "2 tasks (2 changed, 2 ok, 0 failed, 0 unknown, 0 skipped)",
        )
        output2 = six.StringIO()
        job2 = Runner(manifest2).run(JobOptions(add=True, out=output2))
        assert not job2.unexpectedAbort, job2.unexpectedAbort.getStackTrace()

        # should not find any tasks to run
        assert len(job2.workDone) == 0, job2.workDone
        self.maxDiff = None
        self.assertEqual(output.getvalue(), output2.getvalue())
Example #30
   def test_TemplateConfigurator(self):
       manifest = """\
 apiVersion: unfurl/v1alpha1
 kind: Manifest
 spec:
   service_template:
     topology_template:
       node_templates:
         testNode:
           type: tosca.nodes.Root
           interfaces:
            Standard:
             operations:
               configure:
                 implementation:
                   className: unfurl.configurators.TemplateConfigurator
                 inputs:
                   done:
                     result:
                       outputVar: true
                   resultTemplate: |
                     - name: .self
                       attributes:
                         outputVar: "{{ outputVar }}"
 """
       runner = Runner(YamlManifest(manifest))
       job = runner.run()
       assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
       self.assertEqual(
           job.stats(),
           {
               "total": 1,
               "ok": 1,
               "error": 0,
               "unknown": 0,
               "skipped": 0,
               "changed": 1
           },
       )
       assert job.rootResource.find_resource(
           "testNode").attributes["outputVar"]