Example #1
0
    def _check_6ad4f426f005(self, engine, data):
        self.assertEqual("6ad4f426f005",
                         api.get_backend().schema_revision(engine=engine))

        deployment_table = db_utils.get_table(engine, "deployments")
        task_table = db_utils.get_table(engine, "tasks")
        task_result_table = db_utils.get_table(engine, "task_results")
        with engine.connect() as conn:
            task_results = conn.execute(task_result_table.select()).fetchall()
            self.assertEqual(1, len(task_results))
            task_result = task_results[0]

            # check that the "hooks" field was added
            self.assertEqual({"hooks": []}, json.loads(task_result.data))

            # Remove task result
            conn.execute(task_result_table.delete().where(
                task_result_table.c.id == task_result.id))

            # Remove task
            conn.execute(
                task_table.delete().where(task_table.c.uuid == "my_task"))

            # Remove deployment
            conn.execute(deployment_table.delete().where(
                deployment_table.c.uuid == "my_deployment"))
Example #2
0
    def _check_6ad4f426f005(self, engine, data):
        self.assertEqual("6ad4f426f005",
                         api.get_backend().schema_revision(engine=engine))

        deployment_table = db_utils.get_table(engine, "deployments")
        task_table = db_utils.get_table(engine, "tasks")
        task_result_table = db_utils.get_table(engine, "task_results")
        with engine.connect() as conn:
            task_results = conn.execute(task_result_table.select()).fetchall()
            self.assertEqual(1, len(task_results))
            task_result = task_results[0]

            # check that the "hooks" field was added
            self.assertEqual({"hooks": []}, json.loads(task_result.data))

            # Remove task result
            conn.execute(
                task_result_table.delete().where(
                    task_result_table.c.id == task_result.id)
            )

            # Remove task
            conn.execute(
                task_table.delete().where(task_table.c.uuid == "my_task"))

            # Remove deployment
            conn.execute(
                deployment_table.delete().where(
                    deployment_table.c.uuid == "my_deployment")
            )
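
Both snippets above obtain table objects through db_utils.get_table and then run plain SQLAlchemy Core statements against them. As a hedged sketch only, assuming the helper is a thin wrapper over SQLAlchemy reflection (the project's real helper may differ):

import sqlalchemy


def get_table(engine, name):
    # Reflect the table definition from the live database. Recent SQLAlchemy
    # releases take autoload_with; older ones use autoload=True instead.
    return sqlalchemy.Table(name, sqlalchemy.MetaData(), autoload_with=engine)
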
Example #3
0
    def _check_08e1515a576c(self, engine, data):
        self.assertEqual("08e1515a576c",
                         api.get_backend().schema_revision(engine=engine))

        tasks = self._08e1515a576c_logs

        deployment_table = db_utils.get_table(engine, "deployments")
        task_table = db_utils.get_table(engine, "tasks")

        with engine.connect() as conn:
            tasks_found = conn.execute(task_table.select()).fetchall()
            for task in tasks_found:
                actual_log = json.loads(task.verification_log)
                self.assertIsInstance(actual_log, dict)
                expected = tasks[int(task.uuid)]["post"]
                for key in expected:
                    self.assertEqual(expected[key], actual_log[key])

                conn.execute(
                    task_table.delete().where(task_table.c.uuid == task.uuid))

            deployment_uuid = self._08e1515a576c_deployment_uuid
            conn.execute(
                deployment_table.delete().where(
                    deployment_table.c.uuid == deployment_uuid)
            )
Example #4
0
    def walk_versions(self, engine=None):
        """Walk through versions.

        Determine the latest version script from the repo, then
        upgrade from the first revision through to the latest.
        Since _migrate_up is called with with_data=True, each
        revision's data checks run along the way as well.
        """

        self._configure(engine)
        # NOTE(ikhudoshyn): The DB already contains a certain schema,
        # so we cannot execute all migrations starting from the
        # initial revision. Therefore we clean up the DB first.
        s_api.get_backend().schema_cleanup()
        up_and_down_versions = self._up_and_down_versions()
        for ver_up, ver_down in up_and_down_versions:
            self._migrate_up(engine, ver_up, with_data=True)
Example #5
0
    def walk_versions(self, engine=None):
        """Walk through versions.

        Determine the latest version script from the repo, then
        upgrade from the first revision through to the latest.
        Since _migrate_up is called with with_data=True, each
        revision's data checks run along the way as well.
        """

        self._configure(engine)
        # NOTE(ikhudoshyn): The DB already contains a certain schema,
        # so we cannot execute all migrations starting from the
        # initial revision. Therefore we clean up the DB first.
        s_api.get_backend().schema_cleanup()
        up_and_down_versions = self._up_and_down_versions()
        for ver_up, ver_down in up_and_down_versions:
            self._migrate_up(engine, ver_up, with_data=True)
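
Both copies of walk_versions drive every revision through self._migrate_up with with_data=True, which is what triggers the per-revision _pre_upgrade_<rev> and _check_<rev> hooks shown in the other examples. A minimal sketch of that pattern, assuming the usual pre-upgrade/upgrade/check flow; migration_api and the hook lookup by name are assumptions, not the project's actual code:

    def _migrate_up(self, engine, version, with_data=False):
        # Sketch of the common pre-upgrade/upgrade/check flow (assumption).
        data = None
        if with_data:
            pre_upgrade = getattr(self, "_pre_upgrade_%s" % version, None)
            if pre_upgrade:
                # Seed fixture rows that the matching _check_<version> will
                # verify and clean up after the migration has run.
                data = pre_upgrade(engine)

        # Apply the single migration step; "migration_api" is a placeholder
        # for whatever driver the test harness actually uses.
        self.migration_api.upgrade(version)

        if with_data:
            check = getattr(self, "_check_%s" % version, None)
            if check:
                check(engine, data)
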
Example #6
0
    def _check_32fada9b2fde(self, engine, data):
        self.assertEqual("32fada9b2fde",
                         api.get_backend().schema_revision(engine=engine))

        original_deployments = self._32fada9b2fde_deployments

        deployment_table = db_utils.get_table(engine, "deployments")

        with engine.connect() as conn:
            deployments_found = conn.execute(
                deployment_table.select()).fetchall()
            for deployment in deployments_found:
                # check deployment
                self.assertIn(deployment.uuid, original_deployments)
                self.assertIn(deployment.name, original_deployments)

                config = json.loads(deployment.config)
                if config != original_deployments[deployment.uuid]:
                    if deployment.uuid.startswith("should-not-be-changed"):
                        self.fail("Config of deployment '%s' is changes, but "
                                  "should not." % deployment.uuid)
                    if "admin_domain_name" in deployment.config:
                        self.fail("Config of deployment '%s' should not "
                                  "contain `admin_domain_name` field." %
                                  deployment.uuid)

                    endpoint_type = (original_deployments[
                                     deployment.uuid].get("endpoint_type"))
                    if endpoint_type in (None, "public"):
                        self.assertNotIn("endpoint_type", config)
                    else:
                        self.assertIn("endpoint_type", config)
                        self.assertEqual(endpoint_type,
                                         config["endpoint_type"])

                    existing.ExistingCloud({"config": config}).validate()
                else:
                    if not deployment.uuid.startswith("should-not-be-changed"):
                        self.fail("Config of deployment '%s' is not changes, "
                                  "but should." % deployment.uuid)

                # the deployment created at the _pre_upgrade step is no longer
                # needed, so we can remove it
                conn.execute(
                    deployment_table.delete().where(
                        deployment_table.c.uuid == deployment.uuid)
                )
Example #7
0
    def _check_32fada9b2fde(self, engine, data):
        self.assertEqual("32fada9b2fde",
                         api.get_backend().schema_revision(engine=engine))

        original_deployments = self._32fada9b2fde_deployments

        deployment_table = db_utils.get_table(engine, "deployments")

        with engine.connect() as conn:
            deployments_found = conn.execute(
                deployment_table.select()).fetchall()
            for deployment in deployments_found:
                # check deployment
                self.assertIn(deployment.uuid, original_deployments)
                self.assertIn(deployment.name, original_deployments)

                config = json.loads(deployment.config)
                if config != original_deployments[deployment.uuid]:
                    if deployment.uuid.startswith("should-not-be-changed"):
                        self.fail("Config of deployment '%s' is changes, but "
                                  "should not." % deployment.uuid)
                    if "admin_domain_name" in deployment.config:
                        self.fail("Config of deployment '%s' should not "
                                  "contain `admin_domain_name` field." %
                                  deployment.uuid)

                    endpoint_type = (original_deployments[deployment.uuid].get(
                        "endpoint_type"))
                    if endpoint_type in (None, "public"):
                        self.assertNotIn("endpoint_type", config)
                    else:
                        self.assertIn("endpoint_type", config)
                        self.assertEqual(endpoint_type,
                                         config["endpoint_type"])

                    existing.ExistingCloud({"config": config}).validate()
                else:
                    if not deployment.uuid.startswith("should-not-be-changed"):
                        self.fail("Config of deployment '%s' is not changes, "
                                  "but should." % deployment.uuid)

                # the deployment created at the _pre_upgrade step is no longer
                # needed, so we can remove it
                conn.execute(deployment_table.delete().where(
                    deployment_table.c.uuid == deployment.uuid))
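
Read together, the assertions in examples #6 and #7 expect the 32fada9b2fde migration to strip admin_domain_name from deployment configs and to keep endpoint_type only when it is neither missing nor "public". A hand-made illustration of that expectation; the surrounding field values are invented, only the two checked keys come from the code:

config_before = {
    "endpoint_type": "public",       # default value, expected to be dropped
    "admin_domain_name": "Default",  # expected to disappear entirely
}
config_after = {
    # "endpoint_type" is gone because it was None or "public"; a value such
    # as "internal" would have been preserved and re-checked above.
}
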
Example #8
0
    def _check_08e1515a576c(self, engine, data):
        self.assertEqual("08e1515a576c",
                         api.get_backend().schema_revision(engine=engine))

        tasks = self._08e1515a576c_logs

        deployment_table = db_utils.get_table(engine, "deployments")
        task_table = db_utils.get_table(engine, "tasks")

        with engine.connect() as conn:
            tasks_found = conn.execute(task_table.select()).fetchall()
            for task in tasks_found:
                actual_log = json.loads(task.verification_log)
                self.assertIsInstance(actual_log, dict)
                expected = tasks[int(task.uuid)]["post"]
                for key in expected:
                    self.assertEqual(expected[key], actual_log[key])

                conn.execute(
                    task_table.delete().where(task_table.c.uuid == task.uuid))

            deployment_uuid = self._08e1515a576c_deployment_uuid
            conn.execute(deployment_table.delete().where(
                deployment_table.c.uuid == deployment_uuid))
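
In both 08e1515a576c checks the task UUIDs double as integer indexes into self._08e1515a576c_logs, and the "post" entry of each record is compared against the migrated verification_log. A rough sketch of the fixture shape this implies; the keys inside the log dicts are placeholders, not taken from the code:

_08e1515a576c_logs = [
    # one record per task seeded in _pre_upgrade; task UUIDs are "0", "1", ...
    {"pre": "plain-text log", "post": {"errors": "", "details": "..."}},
    {"pre": "plain-text log", "post": {"errors": "", "details": "..."}},
]
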
Example #9
0
    def _check_3177d36ea270(self, engine, data):
        self.assertEqual("3177d36ea270",
                         api.get_backend().schema_revision(engine=engine))
        self.assertColumnExists(engine, "deployments", "credentials")
        self.assertColumnNotExists(engine, "deployments", "admin")
        self.assertColumnNotExists(engine, "deployments", "users")
Example #10
0
    def _check_3177d36ea270(self, engine, data):
        self.assertEqual(
            "3177d36ea270", api.get_backend().schema_revision(engine=engine))
        self.assertColumnExists(engine, "deployments", "credentials")
        self.assertColumnNotExists(engine, "deployments", "admin")
        self.assertColumnNotExists(engine, "deployments", "users")
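
assertColumnExists and assertColumnNotExists in examples #9 and #10 are assertions about the migrated schema. A self-contained sketch of the same checks using SQLAlchemy runtime inspection; column_names is an illustrative helper, not part of the test base class:

import sqlalchemy


def column_names(engine, table_name):
    # Inspect the live database and return the table's column names.
    inspector = sqlalchemy.inspect(engine)
    return [col["name"] for col in inspector.get_columns(table_name)]


# Inside the test case the assertions could then read:
#     self.assertIn("credentials", column_names(engine, "deployments"))
#     self.assertNotIn("admin", column_names(engine, "deployments"))
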
Example #11
0
    def _check_e654a0648db0(self, engine, data):
        self.assertEqual("e654a0648db0",
                         api.get_backend().schema_revision(engine=engine))

        task_table = db_utils.get_table(engine, "tasks")
        subtask_table = db_utils.get_table(engine, "subtasks")
        workload_table = db_utils.get_table(engine, "workloads")
        workloaddata_table = db_utils.get_table(engine, "workloaddata")
        tag_table = db_utils.get_table(engine, "tags")
        deployment_table = db_utils.get_table(engine, "deployments")

        with engine.connect() as conn:

            # Check task

            tasks_found = conn.execute(task_table.select().where(
                task_table.c.uuid == self._e654a0648db0_task_uuid)).fetchall()

            self.assertEqual(len(tasks_found), 1)

            task_found = tasks_found[0]
            self.assertEqual(task_found.uuid, self._e654a0648db0_task_uuid)
            self.assertEqual(task_found.deployment_uuid,
                             self._e654a0648db0_deployment_uuid)
            self.assertEqual(task_found.status, consts.TaskStatus.FINISHED)
            # NOTE(ikhudoshyn): pass_sla is True only if success == True
            # for all workloads
            self.assertEqual(task_found.pass_sla, False)
            # NOTE(ikhudoshyn): sum of all full_durations of all workloads
            self.assertEqual(task_found.task_duration, 142)
            # NOTE(ikhudoshyn): we have no info on validation duration in old
            # schema
            self.assertEqual(task_found.validation_duration, 0)
            self.assertEqual(json.loads(task_found.validation_result), {})

            # Check subtask

            subtasks_found = conn.execute(subtask_table.select().where(
                subtask_table.c.task_uuid ==
                self._e654a0648db0_task_uuid)).fetchall()

            self.assertEqual(len(subtasks_found), 1)

            subtask_found = subtasks_found[0]
            self.assertEqual(subtask_found.task_uuid,
                             self._e654a0648db0_task_uuid)

            # NOTE(ikhudoshyn): pass_sla is True only if success == True
            # for all workloads
            self.assertEqual(subtask_found.pass_sla, False)
            # NOTE(ikhudoshyn): sum of all full_durations of all workloads
            self.assertEqual(subtask_found.duration, 142)

            self._e654a0648db0_subtask_uuid = subtask_found.uuid

            # Check tag

            tags_found = conn.execute(tag_table.select().where(
                tag_table.c.uuid == self._e654a0648db0_task_uuid)).fetchall()

            self.assertEqual(len(tags_found), 1)
            self.assertEqual(tags_found[0].tag, "test_tag")
            self.assertEqual(tags_found[0].type, consts.TagType.TASK)

            # Check workload

            workloads_found = conn.execute(workload_table.select().where(
                workload_table.c.task_uuid ==
                self._e654a0648db0_task_uuid)).fetchall()

            self.assertEqual(len(workloads_found), 1)

            workload_found = workloads_found[0]

            self.assertEqual(workload_found.task_uuid,
                             self._e654a0648db0_task_uuid)

            self.assertEqual(workload_found.subtask_uuid,
                             self._e654a0648db0_subtask_uuid)

            self.assertEqual(workload_found.name, "test_scenario")
            self.assertEqual(workload_found.position, 0)
            self.assertEqual(workload_found.runner_type, "theRunner")
            self.assertEqual(workload_found.runner,
                             json.dumps({"type": "theRunner"}))
            self.assertEqual(workload_found.sla, json.dumps({"s": "S"}))
            self.assertEqual(workload_found.args, json.dumps({"a": "A"}))
            self.assertEqual(workload_found.context, json.dumps({"c": "C"}))
            self.assertEqual(
                workload_found.sla_results,
                json.dumps({"sla": [{
                    "success": True
                }, {
                    "success": False
                }]}))
            self.assertEqual(workload_found.context_execution, json.dumps({}))
            self.assertEqual(workload_found.load_duration, 42)
            self.assertEqual(workload_found.full_duration, 142)
            self.assertEqual(workload_found.min_duration, 1)
            self.assertEqual(workload_found.max_duration, 8)
            self.assertEqual(workload_found.total_iteration_count, 3)
            self.assertEqual(workload_found.failed_iteration_count, 1)
            self.assertEqual(workload_found.pass_sla, False)

            self._e654a0648db0_workload_uuid = workload_found.uuid

            # Check workloadData

            workloaddata_found = conn.execute(
                workloaddata_table.select().where(
                    workloaddata_table.c.task_uuid ==
                    self._e654a0648db0_task_uuid)).fetchall()

            self.assertEqual(len(workloaddata_found), 1)

            wloaddata_found = workloaddata_found[0]

            self.assertEqual(wloaddata_found.task_uuid,
                             self._e654a0648db0_task_uuid)

            self.assertEqual(wloaddata_found.workload_uuid,
                             self._e654a0648db0_workload_uuid)

            self.assertEqual(wloaddata_found.chunk_order, 0)
            self.assertEqual(wloaddata_found.chunk_size, 0)
            self.assertEqual(wloaddata_found.compressed_chunk_size, 0)
            self.assertEqual(wloaddata_found.iteration_count, 3)
            self.assertEqual(wloaddata_found.failed_iteration_count, 1)
            self.assertEqual(
                wloaddata_found.chunk_data,
                json.dumps({
                    "raw": [
                        {
                            "error": "e",
                            "duration": 3
                        },
                        {
                            "duration": 1
                        },
                        {
                            "duration": 8
                        },
                    ]
                }))

            # Delete everything created at the _pre_upgrade step

            conn.execute(tag_table.delete().where(
                tag_table.c.uuid == self._e654a0648db0_task_uuid))

            conn.execute(workloaddata_table.delete().where(
                workloaddata_table.c.task_uuid ==
                self._e654a0648db0_task_uuid))

            conn.execute(workload_table.delete().where(
                workload_table.c.task_uuid == self._e654a0648db0_task_uuid))
            conn.execute(subtask_table.delete().where(
                subtask_table.c.task_uuid == self._e654a0648db0_task_uuid))

            conn.execute(task_table.delete().where(
                task_table.c.uuid == self._e654a0648db0_task_uuid))

            conn.execute(deployment_table.delete().where(
                deployment_table.c.uuid == self._e654a0648db0_deployment_uuid))
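
The per-workload numbers asserted above are consistent with the chunk_data payload checked near the end: three raw iterations with durations 3, 1 and 8, one of which carries an error. A short worked illustration of the derived values (load_duration == 42 and full_duration == 142 come from the fixture seeded in the _pre_upgrade step, which is not part of this snippet):

raw = [{"error": "e", "duration": 3}, {"duration": 1}, {"duration": 8}]

total_iteration_count = len(raw)                         # 3
failed_iteration_count = sum("error" in r for r in raw)  # 1
min_duration = min(r["duration"] for r in raw)           # 1
max_duration = max(r["duration"] for r in raw)           # 8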