from alembic import op
import sqlalchemy as sa

from rally.common.db import api  # module path assumed; provides get_engine()
from rally.common.db import sa_types


def upgrade():
    dialect = api.get_engine().dialect

    deployments_columns = [
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("parent_uuid", sa.String(length=36), nullable=True),
        sa.Column("name", sa.String(length=255), nullable=True),
        sa.Column("started_at", sa.DateTime(), nullable=True),
        sa.Column("completed_at", sa.DateTime(), nullable=True),
        sa.Column("config", sa_types.MutableJSONEncodedDict(), nullable=False),
        sa.Column("admin", sa.PickleType(), nullable=True),
        sa.Column("users", sa.PickleType(), nullable=False),
        sa.Column("enum_deployments_status",
                  sa.Enum("cleanup->failed",
                          "cleanup->finished",
                          "cleanup->started",
                          "deploy->failed",
                          "deploy->finished",
                          "deploy->inconsistent",
                          "deploy->init",
                          "deploy->started",
                          "deploy->subdeploy",
                          name="enum_deploy_status"),
                  nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name")
    ]
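
    # NOTE: SQLite cannot add a constraint to an existing table via
    # ALTER TABLE, so for SQLite the self-referential foreign key is declared
    # as part of CREATE TABLE itself; every other dialect gets it afterwards
    # through op.create_foreign_key() below.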

    if dialect.name.startswith("sqlite"):
        deployments_columns.append(
            sa.ForeignKeyConstraint(["parent_uuid"], [u"deployments.uuid"],
                                    name="fk_parent_uuid",
                                    use_alter=True))

    # commands auto generated by Alembic - please adjust!
    op.create_table("deployments", *deployments_columns)

    op.create_index("deployment_parent_uuid",
                    "deployments", ["parent_uuid"],
                    unique=False)

    op.create_index("deployment_uuid", "deployments", ["uuid"], unique=True)

    if not dialect.name.startswith("sqlite"):
        op.create_foreign_key("fk_parent_uuid", "deployments", "deployments",
                              ["parent_uuid"], ["uuid"])

    op.create_table(
        "workers", sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("hostname", sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("hostname", name="uniq_worker@hostname"))

    op.create_table(
        "resources", sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("provider_name", sa.String(length=255), nullable=True),
        sa.Column("type", sa.String(length=255), nullable=True),
        sa.Column("info", sa_types.MutableJSONEncodedDict(), nullable=False),
        sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(["deployment_uuid"], [u"deployments.uuid"]),
        sa.PrimaryKeyConstraint("id"))
    op.create_index("resource_deployment_uuid",
                    "resources", ["deployment_uuid"],
                    unique=False)

    op.create_index("resource_provider_name",
                    "resources", ["deployment_uuid", "provider_name"],
                    unique=False)

    op.create_index("resource_provider_name_and_type",
                    "resources", ["deployment_uuid", "provider_name", "type"],
                    unique=False)

    op.create_index("resource_type",
                    "resources", ["deployment_uuid", "type"],
                    unique=False)

    op.create_table(
        "tasks", sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("status",
                  sa.Enum("aborted",
                          "aborting",
                          "cleaning up",
                          "failed",
                          "finished",
                          "init",
                          "paused",
                          "running",
                          "setting up",
                          "soft_aborting",
                          "verifying",
                          name="enum_tasks_status"),
                  nullable=False),
        sa.Column("verification_log", sa.Text(), nullable=True),
        sa.Column("tag", sa.String(length=64), nullable=True),
        sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(["deployment_uuid"], ["deployments.uuid"]),
        sa.PrimaryKeyConstraint("id"))

    op.create_index("task_deployment",
                    "tasks", ["deployment_uuid"],
                    unique=False)

    op.create_index("task_status", "tasks", ["status"], unique=False)

    op.create_index("task_uuid", "tasks", ["uuid"], unique=True)

    op.create_table(
        "verifications", sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
        sa.Column("status",
                  sa.Enum("aborted",
                          "aborting",
                          "cleaning up",
                          "failed",
                          "finished",
                          "init",
                          "paused",
                          "running",
                          "setting up",
                          "soft_aborting",
                          "verifying",
                          name="enum_tasks_status"),
                  nullable=False),
        sa.Column("set_name", sa.String(length=20), nullable=True),
        sa.Column("tests", sa.Integer(), nullable=True),
        sa.Column("errors", sa.Integer(), nullable=True),
        sa.Column("failures", sa.Integer(), nullable=True),
        sa.Column("time", sa.Float(), nullable=True),
        sa.ForeignKeyConstraint(["deployment_uuid"], ["deployments.uuid"]),
        sa.PrimaryKeyConstraint("id"))

    op.create_index("verification_uuid",
                    "verifications", ["uuid"],
                    unique=True)

    op.create_table(
        "task_results", sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("key", sa_types.MutableJSONEncodedDict(), nullable=False),
        sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
        sa.Column("task_uuid", sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(["task_uuid"], ["tasks.uuid"]),
        sa.PrimaryKeyConstraint("id"))

    op.create_table(
        "verification_results",
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("verification_uuid", sa.String(length=36), nullable=True),
        sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
        sa.ForeignKeyConstraint(["verification_uuid"], ["verifications.uuid"]),
        sa.PrimaryKeyConstraint("id"))
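
Only upgrade() is shown above. A minimal matching downgrade() sketch, assuming a plain teardown is acceptable, would drop the tables in reverse dependency order so that no foreign key is left dangling:

def downgrade():
    # Children first: every table referencing another one must be dropped
    # before its parent; "deployments" goes last.
    for table in ("verification_results", "task_results", "verifications",
                  "tasks", "resources", "workers", "deployments"):
        op.drop_table(table)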
Example #2
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection

from rally.common.db import sa_types
from rally import exceptions

# revision identifiers, used by Alembic.
revision = "dc0fe6de6786"
down_revision = "95208e4eface"
branch_labels = None
depends_on = None

deployments_helper = sa.Table(
    "deployments",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("config", sa_types.MutableJSONEncodedDict()),
)


def upgrade():
    connection = op.get_bind()
    inspector = reflection.Inspector.from_engine(connection)
    if "deployments" not in inspector.get_table_names():
        # 7287df262dbc did not fail; nothing to do.
        return

    for deployment in connection.execute(deployments_helper.select()):
        config = deployment.config
        if (isinstance(config, dict)
                and config.get("type", "") == "ExistingCloud"
                and "creds" not in config):
            extra = config.pop("extra", None)
Example #3

def upgrade():
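    # batch_alter_table makes these schema changes work on SQLite too, which
    # cannot alter existing columns in place: in batch mode Alembic recreates
    # the table with the new schema and copies the rows across.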
    with op.batch_alter_table("subtasks") as batch_op:
        batch_op.add_column(
            sa.Column("contexts",
                      sa_types.MutableJSONEncodedDict(),
                      default={},
                      nullable=False))
        batch_op.add_column(
            sa.Column("contexts_results",
                      sa_types.MutableJSONEncodedList(),
                      default=[],
                      nullable=False))
        # it was not used, so we do not need to migrate the data
        batch_op.drop_column("context")
    with op.batch_alter_table("workloads") as batch_op:
        batch_op.add_column(
            sa.Column("contexts",
                      sa_types.MutableJSONEncodedDict(),
                      default={},
                      nullable=False))
        batch_op.add_column(
            sa.Column("contexts_results",
                      sa_types.MutableJSONEncodedList(),
                      default=[],
                      nullable=False))
        # it was not used, so we do not need to migrate the data
        batch_op.drop_column("context_execution")

    connection = op.get_bind()
    for workload in connection.execute(workload_helper.select()):
        # NOTE(andreykurilin): The real information about the execution of
        #   contexts for previous results is missing (as expected), and it is
        #   impossible to guess the started_at and finished_at timestamps of
        #   each context. Let's not add random data, since no data is better
        #   than wrong data.

        if workload.start_time is None:
            # The load never started for this workload. That can mean that
            #   one of the contexts failed or that an error happened in the
            #   runner itself. In both cases we do not have enough data to do
            #   anything, so assume that no contexts were executed at all.
            contexts_results = []
        else:
            # We cannot guess the timings of each context, but we can restore
            # the started_at and finished_at timings of the setup and cleanup
            # methods of the context manager.

            # The workload record in the database is created right before the
            # launch of the ContextManager.
            ctx_setup_started_at = float(workload.created_at.strftime("%s"))
            # There is a small preparation step in the runner between
            # finishing the setup of the context manager and launching the
            # load itself. It doesn't take much time; let it be 0.01 seconds.
            ctx_setup_finished_at = workload.start_time - 0.01
            # The context manager starts cleanup right after the load is
            # finished. Again, there can be a small delay; let it be 0.01
            # seconds.
            ctx_cleanup_started_at = (workload.start_time +
                                      workload.load_duration + 0.01)
            # We cannot rely on the updated_at field, since it can be affected
            # by another migration. full_duration covers everything until the
            # load is finished, all results are saved in the database and the
            # cleanup method of the context manager has run. It is not the
            # exact moment cleanup finished, but it should be close. Let's
            # deduct 0.1 seconds.
            ctx_cleanup_finished_at = (ctx_setup_started_at +
                                       workload.full_duration - 0.1)
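
            # As an illustration (all numbers made up): with created_at at
            # epoch 1000.0, start_time 1005.0, load_duration 60.0 and
            # full_duration 70.0, the values above become:
            #   ctx_setup_started_at    = 1000.0
            #   ctx_setup_finished_at   = 1005.0 - 0.01        = 1004.99
            #   ctx_cleanup_started_at  = 1005.0 + 60.0 + 0.01 = 1065.01
            #   ctx_cleanup_finished_at = 1000.0 + 70.0 - 0.1  = 1069.9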

            # plugin_name and arguments should be used only for analysis, not
            # for restoring the original task itself, so we can use a custom
            # value here.
            contexts_results = [{
                "plugin_name": "AllExecutedContexts",
                "plugin_cfg": {
                    "description":
                    "It is impossible to restore stats of executed "
                    "contexts while performing database migration. "
                    "The current info displays the approximate timestamps "
                    "which should say when the first setup method was "
                    "executed, when the last setup method finished, when "
                    "the first cleanup was started and when the last "
                    "cleanup finished. Also, please not that it is "
                    "impossible to guess information about possible "
                    "errors, so the current stats are marked as "
                    "successful."
                },
                "setup": {
                    "started_at": ctx_setup_started_at,
                    "finished_at": ctx_setup_finished_at,
                    "atomic_actions": [],
                    "error": None
                },
                "cleanup": {
                    "started_at": ctx_cleanup_started_at,
                    "finished_at": ctx_cleanup_finished_at,
                    "atomic_actions": [],
                    "error": None
                }
            }]

            possible_order = _process_contexts(workload.context)
            if possible_order:
                contexts_results[0]["plugin_cfg"]["order_of_execution"] = {
                    "note": "We do not know if all setup methods were "
                            "executed, but if they were, the following "
                            "order is right.",
                    "order": possible_order
                }

        connection.execute(
            workload_helper.update()
            .where(workload_helper.c.uuid == workload.uuid)
            .values({"contexts": workload_helper.c.context,
                     "contexts_results": contexts_results}))

    with op.batch_alter_table("workloads") as batch_op:
        batch_op.drop_column("context")
Example #4

from alembic import op
import sqlalchemy as sa

from rally.common.db import sa_types
from rally import exceptions
from rally import plugins
from rally.task import context

# revision identifiers, used by Alembic.
revision = "dc46687661df"
down_revision = "4394bdc32cfd"
branch_labels = None
depends_on = None

subtask_helper = sa.Table(
    "subtasks", sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("context", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts_results", sa_types.MutableJSONEncodedList()))

workload_helper = sa.Table(
    "workloads",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("task_uuid", sa.String(length=36), nullable=False),
    sa.Column("context", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts_results", sa_types.MutableJSONEncodedList()),
    sa.Column("start_time", sa_types.TimeStamp),
    sa.Column("created_at", sa.DateTime),
    sa.Column("load_duration", sa.Float),
Example #5
from alembic import op
import sqlalchemy as sa  # noqa

from rally.common.db import sa_types
from rally import exceptions

# revision identifiers, used by Alembic.
revision = "6ad4f426f005"
down_revision = "08e1515a576c"
branch_labels = None
depends_on = None

task_results_helper = sa.Table(
    "task_results",
    sa.MetaData(),
    sa.Column("id", sa.Integer(), nullable=False),
    sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
)
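
# NOTE: migrations define small standalone Table objects like this instead of
# importing the ORM models, so the migration keeps working even if the models
# change in later releases.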


def upgrade():
    connection = op.get_bind()
    for task_result in connection.execute(task_results_helper.select()):
        data = task_result.data
        data["hooks"] = []
        connection.execute(task_results_helper.update().where(
            task_results_helper.c.id == task_result.id).values(data=data))


def downgrade():
    raise exceptions.DowngradeNotSupported()
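
A chain of revisions like these is normally applied through Alembic's command API. A minimal sketch, assuming a configured alembic.ini (the path here is illustrative, not taken from this project):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")   # illustrative config path
command.upgrade(cfg, "head")  # runs every pending upgrade() in order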
    "tasks", sa.MetaData(),
    sa.Column("created_at", sa.DateTime(), nullable=True),
    sa.Column("updated_at", sa.DateTime(), nullable=True),
    sa.Column("id", sa.Integer(), nullable=False),
    sa.Column("uuid", sa.String(length=36), nullable=False),
    sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
    sa.Column("title", sa.String(length=64), default=""),
    sa.Column("description", sa.Text(), default=""),
    sa.Column("input_task", sa.Text(), default=""),
    sa.Column("validation_duration", sa.Float()),
    sa.Column("task_duration", sa.Float()), sa.Column("pass_sla",
                                                      sa.Boolean()),
    sa.Column("status", OLD_ENUM, nullable=False),
    sa.Column("new_status", sa.String(36), default=consts.TaskStatus.INIT),
    sa.Column("validation_result",
              sa_types.MutableJSONEncodedDict(),
              default={},
              nullable=False))

subtask = sa.Table(
    "subtasks", sa.MetaData(), sa.Column("created_at", sa.DateTime()),
    sa.Column("updated_at", sa.DateTime()),
    sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
    sa.Column("uuid", sa.String(length=36), nullable=False),
    sa.Column("task_uuid", sa.String(length=36), nullable=False),
    sa.Column("title", sa.String(length=64), default=""),
    sa.Column("description", sa.Text(), default=""),
    sa.Column("context",
              sa_types.MutableJSONEncodedDict(),
              default={},
              nullable=False),