def upgrade():
    with op.batch_alter_table("subtasks") as batch_op:
        batch_op.add_column(
            sa.Column("contexts",
                      sa_types.MutableJSONEncodedDict(),
                      default={},
                      nullable=False))
        batch_op.add_column(
            sa.Column("contexts_results",
                      sa_types.MutableJSONEncodedList(),
                      default=[],
                      nullable=False))
        # The column was not used, so there is no need to migrate its data.
        batch_op.drop_column("context")
    with op.batch_alter_table("workloads") as batch_op:
        batch_op.add_column(
            sa.Column("contexts",
                      sa_types.MutableJSONEncodedDict(),
                      default={},
                      nullable=False))
        batch_op.add_column(
            sa.Column("contexts_results",
                      sa_types.MutableJSONEncodedList(),
                      default=[],
                      nullable=False))
        # The column was not used, so there is no need to migrate its data.
        batch_op.drop_column("context_execution")

    connection = op.get_bind()
    for workload in connection.execute(workload_helper.select()):
        # NOTE(andreykurilin): The real information about the execution of
        #   contexts for previous results is missing (which is expected), and
        #   it is impossible to guess the started_at and finished_at
        #   timestamps of each context. Let's not add random data, since no
        #   data is better than wrong data.

        if workload.start_time is None:
            # The load phase of this workload never started. This can mean
            #   that one of the contexts failed or that an error happened in
            #   the runner itself. In both cases we do not have enough data to
            #   restore anything, so assume that no contexts were executed at
            #   all.
            contexts_results = []
        else:
            # We cannot guess the timings of each individual context, but we
            # can restore started_at and finished_at timings for the setup and
            # cleanup methods of the context manager.

            # The workload record in the database is created right before the
            # launch of the ContextManager.
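            # NOTE: strftime("%s") relies on a platform-specific extension to
            # turn the created_at datetime into a Unix timestamp.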
            ctx_setup_started_at = float(workload.created_at.strftime("%s"))
            # There is a small preparation step in the runner between
            # finishing the setup of the context manager and launching the
            # load itself. It does not take much time; assume 0.01 seconds.
            ctx_setup_finished_at = workload.start_time - 0.01
            # The context manager starts cleanup right after the load is
            # finished. Again, there can be a small delay; assume 0.01
            # seconds.
            ctx_cleanup_started_at = (workload.start_time +
                                      workload.load_duration + 0.01)
            # We cannot rely on the updated_at field since it can be affected
            # by another migration. The full_duration covers the span from the
            # start of context setup until the load is finished, all results
            # are saved in the database and the cleanup method of the context
            # manager has run. Adding it to the setup start time does not give
            # the exact moment cleanup finished, but it should be close. Let's
            # deduct 0.1 seconds.
            ctx_cleanup_finished_at = (ctx_setup_started_at +
                                       workload.full_duration - 0.1)
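            # Illustrative numbers (assumed, not taken from real data): with
            # created_at=100.0, start_time=110.0, load_duration=50.0 and
            # full_duration=75.0 this gives setup 100.0 -> 109.99 and
            # cleanup 160.01 -> 174.9.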

            # plugin_name and arguments are used only for analysis, not for
            # restoring the original task itself, so a custom placeholder can
            # be used here.
            contexts_results = [{
                "plugin_name": "AllExecutedContexts",
                "plugin_cfg": {
                    "description":
                    "It is impossible to restore stats of executed "
                    "contexts while performing database migration. "
                    "The current info displays the approximate timestamps "
                    "which should say when the first setup method was "
                    "executed, when the last setup method finished, when "
                    "the first cleanup was started and when the last "
                    "cleanup finished. Also, please not that it is "
                    "impossible to guess information about possible "
                    "errors, so the current stats are marked as "
                    "successful."
                },
                "setup": {
                    "started_at": ctx_setup_started_at,
                    "finished_at": ctx_setup_finished_at,
                    "atomic_actions": [],
                    "error": None
                },
                "cleanup": {
                    "started_at": ctx_cleanup_started_at,
                    "finished_at": ctx_cleanup_finished_at,
                    "atomic_actions": [],
                    "error": None
                }
            }]

            possible_order = _process_contexts(workload.context)
            if possible_order:
                contexts_results[0]["plugin_cfg"]["order_of_execution"] = {
                    "note":
                    "We do not know if all setup methods were "
                    "executed, but if they were, the following order "
                    "is right.",
                    "order":
                    possible_order
                }

        connection.execute(
            workload_helper.update()
            .where(workload_helper.c.uuid == workload.uuid)
            .values({"contexts": workload_helper.c.context,
                     "contexts_results": contexts_results}))

    with op.batch_alter_table("workloads") as batch_op:
        batch_op.drop_column("context")
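

# The upgrade above calls _process_contexts(), which this snippet does not
# include. A minimal sketch of such a helper, assuming that the workload
# "context" value maps context plugin names to their configurations and that
# Context.get()/get_order() (imported further down in this listing as
# "from rally.task import context") resolve a plugin and its execution order;
# plugins that can no longer be loaded keep their original position.
def _process_contexts(ctx_config):
    order = []
    for position, full_name in enumerate(ctx_config or {}):
        try:
            plugin = context.Context.get(full_name.split("@")[0])
            order.append((plugin.get_order(), full_name))
        except Exception:
            # Unknown or removed plugin: keep the configuration order.
            order.append((position, full_name))
    return [name for _, name in sorted(order)]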
Example #2

# NOTE: the opening of this helper was cut off; the variable and table name
# below are assumed from the columns.
workload_helper = sa.Table(
    "workloads",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("start_time", sa_types.TimeStamp),
    sa.Column("statistics",
              sa_types.MutableJSONEncodedDict,
              default={},
              nullable=False),
)

workload_data_helper = sa.Table(
    "workloaddata", sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("workload_uuid", sa.String(length=36), nullable=False),
    sa.Column("chunk_data", sa_types.MutableJSONEncodedDict(), nullable=False))


def upgrade():
    connection = op.get_bind()
    workloads = {}
    for wdata in connection.execute(workload_data_helper.select()):
        workloads.setdefault(wdata.workload_uuid, [])

        chunk_data = wdata.chunk_data["raw"]

        require_updating = False
        for itr in chunk_data:
            if "output" not in itr:
                itr["output"] = {"additive": [], "complete": []}
                if "scenario_output" in itr and itr["scenario_output"]["data"]:
from rally.common.db.sqlalchemy import types as sa_types
from rally import exceptions
from rally import plugins
from rally.task import context

# revision identifiers, used by Alembic.
revision = "dc46687661df"
down_revision = "4394bdc32cfd"
branch_labels = None
depends_on = None

subtask_helper = sa.Table(
    "subtasks", sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("context", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts_results", sa_types.MutableJSONEncodedList()))

workload_helper = sa.Table(
    "workloads",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("task_uuid", sa.String(length=36), nullable=False),
    sa.Column("context", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts", sa_types.MutableJSONEncodedDict()),
    sa.Column("contexts_results", sa_types.MutableJSONEncodedList()),
    sa.Column("start_time", sa_types.TimeStamp),
    sa.Column("created_at", sa.DateTime),
    sa.Column("load_duration", sa.Float),
    "deploy->finished": "READY",
    "deploy->failed": "FAILED TO CREATE",
    "deploy->inconsistent": "FAILED TO CREATE",
    "deploy->subdeploy": "INITIALIZING",
    "cleanup->started": "CLEANING",
    "cleanup->failed": "READY",
    "cleanup->finished": "READY"
}

deployments_helper = sa.Table(
    "deployments",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("name", sa.String(255)),
    sa.Column("config", sa_types.MutableJSONEncodedDict()),
    sa.Column("credentials", sa_types.MutableJSONEncodedDict()),
    sa.Column("enum_deployments_status", sa.Enum(*STATUS_MAP.keys())),
    sa.Column("created_at", sa.DateTime),
)

envs_helper = sa.Table(
    "envs", sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("uuid", sa.String(36), nullable=False),
    sa.Column("name", sa.String(255)), sa.Column("description", sa.Text),
    sa.Column("status", sa.String(36)),
    sa.Column("extras", sa_types.MutableJSONEncodedDict),
    sa.Column("spec", sa_types.MutableJSONEncodedDict),
    sa.Column("created_at", sa.DateTime), sa.Column("updated_at", sa.DateTime))
def upgrade():
    conn = op.get_bind()

    subtask_table = op.create_table(
        "subtasks", sa.Column("created_at", sa.DateTime()),
        sa.Column("updated_at", sa.DateTime()),
        sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("task_uuid", sa.String(length=36), nullable=False),
        sa.Column("title", sa.String(length=64), default=""),
        sa.Column("description", sa.Text(), default=""),
        sa.Column("context",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("sla",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False), sa.Column("duration", sa.Float()),
        sa.Column("run_in_parallel",
                  sa.Boolean(),
                  default=False,
                  nullable=False), sa.Column("pass_sla", sa.Boolean()),
        sa.Column("status",
                  sa.Enum("finished",
                          "running",
                          "crashed",
                          name="enum_subtasks_status"),
                  nullable=False),
        sa.ForeignKeyConstraint(
            ["task_uuid"],
            ["tasks.uuid"],
        ), sa.PrimaryKeyConstraint("id"))

    op.create_index("subtask_uuid", "subtasks", ["uuid"], unique=True)
    op.create_index("subtask_status", "subtasks", ["status"], unique=False)

    workload_table = op.create_table(
        "workloads", sa.Column("created_at", sa.DateTime()),
        sa.Column("updated_at", sa.DateTime()),
        sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("task_uuid", sa.String(length=36), nullable=False),
        sa.Column("subtask_uuid", sa.String(length=36), nullable=False),
        sa.Column("name", sa.String(length=64), nullable=False),
        sa.Column("description", sa.Text(), default=""),
        sa.Column("position", sa.Integer(), default=0, nullable=False),
        sa.Column("runner_type", sa.String(length=64), nullable=False),
        sa.Column("runner",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("args",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("context",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("hooks",
                  sa_types.MutableJSONEncodedList(),
                  default=[],
                  nullable=False),
        sa.Column("sla",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("sla_results",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("context_execution",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.Column("load_duration", sa.Float(), default=0),
        sa.Column("full_duration", sa.Float(), default=0),
        sa.Column("min_duration", sa.Float(), default=0),
        sa.Column("max_duration", sa.Float(), default=0),
        sa.Column("total_iteration_count", sa.Integer(), default=0),
        sa.Column("failed_iteration_count", sa.Integer(), default=0),
        sa.Column("pass_sla", sa.Boolean()),
        sa.Column("statistics",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False), sa.Column("start_time", sa.DateTime()),
        sa.Column("_profiling_data", sa.Text(), default=""),
        sa.ForeignKeyConstraint(
            ["task_uuid"],
            ["tasks.uuid"],
        ), sa.ForeignKeyConstraint(
            ["subtask_uuid"],
            ["subtasks.uuid"],
        ), sa.PrimaryKeyConstraint("id"))

    op.create_index("workload_uuid", "workloads", ["uuid"], unique=True)

    workloaddata_table = op.create_table(
        "workloaddata", sa.Column("created_at", sa.DateTime()),
        sa.Column("updated_at", sa.DateTime()),
        sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("task_uuid", sa.String(length=36), nullable=False),
        sa.Column("workload_uuid", sa.String(length=36), nullable=False),
        sa.Column("chunk_order", sa.Integer(), nullable=False),
        sa.Column("iteration_count", sa.Integer(), nullable=False),
        sa.Column("failed_iteration_count", sa.Integer(), nullable=False),
        sa.Column("chunk_size", sa.Integer(), nullable=False),
        sa.Column("compressed_chunk_size", sa.Integer(), nullable=False),
        sa.Column("started_at", sa.DateTime(), nullable=False),
        sa.Column("finished_at", sa.DateTime(), nullable=False),
        sa.Column("chunk_data",
                  sa_types.MutableJSONEncodedDict(),
                  default={},
                  nullable=False),
        sa.ForeignKeyConstraint(
            ["task_uuid"],
            ["tasks.uuid"],
        ), sa.ForeignKeyConstraint(
            ["workload_uuid"],
            ["workloads.uuid"],
        ), sa.PrimaryKeyConstraint("id"))

    op.create_index("workload_data_uuid",
                    "workloaddata", ["uuid"],
                    unique=True)

    tag_table = op.create_table(
        "tags", sa.Column("created_at", sa.DateTime()),
        sa.Column("updated_at", sa.DateTime()),
        sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
        sa.Column("uuid", sa.String(length=36), nullable=False),
        sa.Column("tag", sa.String(length=255), nullable=False),
        sa.Column("type",
                  sa.Enum("task", "subtask", name="enum_tag_types"),
                  nullable=False), sa.PrimaryKeyConstraint("id"))

    op.create_index("d_type_tag", "tags", ["uuid", "type", "tag"], unique=True)

    with op.batch_alter_table("tasks") as batch_op:
        batch_op.add_column(
            sa.Column("title", sa.String(length=64), default=""))

        batch_op.add_column(sa.Column("description", sa.Text(), default=""))

        batch_op.add_column(sa.Column("input_task", sa.Text(), default=""))

        batch_op.add_column(sa.Column("validation_duration", sa.Float()))

        batch_op.add_column(sa.Column("task_duration", sa.Float()))

        batch_op.add_column(sa.Column("pass_sla", sa.Boolean()))

        batch_op.add_column(
            sa.Column("validation_result",
                      sa_types.MutableJSONEncodedDict(),
                      default={}))

    for task in conn.execute(taskhelper.select()):
        if task.tag:
            conn.execute(tag_table.insert(), [{
                "uuid": task.uuid,
                "type": "task",
                "tag": task.tag,
                "created_at": task.created_at,
                "updated_at": task.updated_at
            }])

        task_results = conn.execute(task_result_helper.select().where(
            task_result_helper.c.task_uuid == task.uuid))

        pass_sla = True
        task_duration = 0

        for task_result in task_results:
            raw_data = task_result.data.get("raw", [])
            iter_count = len(raw_data)

            failed_iter_count = 0
            max_duration = 0
            min_duration = -1

            for d in raw_data:
                if d.get("error"):
                    failed_iter_count += 1

                duration = d.get("duration", 0)

                if duration > max_duration:
                    max_duration = duration

                if min_duration < 0 or min_duration > duration:
                    min_duration = duration

            sla = task_result.data.get("sla", [])
            success = all([s.get("success") for s in sla])
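            # NOTE: all() over an empty list is True, so task results without
            # any SLA records are treated as passing.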

            if not success:
                pass_sla = False

            task_duration += task_result.data.get("full_duration", 0)

            delta = dt.timedelta(
                seconds=task_result.data.get("full_duration", 0))
            start = task_result.created_at - delta

            subtask_uuid = str(uuid.uuid4())

            conn.execute(
                subtask_table.insert(),
                [{
                    "uuid": subtask_uuid,
                    "task_uuid": task.uuid,
                    "created_at": task_result.created_at,
                    "updated_at": task_result.updated_at,
                    # NOTE(ikhudoshyn) We don't have info on subtask status
                    "status": "finished",
                    "duration": task_result.data.get("full_duration", 0),
                    "pass_sla": success
                }])

            workload_uuid = str(uuid.uuid4())

            conn.execute(
                workload_table.insert(),
                [{
                    "created_at": task_result.created_at,
                    "updated_at": task_result.updated_at,
                    "uuid": workload_uuid,
                    "task_uuid": task.uuid,
                    "subtask_uuid": subtask_uuid,
                    "name": task_result.key["name"],
                    "position": task_result.key["pos"],
                    "runner_type": task_result.key["kw"]["runner"]["type"],
                    "runner": task_result.key["kw"]["runner"],
                    "context": task_result.key["kw"].get("context", {}),
                    "sla": task_result.key["kw"].get("sla", {}),
                    "args": task_result.key["kw"].get("args", {}),
                    "sla_results": {
                        "sla": sla
                    },
                    "context_execution": {},
                    "load_duration": task_result.data.get("load_duration", 0),
                    "full_duration": task_result.data.get("full_duration", 0),
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "pass_sla": success,
                    "statistics": {},
                    "start_time": start,
                }])

            conn.execute(
                workloaddata_table.insert(),
                [{
                    "uuid": str(uuid.uuid4()),
                    "task_uuid": task.uuid,
                    "workload_uuid": workload_uuid,
                    "chunk_order": 0,
                    "iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "chunk_data": {
                        "raw": raw_data
                    },
                    # TODO(ikhudoshyn)
                    "chunk_size": 0,
                    "compressed_chunk_size": 0,
                    "started_at": start,
                    "finished_at": task_result.created_at
                }])

        task_verification_log = {}
        if task.verification_log:
            task_verification_log = json.loads(task.verification_log)

        conn.execute(
            taskhelper.update().where(taskhelper.c.uuid == task.uuid), {
                "pass_sla": pass_sla,
                "task_duration": task_duration,
                "validation_duration": 0,
                "validation_result": task_verification_log
            })

    # TODO(ikhudoshyn) update workload's statistics

    with op.batch_alter_table("tasks") as batch_op:
        batch_op.drop_column("tag")
        batch_op.drop_column("verification_log")
        batch_op.alter_column("validation_result",
                              existing_type=sa_types.MutableJSONEncodedDict(),
                              nullable=False)

    op.drop_table("task_results")
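
# The snippets above define only upgrade(). Rally migrations typically pair it
# with a downgrade() that refuses to run; a minimal sketch, assuming the
# "from rally import exceptions" import shown earlier applies here as well:
def downgrade():
    raise exceptions.DowngradeNotSupported()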
                      "setting up",
                      "soft_aborting",
                      "verifying",
                      name="enum_tasks_status"),
              nullable=False),
    sa.Column("verification_log", sa.Text(), nullable=True),
    sa.Column("tag", sa.String(length=64), nullable=True),
    sa.Column("deployment_uuid", sa.String(length=36), nullable=False),
    sa.Column("title", sa.String(length=64), default=""),
    sa.Column("description", sa.Text(), default=""),
    sa.Column("input_task", sa.Text(), default=""),
    sa.Column("validation_duration", sa.Float()),
    sa.Column("task_duration", sa.Float()), sa.Column("pass_sla",
                                                      sa.Boolean()),
    sa.Column("validation_result",
              sa_types.MutableJSONEncodedDict(),
              default={},
              nullable=False))

task_result_helper = sa.Table(
    "task_results", sa.MetaData(), sa.Column("created_at", sa.DateTime()),
    sa.Column("updated_at", sa.DateTime()),
    sa.Column("id", sa.Integer(), nullable=False, autoincrement=True),
    sa.Column("key", sa_types.MutableJSONEncodedDict(), nullable=False),
    sa.Column("data", sa_types.MutableJSONEncodedDict(), nullable=False),
    sa.Column("task_uuid", sa.String(length=36), nullable=True))
