Example 1
class ServerModel(generic.Model):
    """This is a model for the server.

    Server is a model, which presents physical servers in Decapod.
    Servers are grouped into clusters. Please remember, that
    it is forbidden to create the model using API, it has to
    be created using Ansible playbook invocation.
    """

    MODEL_NAME = "server"
    COLLECTION_NAME = "server"
    DEFAULT_SORT_BY = [("name", generic.SORT_ASC)]

    def __init__(self):
        super().__init__()

        self.name = None
        self.username = None
        self.fqdn = None
        self.ip = None
        self._state = None
        self.cluster_id = None
        self._cluster = None
        self.facts = {}
        self.lock = None

    _cluster = properties.ModelProperty(
        "decapod_common.models.cluster.ClusterModel",
        "cluster_id"
    )

    state = properties.ChoicesProperty("_state", ServerState)

    @classmethod
    def create(cls, server_id, name, username, fqdn, ip, facts=None,
               initiator_id=None):
        model = cls.find_by_model_id(server_id)
        changed = False
        facts = facts or {}

        if not model:
            changed = True
            model = cls()
            model.model_id = server_id
            model.name = name
            model.initiator_id = initiator_id
            model.state = ServerState.operational
            model.lock = None

        if model.username != username:
            model.username = username
            changed = True
        if model.fqdn != fqdn:
            model.fqdn = fqdn
            changed = True
        if model.ip != ip:
            model.ip = ip
            changed = True
        if model.facts != facts:
            model.facts = facts
            changed = True
        if model.time_deleted:
            LOG.info(
                "Server %s was previously deleted at %s (UNIX %s). Undelete.",
                model.model_id, time.ctime(model.time_deleted),
                model.time_deleted)
            model.time_deleted = 0
            changed = True

        if changed:
            model.save()
            if model.cluster_id:
                model.cluster.update_servers([model])
                model.cluster.save()

        return model

    @classmethod
    def find_by_ip(cls, ips):
        servers = []
        query = {
            "ip": {"$in": ips},
            "time_deleted": 0,
            "is_latest": True
        }

        for srv in cls.collection().find(query):
            model = cls()
            model.update_from_db_document(srv)
            servers.append(model)

        return servers

    @classmethod
    def get_model_id_version(cls, server_ids):
        cursor = cls.collection().find(
            {"_id": {"$in": list(server_ids)}},
            ["_id", "fqdn", "ip", "name", "model_id", "version"]
        )
        return {item["_id"]: item for item in cursor}

    @classmethod
    def get_model_server_ids(cls, server_ids):
        """Returns a list of all related server IDs for a set."""

        model_ids = cls.get_model_id_version(server_ids)
        model_ids = {v["model_id"]: k for k, v in model_ids.items()}
        cursor = cls.collection().find(
            {"model_id": {"$in": list(model_ids.keys())}},
            ["_id", "model_id"]
        )

        result = {}
        for item in cursor:
            result.setdefault(item["model_id"], []).append(item["_id"])

        return result

    @classmethod
    def cluster_servers(cls, cluster_id):
        query = {
            "cluster_id": cluster_id,
            "is_latest": True,
            "time_deleted": 0
        }

        servers = []
        for srv in cls.list_raw(query):
            model = cls()
            model.update_from_db_document(srv)
            servers.append(model)

        return servers

    @classmethod
    def lock_servers(cls, servers):
        if not servers:
            return

        server_ids = [srv._id for srv in servers]
        lock = str(uuid.uuid4())
        update_method = retryutils.mongo_retry()(cls.collection().update_many)
        result = update_method(
            {"_id": {"$in": server_ids}, "lock": None},
            {"$set": {"lock": lock}}
        )
        if result.modified_count == len(server_ids):
            return

        if result.modified_count:
            update_method(
                {"_id": {"$in": server_ids}, "lock": lock},
                {"$set": {"lock": None}}
            )
        raise exceptions.CannotLockServers()

    @classmethod
    def unlock_servers(cls, servers):
        if not servers:
            return

        server_ids = [srv._id for srv in servers]
        update_method = retryutils.mongo_retry()(cls.collection().update_many)

        update_method(
            {"_id": {"$in": server_ids}, "lock": {"$ne": None}},
            {"$set": {"lock": None}}
        )

    @property
    def locked(self):
        return self.lock is not None

    @property
    def cluster(self):
        return self._cluster

    @cluster.setter
    def cluster(self, value):
        old_cluster_id = self.cluster_id
        self._cluster = value

        if old_cluster_id is not None and self.cluster_id is not None:
            if self.cluster_id != old_cluster_id:
                self._cluster = old_cluster_id
                raise ValueError(
                    "Already defined cluster {0}. "
                    "Set to None first".format(self.cluster_id))

    @classmethod
    def ensure_index(cls):
        super().ensure_index()

        collection = cls.collection()
        for fieldname in "name", "fqdn", "ip", "state", "cluster_id":
            collection.create_index(
                [
                    (fieldname, generic.SORT_ASC),
                ],
                name="index_{0}".format(fieldname)
            )

    def check_constraints(self):
        super().check_constraints()

        query = {
            "time_deleted": 0,
            "$or": [
                {"name": self.name},
                {"fqdn": self.fqdn},
                {"ip": self.ip},
            ]
        }
        if self.model_id:
            query["model_id"] = {"$ne": self.model_id}

        if self.collection().find_one(query):
            raise exceptions.UniqueConstraintViolationError()

    def update_from_db_document(self, structure):
        super().update_from_db_document(structure)

        self.name = structure["name"]
        self.username = structure["username"]
        self.fqdn = structure["fqdn"]
        self.ip = structure["ip"]
        self.state = ServerState[structure["state"]]
        self.initiator_id = structure["initiator_id"]
        self.cluster = structure["cluster_id"]
        self.facts = generic.dot_unescape(structure["facts"])
        self.lock = structure["lock"]

    def delete(self):
        if self.cluster_id:
            raise exceptions.CannotDeleteServerInCluster(
                "Server {0.model_id} still belongs to cluster "
                "{0.cluster_id}".format(self))
        if self.lock:
            raise exceptions.CannotDeleteLockedServer(
                "Server {0.model_id} still locked by ongoing operation".format(
                    self))

        super().delete()

    def make_db_document_specific_fields(self):
        return {
            "name": self.name,
            "username": self.username,
            "fqdn": self.fqdn,
            "ip": self.ip,
            "state": self.state.name,
            "initiator_id": self.initiator_id,
            "cluster_id": self.cluster_id,
            "facts": generic.dot_escape(self.facts),
            "lock": self.lock
        }

    def make_api_specific_fields(self, expand_facts=True):
        facts = self.facts if expand_facts else {}

        return {
            "name": self.name,
            "username": self.username,
            "fqdn": self.fqdn,
            "ip": self.ip,
            "state": self.state.name,
            "cluster_id": self.cluster_id,
            "facts": facts
        }
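
For orientation, here is a minimal usage sketch. It assumes ServerModel and the exceptions module are importable from decapod_common (the module path decapod_common.models.server mirrors the cluster path referenced above), and every literal value is purely illustrative.

# A minimal sketch, assuming ServerModel lives in decapod_common.models.server
# and that the MongoDB connection behind collection() is already configured.
import uuid

from decapod_common import exceptions
from decapod_common.models import server

# Register (or update) a server, the way a discovery playbook would.
srv = server.ServerModel.create(
    server_id=str(uuid.uuid4()),
    name="ceph-node-01",                  # illustrative values
    username="ansible",
    fqdn="ceph-node-01.example.com",
    ip="10.0.0.11",
    facts={"ansible_memtotal_mb": 32768},
)

# Lock the server while an operation runs; always release it afterwards.
try:
    server.ServerModel.lock_servers([srv])
except exceptions.CannotLockServers:
    pass  # another operation holds the lock
else:
    try:
        ...  # run the operation against the server
    finally:
        server.ServerModel.unlock_servers([srv])
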
Example 2
class PlaybookConfigurationModel(generic.Model):
    """This is a model for a Playbook configuration."""

    MODEL_NAME = "playbook_configuration"
    COLLECTION_NAME = "playbook_configuration"
    DEFAULT_SORT_BY = [("time_created", generic.SORT_DESC)]

    def __init__(self):
        super().__init__()

        self.name = None
        self._playbook_id = None
        self.cluster = None
        self.configuration = {}

    playbook_id = properties.ChoicesProperty(
        "_playbook_id", plugins.get_public_playbook_plugins)

    cluster = properties.ModelProperty(
        "decapod_common.models.cluster.ClusterModel", "cluster_id")

    @classmethod
    def create(cls,
               name,
               playbook_id,
               cluster,
               servers,
               hints=None,
               initiator_id=None):
        hints = hints or []

        model = cls()
        model.name = name
        model.playbook_id = playbook_id
        model.cluster = cluster
        model.configuration = model.make_configuration(cluster, servers, hints)
        model.initiator_id = initiator_id
        model.save()

        return model

    @property
    def servers(self):
        ips = set()
        config = copy.deepcopy(self.configuration.get("inventory", {}))

        config.pop("_meta", None)
        for group in config.values():
            if isinstance(group, dict):
                ips.update(group.get("hosts", []))
            else:
                ips.update(group)

        return server.ServerModel.find_by_ip(list(ips))

    def make_configuration(self, cluster, servers, hints):
        plug = plugins.get_public_playbook_plugins()
        plug = plug[self.playbook_id]()
        configuration = plug.build_playbook_configuration(
            cluster, servers, hints)

        return configuration

    def update_from_db_document(self, structure):
        super().update_from_db_document(structure)

        self.name = structure["name"]
        self.playbook_id = structure["playbook_id"]
        self.configuration = generic.dot_unescape(structure["configuration"])
        self.cluster = structure["cluster_id"]

    def make_db_document_specific_fields(self):
        return {
            "name": self.name,
            "initiator_id": self.initiator_id,
            "playbook_id": self.playbook_id,
            "cluster_id": self.cluster_id,
            "configuration": generic.dot_escape(self.configuration)
        }

    def make_api_specific_fields(self):
        return {
            "name": self.name,
            "playbook_id": self.playbook_id,
            "cluster_id": self.cluster_id,
            "configuration": self.configuration
        }

    def delete(self):
        if not self.possible_to_delete():
            raise exceptions.CannotDeleteLockedPlaybookConfiguration(self)

        return super().delete()

    def possible_to_delete(self):
        document = self.collection().find_one({
            "model_id": self.model_id,
            "locked": True
        })
        return not bool(document)
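
A hedged sketch of creating a configuration for an existing cluster follows. The cluster lookup, the module paths, and the playbook_id value are assumptions for illustration; the real plugin names come from plugins.get_public_playbook_plugins() at runtime.

# A minimal sketch; ClusterModel's path is taken from the ModelProperty
# above, the other paths and the "cluster_deploy" plugin name are assumed.
from decapod_common.models import cluster, playbook_configuration, server

clus = cluster.ClusterModel.find_by_model_id("...")          # existing cluster
servers = server.ServerModel.cluster_servers(clus.model_id)  # its servers

config = playbook_configuration.PlaybookConfigurationModel.create(
    name="deploy-ceph",
    playbook_id="cluster_deploy",   # must be a registered public plugin
    cluster=clus,
    servers=servers,
    hints=[],
)

# The inventory built by the plugin can be resolved back into server models.
print([srv.fqdn for srv in config.servers])
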
Example 3
class Task(generic.Base):
    """This is a class for basic task.

    Similar to model, Task has some common set of fields (including
    its type) and data payload, different for every type of task.
    """

    COLLECTION_NAME = "task"

    @staticmethod
    def new_update_marker():
        """Generates new marker for model updates using CAS.

        Basically such marker is required because UNIX timestamp is not
        good enough for compare-and-swap (to discreet).
        """

        return str(uuid.uuid4())

    @classmethod
    def make_task(cls, db_document):
        if not db_document:
            return None

        if db_document["task_type"] == TaskType.server_discovery.name:
            model = ServerDiscoveryTask("", "", "", "")
        elif db_document["task_type"] == TaskType.playbook.name:
            model = PlaybookPluginTask("", "", "")
        elif db_document["task_type"] == TaskType.cancel.name:
            model = CancelPlaybookPluginTask("")
        else:
            raise ValueError("Unknown task type {0}".format(
                db_document["task_type"]))

        model.set_state(db_document)

        return model

    @classmethod
    def get_by_execution_id(cls, execution_id, task_type):
        """Returns a task model by execution ID and task type."""

        if hasattr(task_type, "name"):
            task_type = task_type.name

        query = {"execution_id": execution_id, "task_type": task_type}
        document = cls.collection().find_one(query)

        return cls.make_task(document)

    @classmethod
    def find_by_id(cls, task_id):
        task_id = bson.objectid.ObjectId(task_id)
        document = cls.collection().find_one({"_id": task_id})

        return cls.make_task(document)

    def __init__(self, task_type, execution_id):
        self._id = None
        self.task_type = task_type
        self.time_started = 0
        self.time_created = 0
        self.time_completed = 0
        self.time_cancelled = 0
        self.time_updated = 0
        self.time_failed = 0
        self.time_bounced = 0
        self.bounced = 0
        self.execution_id = execution_id
        self.update_marker = ""
        self.executor_host = ""
        self.executor_pid = 0
        self.error = ""
        self.data = {}

    task_type = properties.ChoicesProperty("_task_type", TaskType)

    def __str__(self):
        return "{0} (execution_id: {1})".format(self._id, self.execution_id)

    @property
    def id(self):
        return self._id

    @property
    def default_ttl(self):
        return CONF["cron"]["clean_finished_tasks_after_seconds"]

    def new_time_bounce(self):
        left_bound = timeutils.current_unix_timestamp() + BOUNCE_TIMEOUT
        right_bound = left_bound + self.bounced * BOUNCE_TIMEOUT
        bounce_time = random.triangular(left_bound, right_bound)
        bounce_time = int(bounce_time)

        return bounce_time

    def _update(self, query, setfields, exc):
        """Updates task in place.

        Tasks are not versioned so it is possible to update in place
        query is a filter for elements to search, setfields is a
        dictionary for $set ({"$set": setfields}), exc is an exception
        to raise if no suitable documents for update are found.
        """

        document = self._cas_update(query, setfields)
        if not document:
            raise exc()

        self.set_state(document)

        return self

    def _cas_update(self, query, setfields):
        """Does CAS update of the task."""

        query = copy.deepcopy(query)
        setfields = copy.deepcopy(setfields)

        query["_id"] = self._id
        query["time.completed"] = 0
        query["time.cancelled"] = 0
        query["time.failed"] = 0
        query["update_marker"] = self.update_marker

        setfields["update_marker"] = self.new_update_marker()
        setfields["time.updated"] = timeutils.current_unix_timestamp()

        method = self.collection().find_one_and_update
        method = retryutils.mongo_retry()(method)

        return method(
            query, {"$set": setfields},
            return_document=pymongo.ReturnDocument.AFTER
        )

    def get_execution(self):
        from decapod_common.models import execution

        return execution.ExecutionModel.find_by_model_id(self.execution_id)

    def get_state(self):
        """Extracts DB state from the model."""

        template = copy.deepcopy(TASK_TEMPLATE)

        template["_id"] = self._id
        template["task_type"] = self.task_type.name
        template["execution_id"] = self.execution_id
        template["time"]["created"] = self.time_created
        template["time"]["started"] = self.time_started
        template["time"]["completed"] = self.time_completed
        template["time"]["cancelled"] = self.time_cancelled
        template["time"]["updated"] = self.time_updated
        template["time"]["failed"] = self.time_failed
        template["time"]["bounced"] = self.time_bounced
        template["executor"]["host"] = self.executor_host
        template["executor"]["pid"] = self.executor_pid
        template["update_marker"] = self.update_marker
        template["bounced"] = self.bounced
        template["error"] = self.error
        template["data"] = copy.deepcopy(self.data)

        return template

    def set_state(self, state):
        """Sets DB state to model updating it in place."""

        self._id = state["_id"]
        self.task_type = TaskType[state["task_type"]]
        self.execution_id = state["execution_id"]
        self.time_created = state["time"]["created"]
        self.time_started = state["time"]["started"]
        self.time_completed = state["time"]["completed"]
        self.time_cancelled = state["time"]["cancelled"]
        self.time_updated = state["time"]["updated"]
        self.time_failed = state["time"]["failed"]
        self.time_bounced = state["time"]["bounced"]
        self.executor_host = state["executor"]["host"]
        self.executor_pid = state["executor"]["pid"]
        self.update_marker = state["update_marker"]
        self.bounced = state["bounced"]
        self.error = state["error"]
        self.data = copy.deepcopy(state["data"])

    def create(self):
        """Creates model in database."""

        state = self.get_state()

        state.pop("_id", None)
        state["time"]["created"] = timeutils.current_unix_timestamp()
        state["time"]["updated"] = state["time"]["created"]
        state["update_marker"] = self.new_update_marker()

        collection = self.collection()
        insert_method = retryutils.mongo_retry()(collection.insert_one)
        find_method = retryutils.mongo_retry()(collection.find_one)

        try:
            document = insert_method(state)
        except pymongo.errors.DuplicateKeyError as exc:
            raise exceptions.UniqueConstraintViolationError from exc

        document = find_method({"_id": document.inserted_id})
        self.set_state(document)

        return self

    def bounce(self):
        """Bounce task.

        This sets internal timer when task can be fetched next time."""

        query = {
            "time.failed": 0,
            "time.completed": 0,
            "time.cancelled": 0,
            "time.started": 0
        }
        setfields = {
            "time.bounced": self.new_time_bounce(),
            "bounced": self.bounced + 1
        }

        return self._update(query, setfields, exceptions.CannotBounceTaskError)

    def start(self):
        """Starts task execution."""

        query = {
            "time.failed": 0,
            "time.completed": 0,
            "time.cancelled": 0,
            "time.started": 0
        }
        setfields = {"time.started": timeutils.current_unix_timestamp()}

        return self._update(query, setfields,
                            exceptions.CannotStartTaskError)

    def cancel(self):
        """Cancels task execution."""

        query = {
            "time.failed": 0,
            "time.completed": 0,
            "time.cancelled": 0,
        }
        setfields = {
            "time.cancelled": timeutils.current_unix_timestamp(),
            TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
        }

        return self._update(query, setfields,
                            exceptions.CannotCancelTaskError)

    def complete(self):
        """Completes task execution."""

        query = {
            "time.failed": 0,
            "time.completed": 0,
            "time.cancelled": 0,
            "time.started": {"$ne": 0}
        }
        setfields = {
            "time.completed": timeutils.current_unix_timestamp(),
            TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
        }

        return self._update(query, setfields,
                            exceptions.CannotCompleteTaskError)

    def fail(self, error_message="Internal error"):
        """Fails task execution."""

        query = {
            "time.failed": 0,
            "time.completed": 0,
            "time.cancelled": 0,
            "time.started": {"$ne": 0}
        }
        setfields = {
            "time.failed": timeutils.current_unix_timestamp(),
            "error": error_message,
            TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
        }

        return self._update(query, setfields, exceptions.CannotFailTask)

    def set_executor_data(self, host, pid):
        """Sets executor data to the task."""

        query = {
            "time.started": {"$ne": 0},
            "time.completed": 0,
            "time.cancelled": 0,
            "time.failed": 0,
            "executor.host": "",
            "executor.pid": 0
        }
        setfields = {
            "executor.host": host,
            "executor.pid": pid
        }

        return self._update(query, setfields,
                            exceptions.CannotSetExecutorError)

    def refresh(self):
        document = self.collection().find_one({"_id": self._id})
        self.set_state(document)

        return self

    @classmethod
    def ensure_index(cls):
        collection = cls.collection()
        collection.create_index(
            [
                ("execution_id", generic.SORT_ASC),
                ("task_type", generic.SORT_ASC)
            ],
            name="index_execution_id",
            unique=True
        )
        collection.create_index(
            [
                ("time.created", generic.SORT_ASC),
                ("time.started", generic.SORT_ASC),
                ("time.completed", generic.SORT_ASC),
                ("time.cancelled", generic.SORT_ASC),
                ("time.failed", generic.SORT_ASC)
            ],
            name="index_time"
        )
        collection.create_index(
            TTL_FIELDNAME,
            expireAfterSeconds=0,
            name="index_task_ttl"
        )

    @classmethod
    def watch(cls, stop_condition=None, exit_on_empty=False):
        """Watch for a new tasks appear in queue.

        It is a generator, which yields tasks in correct order to be managed.
        It looks like an ideal usecase for MongoDB capped collections and
        tailable cursors, but in fact, due to limitations (not possible
        to change size of document -> cannot set error message etc) it is
        a way easier to maintain classic collections.
        """

        query = {
            "time.started": 0,
            "time.completed": 0,
            "time.cancelled": 0,
            "time.failed": 0,
            "time.bounced": {"$lte": 0}
        }
        sortby = [
            ("bounced", generic.SORT_DESC),
            ("time.bounced", generic.SORT_ASC),
            ("time.created", generic.SORT_ASC)
        ]
        collection = cls.collection()
        stop_condition = stop_condition or threading.Event()

        find_method = retryutils.mongo_retry()(collection.find_one)

        try:
            while not stop_condition.is_set():
                fetched_at = timeutils.current_unix_timestamp()
                query["time.bounced"]["$lte"] = fetched_at
                document = find_method(query, sort=sortby)

                if stop_condition.is_set():
                    return  # finish the generator (PEP 479)
                if document:
                    yield cls.make_task(document)
                elif exit_on_empty:
                    return

                watch_again = timeutils.current_unix_timestamp()
                if fetched_at == watch_again:
                    stop_condition.wait(1)
        except pymongo.errors.OperationFailure as exc:
            LOG.exception("Cannot continue to listen to queue: %s", exc)
            raise exceptions.InternalDBError() from exc
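
The CAS-guarded lifecycle can be exercised roughly as follows. The module path, the execution ID, and the use of the base Task class (instead of one of the subclasses referenced in make_task) are assumptions made for the sketch.

# A minimal sketch of the task lifecycle, assuming the classes above are
# importable from decapod_common.models.task.
import os
import socket

from decapod_common.models import task

new_task = task.Task(task.TaskType.playbook, "some-execution-id")
new_task.create()  # inserts the document and reloads its state

# A worker consumes tasks in the order defined by watch() and drives them
# through the CAS-guarded transitions; every transition re-checks the
# time.* guards and the update_marker.
for tsk in task.Task.watch(exit_on_empty=True):
    tsk.start()
    tsk.set_executor_data(socket.getfqdn(), os.getpid())
    try:
        ...  # run the actual work here
    except Exception as exc:
        tsk.fail(str(exc))
    else:
        tsk.complete()
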
Example 4
class ExecutionModel(generic.Model):
    """This is a model of Execution."""

    MODEL_NAME = "execution"
    COLLECTION_NAME = "execution"
    DEFAULT_SORT_BY = [("time_created", generic.SORT_DESC)]

    @classmethod
    def log_storage(cls):
        return ExecutionLogStorage(cls.database())

    def __init__(self):
        super().__init__()

        self.playbook_configuration_model_id = None
        self.playbook_configuration_version = None
        self._playbook_configuration = None
        self.state = ExecutionState.created

    @property
    def logfile(self):
        return self.log_storage().get(self.model_id)

    @property
    def new_logfile(self):
        storage = self.log_storage()
        storage.delete(self.model_id)

        return storage.new_file(
            self.model_id,
            filename="{0}.log".format(self.model_id),
            content_type="text/plain"
        )

    state = properties.ChoicesProperty("_state", ExecutionState)

    @property
    def playbook_configuration(self):
        if self._playbook_configuration:
            return self._playbook_configuration

        model = playbook_configuration.PlaybookConfigurationModel
        model = model.find_version(
            self.playbook_configuration_model_id,
            self.playbook_configuration_version
        )
        self._playbook_configuration = model

        return model

    @property
    def servers(self):
        if not self.playbook_configuration:
            return []

        return self.playbook_configuration.servers

    @playbook_configuration.setter
    def playbook_configuration(self, value):
        self._playbook_configuration = None

        self.playbook_configuration_model_id = value.model_id
        self.playbook_configuration_version = value.version

    @classmethod
    def create(cls, playbook_config, initiator_id=None):
        model = cls()
        model.playbook_configuration = playbook_config
        model.initiator_id = initiator_id
        model.save()

        return model

    def update_from_db_document(self, structure):
        super().update_from_db_document(structure)

        self.playbook_configuration_model_id = structure["pc_model_id"]
        self.playbook_configuration_version = structure["pc_version"]
        self.state = ExecutionState[structure["state"]]

    def make_db_document_specific_fields(self):
        return {
            "pc_model_id": self.playbook_configuration_model_id,
            "pc_version": self.playbook_configuration_version,
            "state": self.state.name
        }

    def make_api_specific_fields(self):
        return {
            "playbook_configuration": {
                "id": self.playbook_configuration_model_id,
                "version": self.playbook_configuration_version,
                "playbook_name": self.playbook_configuration.playbook_id
            },
            "state": self.state.name
        }
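
A short usage sketch for the execution model; the module paths and the placeholder configuration ID are assumptions.

# A minimal sketch, assuming both models are importable from
# decapod_common.models and that a playbook configuration already exists.
from decapod_common.models import execution, playbook_configuration

config = playbook_configuration.PlaybookConfigurationModel.find_by_model_id(
    "...")  # placeholder model ID

run = execution.ExecutionModel.create(config, initiator_id=None)
print(run.state.name)  # "created"

# new_logfile drops any previous log for this execution before creating a
# fresh one (see the property above); logfile fetches the current log.
logfile = run.new_logfile
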
class ExecutionStep(generic.Base):
    """This is a class for Execution step.

    This is the most lightweight model because it has to be
    the most performant model to create.
    """

    MODEL_NAME = "execution_step"
    COLLECTION_NAME = "execution_step"
    DEFAULT_SORT_BY = [
        ("time_finished", generic.SORT_DESC),
        ("time_started", generic.SORT_DESC)
    ]

    def __init__(self):
        self._id = None
        self.execution_id = ""
        self.role = ""
        self.name = ""
        self.result = ExecutionStepState.unknown
        self.error = {}
        self.server_id = ""
        self.time_started = 0
        self.time_finished = 0

    result = properties.ChoicesProperty("_result", ExecutionStepState)

    def update_from_db_document(self, value):
        """Sets DB state to model, updating it in place."""

        self._id = value["_id"]
        self.execution_id = value["execution_id"]
        self.role = value["role"]
        self.name = value["name"]
        self.result = ExecutionStepState(value["result"])
        self.error = value["error"]
        self.server_id = value["server_id"]
        self.time_started = value["time_started"]
        self.time_finished = value["time_finished"]

    def make_api_structure(self):
        return {
            "id": str(self._id),
            "model": self.MODEL_NAME,
            "time_updated": max(self.time_started, self.time_finished),
            "time_deleted": 0,
            "version": 1,
            "initiator_id": self.execution_id,
            "data": {
                "execution_id": self.execution_id,
                "role": self.role,
                "name": self.name,
                "error": self.error,
                "server_id": self.server_id,
                "time_started": self.time_started,
                "time_finished": self.time_finished,
                "result": self.result.name
            }
        }

    @classmethod
    def find_by_id(cls, task_id):
        document = cls.collection().find_one({"_id": task_id})
        if not document:
            return None

        model = cls()
        model.update_from_db_document(document)

        return model

    @classmethod
    def list_models(cls, execution_id, pagination):
        query = {}
        query.update(pagination["filter"])
        query["execution_id"] = execution_id

        if pagination["sort_by"]:
            sort_by = pagination["sort_by"]
        else:
            sort_by = cls.DEFAULT_SORT_BY

        result = cls.collection().find(query, sort=sort_by)
        result = wrappers.PaginationResult(
            cls, result, pagination
        )

        return result

    @classmethod
    def ensure_index(cls, *args, **kwargs):
        collection = cls.collection()
        collection.create_index(
            [
                ("execution_id", generic.SORT_ASC),
                ("time_finished", generic.SORT_DESC),
                ("time_started", generic.SORT_DESC)
            ],
            name="index_for_listing"
        )
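
Individual steps can be fetched back and rendered for the API roughly like this. The module path and the ObjectId literal are assumptions; list_models() additionally expects a pagination mapping providing at least the "filter" and "sort_by" keys it reads above.

# A minimal sketch, assuming ExecutionStep lives alongside ExecutionModel
# in decapod_common.models.execution; the ObjectId below is illustrative.
import bson.objectid

from decapod_common.models import execution

step_id = bson.objectid.ObjectId("0123456789ab0123456789ab")
step = execution.ExecutionStep.find_by_id(step_id)
if step is not None:
    data = step.make_api_structure()["data"]
    print(data["name"], data["result"], data["server_id"])
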