Example #1
    def _execute(self, params):
        generated_sql, query_params = self._generate_sql(params)

        rows = self._db_query("""
            SELECT
                id,
                created,
                spec,
                IFNULL(tasks_total, 0)                                      AS tasks_total,
                IFNULL(tasks_queued, 0)                                     AS tasks_queued,
                -- Tasks that are cancelled count as finished also.
                -- It is always true that if one is null, all are null
                IFNULL(tasks_total - tasks_queued - tasks_finished, 0)      AS tasks_running,
                IFNULL(tasks_finished, 0)                                   AS tasks_finished,
                IFNULL(tasks_cancelled, 0)                                  AS tasks_cancelled,
                IFNULL(tasks_succeeded, 0)                                  AS tasks_succeeded,
                IFNULL(tasks_errored, 0)                                    AS tasks_errored,
                %(state_projection)s                                        AS state
            FROM
                jobs
                LEFT JOIN (
                    SELECT
                        tasks.job_id,
                        -- counts
                        COUNT(tasks.id)                                     AS tasks_total,
                        CAST(SUM(%(success_cond)s) AS SIGNED)               AS tasks_succeeded,
                        CAST(SUM(%(error_cond)s) AS SIGNED)                 AS tasks_errored,
                        CAST(SUM(%(cancelled_cond)s) AS SIGNED)             AS tasks_cancelled,
                        CAST(SUM(%(finished_cond)s) AS SIGNED)              AS tasks_finished,
                        CAST(SUM(%(queued_cond)s) AS SIGNED)                AS tasks_queued
                    FROM tasks
                    WHERE %(task_job_id_predicate)s
                    GROUP BY tasks.job_id
                ) AS job_tasks ON job_tasks.job_id = jobs.id
            WHERE %(job_id_predicate)s
            LIMIT 2
        """ % generated_sql, **query_params)

        if len(rows) == 0:
            raise exceptions.ApiException('No job found with id `%s`' % params['job_id'])
        elif len(rows) > 1:
            raise exceptions.ApiException('More than one job matches id `%s`, try using a more specific prefix' % params['job_id'])

        row = rows[0]

        return shared.job_load_row(row)
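
The query above is a Python %-format template: _generate_sql is expected to return a dict of SQL fragments that get spliced into the %(...)s slots (the state projection, the per-task conditions, and the id predicates), plus the parameters that _db_query binds separately. The real _generate_sql is not shown here; a minimal sketch of that contract, written as a standalone function with hypothetical fragment SQL and task state values, might look like this:

    def _generate_sql(params):
        # Hypothetical sketch of the contract assumed by _execute above.
        # The fragments are spliced into the query text via %-formatting; the
        # actual values are passed to _db_query(**query_params) so the driver
        # binds them instead of string-interpolating them into the SQL.
        generated_sql = {
            # Projected job state, derived from the (possibly NULL) task counts.
            'state_projection': (
                "IF(tasks_total IS NULL OR tasks_queued = tasks_total, 'queued', "
                "IF(tasks_finished = tasks_total, 'finished', 'running'))"
            ),
            # Per-task conditions summed into the counts (assumed state values).
            'success_cond':   "tasks.state = 'success'",
            'error_cond':     "tasks.state = 'error'",
            'cancelled_cond': "tasks.state = 'cancelled'",
            'finished_cond':  "tasks.state IN ('success', 'error', 'cancelled')",
            'queued_cond':    "tasks.state = 'queued'",
            # Prefix match on the job id, for both the outer query and the subquery.
            # %(job_id_prefix)s survives the %-formatting because it sits inside a
            # substituted value, so it remains available as a driver placeholder.
            'job_id_predicate':      "jobs.id LIKE %(job_id_prefix)s",
            'task_job_id_predicate': "tasks.job_id LIKE %(job_id_prefix)s",
        }
        query_params = {'job_id_prefix': params['job_id'] + '%'}
        return generated_sql, query_params
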
Example #2
    def _execute(self, params):
        generated_sql, query_params = self._generate_sql(params)

        rows = self._db_query("""
            SELECT
                id,
                created,
                last_contact,
                spec,
                IFNULL(tasks_total, 0)                                          AS tasks_total,
                IFNULL(tasks_cancelled, 0)                                      AS tasks_cancelled,
                IFNULL(tasks_errored, 0)                                        AS tasks_errored,
                IFNULL(tasks_queued, 0)                                         AS tasks_queued,
                -- Tasks that are cancelled count as finished also
                -- It is always true that if one is null, all are null
                IFNULL(tasks_total - tasks_queued - tasks_finished, 0)          AS tasks_running,
                IFNULL(tasks_finished, 0)                                       AS tasks_finished,
                %(state_projection)s                                            AS state,
                bytes_total,
                bytes_downloaded,
                download_rate,
                first_task_start
            FROM
                jobs
                LEFT JOIN (
                    SELECT
                        tasks.job_id,

                        MIN(tasks.started)                                      AS first_task_start,
                        MAX(tasks.last_contact)                                 AS last_contact,

                        -- counts
                        -- These casts are necessary because MemSQL does not reliably return SUM() as an integer type
                        COUNT(tasks.id)                                         AS tasks_total,
                        CAST(SUM(%(cancelled_cond)s) AS SIGNED)                 AS tasks_cancelled,
                        CAST(SUM(%(error_cond)s) AS SIGNED)                     AS tasks_errored,
                        CAST(SUM(%(finished_cond)s) AS SIGNED)                  AS tasks_finished,
                        CAST(SUM(%(queued_cond)s) AS SIGNED)                    AS tasks_queued,

                        -- download information
                        -- CAST because JSON number types are always floats
                        CAST(SUM(tasks.bytes_total) AS SIGNED)        AS bytes_total,
                        CAST(SUM(tasks.bytes_downloaded) AS SIGNED)   AS bytes_downloaded,
                        CAST(SUM(tasks.download_rate) AS SIGNED)      AS download_rate
                    FROM tasks
                    GROUP BY tasks.job_id
                ) AS job_tasks ON job_tasks.job_id = jobs.id
            %(where_expr)s
            ORDER BY %(order_by)s %(order)s
            %(paging)s
        """ % generated_sql, **query_params)

        # Calculate an estimated time_left (in seconds) for each job; -1 means unknown
        for row in rows:
            time_left = -1
            no_nulls = None not in (row.last_contact, row.first_task_start, row.bytes_downloaded, row.bytes_total)
            if row.state == shared.JobState.RUNNING and no_nulls and row.bytes_downloaded != 0:
                last_contact = dateutil.parser.parse(row.last_contact)
                first_task_start = dateutil.parser.parse(row.first_task_start)
                time_since_start = last_contact - first_task_start
                overall_download_rate = row.bytes_downloaded / max(time_since_start.total_seconds(), 1)
                bytes_remaining = row.bytes_total - row.bytes_downloaded
                if overall_download_rate > 0:
                    time_left = bytes_remaining / overall_download_rate

            row['time_left'] = time_left

        return [shared.job_load_row(row) for row in rows]
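
For reference, the ETA loop above boils down to: overall rate = bytes downloaded divided by the seconds elapsed between the first task start and the last contact, and time left = remaining bytes divided by that rate, with -1 as the "unknown" sentinel. The same arithmetic as a standalone sketch (a hypothetical helper, not part of the original class):

    import dateutil.parser

    def estimate_time_left(first_task_start, last_contact, bytes_downloaded, bytes_total):
        # Hypothetical helper mirroring the loop above; returns seconds, or -1 if unknown.
        if None in (first_task_start, last_contact, bytes_downloaded, bytes_total):
            return -1
        if bytes_downloaded == 0:
            return -1
        elapsed = (dateutil.parser.parse(last_contact)
                   - dateutil.parser.parse(first_task_start)).total_seconds()
        overall_rate = bytes_downloaded / max(elapsed, 1)  # bytes per second since the job started
        if overall_rate <= 0:
            return -1
        return (bytes_total - bytes_downloaded) / overall_rate

    # Roughly 100 seconds left: 1 GB downloaded in 100 seconds (~10 MB/s), 1 GB remaining.
    estimate_time_left('2023-01-01T00:00:00', '2023-01-01T00:01:40', 10**9, 2 * 10**9)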