Code Example #1
def tearDown(self):
    with Connection(rq_redis_connection):
        for queue_name in default_queues:
            Queue(queue_name).empty()
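This tearDown drains every queue in default_queues after each test, so jobs left over from one run cannot leak into the next. Connection and Queue come from the rq package; rq_redis_connection and default_queues are provided by the surrounding Redash test setup.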
Code Example #2
def enqueue_query(
    query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata={}
):
    query_id = metadata.get("Query ID", "unknown")
    query_hash = gen_query_hash(query)
    get_logger().info("[query_id=%s] [query_hash=%s] Inserting job", query_id, query_hash)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                job_status = "UNKNOWN"
                job_complete = False
                job_cancelled = "False"

                try:
                    job = Job.fetch(job_id)
                    job_exists = True
                    job_status = job.get_status()
                    job_complete = job_status in [JobStatus.FINISHED, JobStatus.FAILED]
                    if job.is_cancelled:
                        job_cancelled = "True"
                except NoSuchJobError:
                    job_exists = False
                    job_status = "EXPIRED"

                get_logger().info("[query_id=%s] [query_hash=%s] Found existing job [job.id=%s] [job_status=%s] [job_cancelled=%s]", query_id, query_hash, job_id, job_status, job_cancelled)

                if job_complete or (not job_exists):
                    #get_logger().info("[query_id=%s] [query_hash=%s] [job.id=%s], removing redis lock", query_id, query_hash, job_id)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name  # defaults to "scheduled_queries"
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name  # defaults to "queries"
                    scheduled_query_id = None

                time_limit = settings.dynamic_settings.query_time_limit(
                    scheduled_query, user_id, data_source.org_id
                )
                metadata["Queue"] = queue_name
                metadata["Enqueue Time"] = time.time()

                queue = Queue(queue_name)
                enqueue_kwargs = {
                    "user_id": user_id,
                    "scheduled_query_id": scheduled_query_id,
                    "is_api_key": is_api_key,
                    "job_timeout": time_limit,
                    "meta": {
                        "data_source_id": data_source.id,
                        "org_id": data_source.org_id,
                        "scheduled": scheduled_query_id is not None,
                        "query_id": query_id,
                        "user_id": user_id,
                    },
                }

                if not scheduled_query:
                    enqueue_kwargs["result_ttl"] = settings.JOB_EXPIRY_TIME

                job = queue.enqueue(
                    execute_query, query, data_source.id, metadata, **enqueue_kwargs
                )

                get_logger().info("[query_id=%s] [query_hash=%s] Created new job [job.id=%s]", query_id, query_hash, job.id)
                pipe.set(
                    _job_lock_id(query_hash, data_source.id),
                    job.id,
                    settings.JOB_EXPIRY_TIME,
                )
                pipe.execute()
            break

        except redis.WatchError:
            get_logger().error("[query_id=%s] [query_hash=%s] redis.WatchError, try_count = %d", query_id, query_hash, try_count)
            continue

    if not job:
        get_logger().error("[Manager] [query_id=%s] [query_hash=%s] Failed adding job for query.", query_id, query_hash)

    return job
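Every variant on this page keys its Redis lock on _job_lock_id, which is not shown here. A minimal sketch, assuming the key format used by Redash (the exact prefix may differ by version):

def _job_lock_id(query_hash, data_source_id):
    # One lock key per (data source, query hash) pair, so identical queries
    # against the same data source are deduplicated into a single job.
    return "query_hash_job:%s:%s" % (data_source_id, query_hash)

Note also that metadata={} is a mutable default that the function then mutates (metadata["Queue"] = queue_name), so state can leak between calls that rely on the default; passing an explicit dict avoids this.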
Code Example #3
File: execution.py  Project: zzybaba/redash
def enqueue_query(query,
                  data_source,
                  user_id,
                  is_api_key=False,
                  scheduled_query=None,
                  metadata={}):
    query_hash = gen_query_hash(query)
    logger.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logger.info("[%s] Found existing job: %s", query_hash, job_id)
                job_complete = None

                try:
                    job = Job.fetch(job_id)
                    job_exists = True
                    status = job.get_status()
                    job_complete = status in [
                        JobStatus.FINISHED, JobStatus.FAILED
                    ]

                    if job_complete:
                        message = "job found is complete (%s)" % status
                except NoSuchJobError:
                    message = "job found has expired"
                    job_exists = False

                if job_complete or not job_exists:
                    logger.info("[%s] %s, removing lock", query_hash, message)
                    redis_connection.delete(
                        _job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None

                time_limit = settings.dynamic_settings.query_time_limit(
                    scheduled_query, user_id, data_source.org_id)
                metadata["Queue"] = queue_name

                queue = Queue(queue_name)
                enqueue_kwargs = {
                    "user_id": user_id,
                    "scheduled_query_id": scheduled_query_id,
                    "is_api_key": is_api_key,
                    "job_timeout": time_limit,
                    "meta": {
                        "data_source_id": data_source.id,
                        "org_id": data_source.org_id,
                        "scheduled": scheduled_query_id is not None,
                        "query_id": metadata.get("Query ID"),
                        "user_id": user_id,
                    },
                }

                if not scheduled_query:
                    enqueue_kwargs["result_ttl"] = settings.JOB_EXPIRY_TIME

                job = queue.enqueue(execute_query, query, data_source.id,
                                    metadata, **enqueue_kwargs)

                logger.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(
                    _job_lock_id(query_hash, data_source.id),
                    job.id,
                    settings.JOB_EXPIRY_TIME,
                )
                pipe.execute()
            break

        except redis.WatchError:
            continue

    if not job:
        logger.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
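For context, a hypothetical call site for an ad-hoc (non-scheduled) query; query_text, data_source, query_record, and current_user are illustrative names, not part of the example above:

# Hypothetical caller: enqueue an ad-hoc query and surface failure.
job = enqueue_query(
    query_text,
    data_source,
    current_user.id,
    is_api_key=False,
    scheduled_query=None,
    metadata={"Query ID": query_record.id, "Username": current_user.email},
)
if job is None:
    # All five WATCH attempts lost the race to another client.
    raise RuntimeError("Failed to enqueue query")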
Code Example #4
def enqueue_query(query,
                  data_source,
                  user_id,
                  is_api_key=False,
                  scheduled_query=None,
                  metadata={}):
    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)

                # Note: unlike Code Example #3, this variant does not catch
                # NoSuchJobError, so Job.fetch raises if the job has expired
                # while its lock key is still set.
                job = Job.fetch(job_id)

                status = job.get_status()
                if status in [JobStatus.FINISHED, JobStatus.FAILED]:
                    logging.info(
                        "[%s] job found is ready (%s), removing lock",
                        query_hash,
                        status,
                    )
                    redis_connection.delete(
                        _job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                if scheduled_query:
                    queue_name = data_source.scheduled_queue_name
                    scheduled_query_id = scheduled_query.id
                else:
                    queue_name = data_source.queue_name
                    scheduled_query_id = None

                time_limit = settings.dynamic_settings.query_time_limit(
                    scheduled_query, user_id, data_source.org_id)
                metadata["Queue"] = queue_name

                queue = Queue(queue_name)
                job = queue.enqueue(
                    execute_query,
                    query,
                    data_source.id,
                    metadata,
                    user_id=user_id,
                    scheduled_query_id=scheduled_query_id,
                    is_api_key=is_api_key,
                    job_timeout=time_limit,
                )

                logging.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(
                    _job_lock_id(query_hash, data_source.id),
                    job.id,
                    settings.JOB_EXPIRY_TIME,
                )
                pipe.execute()
            break

        except redis.WatchError:
            continue

    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
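All of these variants rely on redis-py optimistic locking: WATCH the lock key, inspect it, then queue the SET inside MULTI/EXEC. If another client touches the key in between, pipe.execute() raises redis.WatchError and the loop retries (here, up to five times). A stripped-down, standalone sketch of the same pattern, not Redash code:

import redis

r = redis.Redis()

def set_if_absent(key, value, ttl, max_tries=5):
    # WATCH/MULTI/EXEC optimistic locking: the SET commits only if no
    # other client modified key between WATCH and EXEC.
    for _ in range(max_tries):
        pipe = r.pipeline()
        try:
            pipe.watch(key)            # pipeline now executes immediately
            if pipe.get(key) is not None:
                pipe.unwatch()
                return False           # key already held by another client
            pipe.multi()               # start buffering the transaction
            pipe.set(key, value, ex=ttl)
            pipe.execute()             # raises WatchError if key changed
            return True
        except redis.WatchError:
            continue                   # lost the race; try again
    return False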