Example #1
    def test_worker_registration(self):
        """Ensure worker.key is correctly set in Redis."""
        foo_queue = Queue(name='foo')
        bar_queue = Queue(name='bar')
        worker = Worker([foo_queue, bar_queue])

        register(worker)
        redis = worker.connection

        self.assertTrue(redis.sismember(worker.redis_workers_keys, worker.key))
        self.assertEqual(Worker.count(connection=redis), 1)
        self.assertTrue(
            redis.sismember(WORKERS_BY_QUEUE_KEY % foo_queue.name, worker.key)
        )
        self.assertEqual(Worker.count(queue=foo_queue), 1)
        self.assertTrue(
            redis.sismember(WORKERS_BY_QUEUE_KEY % bar_queue.name, worker.key)
        )
        self.assertEqual(Worker.count(queue=bar_queue), 1)

        unregister(worker)
        self.assertFalse(redis.sismember(worker.redis_workers_keys, worker.key))
        self.assertFalse(
            redis.sismember(WORKERS_BY_QUEUE_KEY % foo_queue.name, worker.key)
        )
        self.assertFalse(
            redis.sismember(WORKERS_BY_QUEUE_KEY % bar_queue.name, worker.key)
        )
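
For context, a minimal sketch of the registration round-trip this test exercises, assuming register/unregister come from rq.worker_registration (as in rq's own test suite) and an otherwise empty Redis:

from redis import Redis
from rq import Queue, Worker
from rq.worker_registration import register, unregister

redis = Redis()
worker = Worker([Queue('foo', connection=redis)], connection=redis)

register(worker)    # adds worker.key to the global set and each per-queue set
assert Worker.count(connection=redis) == 1
unregister(worker)  # removes it from those sets again
assert Worker.count(connection=redis) == 0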
Example #2
    def post(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            return HttpResponseForbidden()

        job_class = get_job(class_path)
        if job_class is None:
            raise Http404
        job = job_class()
        grouping, module, class_name = class_path.split("/", 2)
        form = job.as_form(request.POST, request.FILES)

        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection("default")):
            messages.error(
                request, "Unable to run job: RQ worker process not running.")

        elif form.is_valid():
            # Run the job. A new JobResult is created.
            commit = form.cleaned_data.pop("_commit")

            job_content_type = ContentType.objects.get(app_label="extras",
                                                       model="job")
            job_result = JobResult.enqueue_job(
                run_job,
                job.class_path,
                job_content_type,
                request.user,
                data=form.cleaned_data,
                request=copy_safe_request(request),
                commit=commit,
            )

            return redirect("extras:job_jobresult", pk=job_result.pk)

        return render(
            request,
            "extras/job.html",
            {
                "grouping": grouping,
                "module": module,
                "job": job,
                "form": form,
            },
        )
Example #3
    def run(self, request, class_path):
        if not request.user.has_perm("extras.run_job"):
            raise PermissionDenied(
                "This user does not have permission to run jobs.")

        # Check that at least one RQ worker is running
        if not Worker.count(get_connection("default")):
            raise RQWorkerNotRunningException()

        job_class = self._get_job_class(class_path)
        job = job_class()
        input_serializer = serializers.JobInputSerializer(data=request.data)

        if not input_serializer.is_valid():
            return Response(input_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        data = input_serializer.data["data"]
        commit = input_serializer.data["commit"]
        if commit is None:
            commit = getattr(job_class.Meta, "commit_default", True)

        job_content_type = ContentType.objects.get(app_label="extras",
                                                   model="job")

        job_result = JobResult.enqueue_job(
            run_job,
            job.class_path,
            job_content_type,
            request.user,
            data=data,
            request=copy_safe_request(request),
            commit=commit,
        )
        job.result = job_result

        serializer = serializers.JobDetailSerializer(
            job, context={"request": request})

        return Response(serializer.data)
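
A hedged client-side sketch of calling a run endpoint like the one above; the host, URL pattern, and token are illustrative assumptions, while the data and commit fields mirror what JobInputSerializer reads in the view:

import requests  # third-party HTTP client

payload = {"data": {"example_var": 42}, "commit": False}
resp = requests.post(
    "https://nautobot.example.com/api/extras/jobs/local/example/MyJob/run/",  # assumed route
    json=payload,
    headers={"Authorization": "Token 0123456789abcdef"},  # placeholder token
)
print(resp.status_code, resp.json())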
Example #4
    def post(self, request, module, name):

        # Permissions check
        if not request.user.has_perm('extras.run_script'):
            return HttpResponseForbidden()

        script = self._get_script(name, module)
        form = script.as_form(request.POST, request.FILES)

        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection('default')):
            messages.error(
                request,
                "Unable to run script: RQ worker process not running.")

        elif form.is_valid():
            commit = form.cleaned_data.pop('_commit')

            script_content_type = ContentType.objects.get(app_label='extras',
                                                          model='script')
            job_result = JobResult.enqueue_job(
                run_script,
                script.full_name,
                script_content_type,
                request.user,
                data=form.cleaned_data,
                request=copy_safe_request(request),
                commit=commit)

            return redirect('extras:script_result',
                            job_result_pk=job_result.pk)

        return render(request, 'extras/script.html', {
            'module': module,
            'script': script,
            'form': form,
        })
Example #5
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils.timezone import make_aware
from django_rq.queues import get_connection
from rest_framework import status
from rq import Worker

from dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Rack, RackGroup, RackRole, Site
from extras.api.views import ReportViewSet, ScriptViewSet
from extras.models import ConfigContext, ExportTemplate, Graph, ImageAttachment, Tag
from extras.reports import Report
from extras.scripts import BooleanVar, IntegerVar, Script, StringVar
from utilities.testing import APITestCase, APIViewTestCases

rq_worker_running = Worker.count(get_connection('default'))


class AppTest(APITestCase):
    def test_root(self):

        url = reverse('extras-api:api-root')
        response = self.client.get('{}?format=api'.format(url), **self.header)

        self.assertEqual(response.status_code, 200)


class GraphTest(APIViewTestCases.APIViewTestCase):
    model = Graph
    brief_fields = ['id', 'name', 'url']
    create_data = [
Example #6
    CustomLink,
    ExportTemplate,
    GitRepository,
    ImageAttachment,
    JobResult,
    Relationship,
    RelationshipAssociation,
    Status,
    Tag,
    Webhook,
)
from nautobot.extras.jobs import Job, BooleanVar, IntegerVar, StringVar
from nautobot.utilities.testing import APITestCase, APIViewTestCases
from nautobot.utilities.testing.utils import disable_warnings

rq_worker_running = Worker.count(get_connection("default"))

THIS_DIRECTORY = os.path.dirname(__file__)


class AppTest(APITestCase):
    def test_root(self):

        url = reverse("extras-api:api-root")
        response = self.client.get("{}?format=api".format(url), **self.header)

        self.assertEqual(response.status_code, 200)


class CustomFieldTest(APIViewTestCases.APIViewTestCase):
    model = CustomField
Example #7
    def count_workers(self):
        """Count how many workers are active

        (Note: this assumes that all workers process the first (high-priority) queue.)
        """
        return Worker.count(queue=self.queues[0])
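
A minimal usage sketch for the method above, assuming a hypothetical wrapper class whose self.queues list is ordered highest-priority first:

from redis import Redis
from rq import Queue, Worker

class QueueMonitor:
    # Hypothetical wrapper: queues ordered highest-priority first.
    def __init__(self, connection, names=('high', 'default', 'low')):
        self.queues = [Queue(name, connection=connection) for name in names]

    def count_workers(self):
        # Assumes every worker listens on the first (high-priority) queue.
        return Worker.count(queue=self.queues[0])

monitor = QueueMonitor(Redis())
print(monitor.count_workers())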
Example #8
from redis import Redis
from rq import Queue, Worker

redis = Redis()

# Get information about the rq workers
workers = Worker.all(connection=redis)
# print(workers)
for worker in workers:
    print(worker.successful_job_count)
    print(worker.failed_job_count)
    print(worker.total_working_time)

# Total number of workers
worker_count = Worker.count(connection=redis)
print(worker_count)

# Count the workers attached to a queue
queue = Queue('low', connection=redis)
queue_worker_count = Worker.count(queue=queue)
print(queue_worker_count)

# # Get the workers' failure rate
# worker = Worker.all_keys(connection=redis)
# print(worker)

# q = Queue('low',connection=redis)
# print(q.count)
# worker = Worker.find_by_key('rq:worker:MacBook-Pro-1.18076', connection=redis)
# print(worker.successful_job_count)
# print(worker.failed_job_count)
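
The commented-out lines above gesture at a failure rate; a small sketch of how one could be computed from the per-worker counters already printed in this script (guarding against workers with no finished jobs):

for worker in Worker.all(connection=redis):
    total = worker.successful_job_count + worker.failed_job_count
    failure_rate = worker.failed_job_count / total if total else 0.0
    print(worker.name, f"{failure_rate:.1%}")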

Example #9
def job_receiver():
    """
    Accepts POST requests and checks the (JSON) payload.

    Queues approved jobs on the Redis instance at the global redis_hostname:6379.
    The queue name is our_adjusted_convertHTML_queue_name (may have been prefixed).
    """
    #assert request.method == 'POST'
    stats_client.incr('posts.attempted')
    logger.info(
        f"tX {'('+PREFIX+')' if PREFIX else ''} enqueue received request: {request}"
    )

    # Collect and log some helpful information for all three queues
    HTML_queue = Queue(our_adjusted_convertHTML_queue_name,
                       connection=redis_connection)
    len_HTML_queue = len(HTML_queue)
    stats_client.gauge(f'{TX_JOB_HANDLER_QUEUE_NAME}.queue.length.current',
                       len_HTML_queue)
    len_HTML_failed_queue = handle_failed_queue(
        our_adjusted_convertHTML_queue_name)
    stats_client.gauge(f'{TX_JOB_HANDLER_QUEUE_NAME}.queue.length.failed',
                       len_HTML_failed_queue)
    OBSPDF_queue = Queue(our_adjusted_convertOBSPDF_queue_name,
                         connection=redis_connection)
    len_OBSPDF_queue = len(OBSPDF_queue)
    stats_client.gauge(f'{OBS_PDF_QUEUE_NAME}.queue.length.current',
                       len_OBSPDF_queue)
    len_OBSPDF_failed_queue = handle_failed_queue(
        our_adjusted_convertOBSPDF_queue_name)
    stats_client.gauge(f'{OBS_PDF_QUEUE_NAME}.queue.length.failed',
                       len_OBSPDF_failed_queue)
    otherPDF_queue = Queue(our_adjusted_convertOtherPDF_queue_name,
                           connection=redis_connection)
    len_otherPDF_queue = len(otherPDF_queue)
    stats_client.gauge(f'{OTHER_PDF_QUEUE_NAME}.queue.length.current',
                       len_otherPDF_queue)
    len_otherPDF_failed_queue = handle_failed_queue(
        our_adjusted_convertOtherPDF_queue_name)
    stats_client.gauge(f'{OTHER_PDF_QUEUE_NAME}.queue.length.failed',
                       len_otherPDF_failed_queue)

    # Find out how many workers we have
    total_worker_count = Worker.count(connection=redis_connection)
    logger.debug(f"Total rq workers = {total_worker_count}")
    queue1_worker_count = Worker.count(queue=HTML_queue)
    logger.debug(
        f"Our {our_adjusted_convertHTML_queue_name} queue workers = {queue1_worker_count}"
    )
    stats_client.gauge('workers.HTML.available', queue1_worker_count)
    if queue1_worker_count < 1:
        logger.critical(
            f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} has no HTML job handler workers running!"
        )
        # Go ahead and queue the job anyway for when a worker is restarted
    queue2_worker_count = Worker.count(queue=OBSPDF_queue)
    logger.debug(
        f"Our {our_adjusted_convertOBSPDF_queue_name} queue workers = {queue2_worker_count}"
    )
    stats_client.gauge('workers.OBSPDF.available', queue2_worker_count)
    if queue2_worker_count < 1:
        logger.critical(
            f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} has no OBSPDF job handler workers running!"
        )
        # Go ahead and queue the job anyway for when a worker is restarted
    queue3_worker_count = Worker.count(queue=otherPDF_queue)
    logger.debug(
        f"Our {our_adjusted_convertOtherPDF_queue_name} queue workers = {queue3_worker_count}"
    )
    stats_client.gauge('workers.otherPDF.available', queue3_worker_count)
    if queue3_worker_count < 1:
        logger.critical(
            f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} has no otherPDF job handler workers running!"
        )
        # Go ahead and queue the job anyway for when a worker is restarted

    response_ok_flag, response_dict = check_posted_tx_payload(request, logger)
    # response_dict is json payload if successful, else error info
    if response_ok_flag:
        logger.debug("tX_job_receiver processing good payload…")

        our_job_id = response_dict['job_id'] if 'job_id' in response_dict \
                        else get_unique_job_id()

        # Determine which worker to queue this request for
        if response_dict['output_format'] == 'html':
            job_type, not_job_type = 'HTML', 'PDF'
            our_adjusted_queue_name = our_adjusted_convertHTML_queue_name
            our_other_adjusted_queue_name = our_other_adjusted_convertHTML_queue_name
            our_adjusted_queue_name2 = our_adjusted_convertOBSPDF_queue_name
            our_other_adjusted_queue_name2 = our_other_adjusted_convertOBSPDF_queue_name
            our_adjusted_queue_name3 = our_adjusted_convertOtherPDF_queue_name
            our_other_adjusted_queue_name3 = our_other_adjusted_convertOtherPDF_queue_name
            our_queue = HTML_queue
            expected_output_URL = f"{TX_JOB_CDN_BUCKET}{our_job_id}.zip"
        elif response_dict['output_format'] == 'pdf':
            not_job_type = 'HTML'
            our_adjusted_queue_name2 = our_adjusted_convertHTML_queue_name
            our_other_adjusted_queue_name2 = our_other_adjusted_convertHTML_queue_name

            # Try to guess where the output PDF will end up
            expected_output_URL = 'UNKNOWN'
            if 'identifier' in response_dict \
            and '/' not in response_dict['identifier']:
                if response_dict['identifier'].count('--') == 2:
                    # Expected identifier in form '<repo_owner_username>/<repo_name>--<branch_or_tag_name>'
                    #  e.g. 'unfoldingWord/en_obs--v1'
                    logger.debug(
                        "Using1 'identifier' field to determine expected_output_URL…"
                    )
                    repo_owner_username, repo_name, branch_or_tag_name = response_dict[
                        'identifier'].split('--')
                    expected_output_URL = f"{PDF_CDN_BUCKET}{repo_owner_username}/{repo_name}/{branch_or_tag_name}/{response_dict['identifier']}.pdf"
                elif response_dict['identifier'].count('--') == 3:
                    # Expected identifier in form '<repo_owner_username>/<repo_name>--<branch_name>--<commit_hash>'
                    #  e.g. 'unfoldingWord/en_obs--master--7dac1e5ba2'
                    logger.debug(
                        "Using2 'identifier' field to determine expected_output_URL…"
                    )
                    repo_owner_username, repo_name, branch_or_tag_name, _commit_hash = response_dict[
                        'identifier'].split('--')
                    expected_output_URL = f"{PDF_CDN_BUCKET}{repo_owner_username}/{repo_name}/{branch_or_tag_name}/{repo_owner_username}--{repo_name}--{branch_or_tag_name}.pdf"
            elif response_dict['source'].count('/') == 6 \
            and response_dict['source'].endswith('.zip'):
                # Expected URL in form 'https://git.door43.org/<repo_owner_username>/<repo_name>/archive/<branch_or_tag_name>.zip'
                #  e.g. 'https://git.door43.org/unfoldingWord/en_obs/archive/master.zip'
                logger.debug(
                    "Using 'source' field to determine expected_output_URL…")
                parts = response_dict['source'][:-4].split('/')  # Remove the .zip first
                if len(parts) != 7:
                    logger.critical(
                        f"Source field is in unexpected form: '{response_dict['source']}' -> {parts}.zip"
                    )
                expected_output_URL = f"{PDF_CDN_BUCKET}{parts[3]}/{parts[4]}/{parts[6]}/{parts[3]}--{parts[4]}--{parts[6]}"
            logger.info(f"Got expected_output_URL = {expected_output_URL}")

            # Determine the correct PDF creation queue
            if response_dict['resource_type'] == 'Open_Bible_Stories':  # subject
                job_type = 'OBS-PDF'
                our_adjusted_queue_name = our_adjusted_convertOBSPDF_queue_name
                our_other_adjusted_queue_name = our_other_adjusted_convertOBSPDF_queue_name
                our_adjusted_queue_name3 = our_adjusted_convertOtherPDF_queue_name
                our_other_adjusted_queue_name3 = our_other_adjusted_convertOtherPDF_queue_name
                our_queue = OBSPDF_queue
            else:  # not OBS
                job_type = 'other-PDF'
                our_adjusted_queue_name = our_adjusted_convertOtherPDF_queue_name
                our_other_adjusted_queue_name = our_other_adjusted_convertOtherPDF_queue_name
                our_adjusted_queue_name3 = our_adjusted_convertOBSPDF_queue_name
                our_other_adjusted_queue_name3 = our_other_adjusted_convertOBSPDF_queue_name
                our_queue = otherPDF_queue

        # Extend the given payload (dict) to add our required fields
        #logger.debug("Building our response dict…")
        our_response_dict = dict(response_dict)
        our_response_dict.update({
            'success': True,
            'status': 'queued',
            'queue_name': our_adjusted_queue_name,
            'tx_job_queued_at': datetime.utcnow(),
        })
        if 'job_id' not in our_response_dict:
            our_response_dict['job_id'] = our_job_id
        if 'identifier' not in our_response_dict:
            our_response_dict['identifier'] = our_job_id
        our_response_dict['output'] = expected_output_URL
        our_response_dict['expires_at'] = our_response_dict['tx_job_queued_at'] + timedelta(days=1)
        our_response_dict['eta'] = our_response_dict['tx_job_queued_at'] + timedelta(minutes=5)
        our_response_dict['tx_retry_count'] = 0
        logger.debug(f"About to queue {job_type} job: {our_response_dict}")

        # NOTE: No ttl specified on the next line—this seems to cause unrun jobs to be just silently dropped
        #           (For now at least, we prefer them to just stay in the queue if they're not getting processed.)
        #       The timeout value determines the max run time of the worker once the job is accessed
        our_queue.enqueue(
            'webhook.job', our_response_dict, job_timeout=JOB_TIMEOUT
        )  # A function named webhook.job will be called by the worker
        # NOTE: The above line can return a result from the webhook.job function. (By default, the result remains available for 500s.)

        # Find out who our workers are
        #workers = Worker.all(connection=redis_connection) # Returns the actual worker objects
        #logger.debug(f"Total rq workers ({len(workers)}): {workers}")
        #our_queue_workers = Worker.all(queue=our_queue)
        #logger.debug(f"Our {our_adjusted_queue_name} queue workers ({len(our_queue_workers)}): {our_queue_workers}")

        # Find out how many workers we have
        #worker_count = Worker.count(connection=redis_connection)
        #logger.debug(f"Total rq workers = {worker_count}")
        #our_queue_worker_count = Worker.count(queue=our_queue)
        #logger.debug(f"Our {our_adjusted_queue_name} queue workers = {our_queue_worker_count}")

        len_our_queue = len(our_queue)  # Update
        other_queue = Queue(our_other_adjusted_queue_name,
                            connection=redis_connection)
        queue2 = Queue(our_adjusted_queue_name2, connection=redis_connection)
        other_queue2 = Queue(our_other_adjusted_queue_name2,
                             connection=redis_connection)
        queue3 = Queue(our_adjusted_queue_name3, connection=redis_connection)
        other_queue3 = Queue(our_other_adjusted_queue_name3,
                             connection=redis_connection)
        logger.info(f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} queued valid {job_type} job to {our_adjusted_queue_name} queue " \
                    f"({len_our_queue} {job_type} jobs now " \
                        f"for {Worker.count(queue=our_queue)} workers, " \
                    f"{len(other_queue)} {job_type} jobs in {our_other_adjusted_queue_name} queue " \
                        f"for {Worker.count(queue=other_queue)} workers, " \
                    f"{len_HTML_failed_queue} failed {job_type} jobs), " \
                    f"({len(queue2)} {not_job_type} jobs in {our_adjusted_queue_name2} queue, " \
                    f"{len(other_queue2)} {not_job_type} jobs in {our_other_adjusted_queue_name2} queue) " \
                    f"({len(queue3)} {not_job_type} jobs in {our_adjusted_queue_name3} queue, " \
                    f"{len(other_queue3)} {not_job_type} jobs in {our_other_adjusted_queue_name3} queue) " \
                    f"at {datetime.utcnow()}\n")
        stats_client.incr('posts.succeeded')
        return jsonify(our_response_dict)
    else:
        stats_client.incr('posts.invalid')
        response_dict['status'] = 'invalid'
        logger.error(
            f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} ignored invalid payload; responding with {response_dict}\n"
        )
        return jsonify(response_dict), 400
# NOTE: The prefixed version must also listen at a different port (specified in gunicorn run command)
#our_callback_name = our_adjusted_convertHTML_queue_name + CALLBACK_SUFFIX
#our_other_adjusted_callback_name = our_other_adjusted_convertHTML_queue_name + CALLBACK_SUFFIX

prefix_string = f" with prefix '{PREFIX}'" if PREFIX else ""
logger.info(
    f"tx_enqueue_main.py{prefix_string}{test_string} running on Python v{sys.version}"
)

# Connect to Redis now so it fails at import time if no Redis instance available
logger.info(f"redis_hostname is '{redis_hostname}'")
logger.debug(f"{PREFIXED_DOOR43_JOB_HANDLER_QUEUE_NAME} connecting to Redis…")
redis_connection = StrictRedis(host=redis_hostname)
logger.debug(
    "Getting total worker count in order to verify working Redis connection…")
total_rq_worker_count = Worker.count(connection=redis_connection)
logger.debug(f"Total rq workers = {total_rq_worker_count}")

# Get the Graphite URL from the environment, otherwise use a local test instance
graphite_url = getenv('GRAPHITE_HOSTNAME', 'localhost')
logger.info(f"graphite_url is '{graphite_url}'")
stats_prefix = f"tx.{'dev' if PREFIX else 'prod'}.enqueue-job"
stats_client = StatsClient(host=graphite_url, port=8125, prefix=stats_prefix)

TX_JOB_CDN_BUCKET = f'https://{PREFIX}cdn.door43.org/tx/job/'
PDF_CDN_BUCKET = f'https://{PREFIX}cdn.door43.org/u/'

app = Flask(__name__)
# Not sure that we need this Flask logging
# app.logger.addHandler(watchtower_log_handler)
# logging.getLogger('werkzeug').addHandler(watchtower_log_handler)

Example #10
def job_receiver():
    """
    Accepts POST requests and checks the (JSON) payload.

    Queues approved jobs on the Redis instance at the global redis_hostname:6379.
    The queue name is our_adjusted_webhook_queue_name (may have been prefixed).
    """
    #assert request.method == 'POST'
    stats_client.incr('webhook.posts.attempted')
    logger.info(f"WEBHOOK received by {prefixed_our_name}: {request}")
    # NOTE: 'request' above typically displays something like "<Request 'http://git.door43.org/' [POST]>"

    our_queue = Queue(our_adjusted_webhook_queue_name,
                      connection=redis_connection)

    # Collect and log some helpful information
    len_our_queue = len(our_queue)  # Should normally sit at zero here
    stats_client.gauge('webhook.queue.length.current', len_our_queue)
    len_our_failed_queue = handle_failed_queue(our_adjusted_webhook_queue_name)
    stats_client.gauge('webhook.queue.length.failed', len_our_failed_queue)

    # Find out how many workers we have
    total_worker_count = Worker.count(connection=redis_connection)
    logger.debug(f"Total rq workers = {total_worker_count}")
    our_queue_worker_count = Worker.count(queue=our_queue)
    logger.debug(
        f"Our {our_adjusted_webhook_queue_name} queue workers = {our_queue_worker_count}"
    )
    stats_client.gauge('webhook.workers.available', our_queue_worker_count)
    if our_queue_worker_count < 1:
        logger.critical(
            f'{prefixed_our_name} has no job handler workers running!')
        # Go ahead and queue the job anyway for when a worker is restarted

    response_ok_flag, response_dict = check_posted_payload(request, logger)
    # response_dict is json payload if successful, else error info
    if response_ok_flag:
        logger.debug(f"{prefixed_our_name} queuing good payload…")

        # Check for special switch to echo production requests to dev- chain
        global echo_prodn_to_dev_flag
        if not prefix:  # Only apply to production chain
            try:
                repo_name = response_dict['repository']['full_name']
            except (KeyError, AttributeError):
                repo_name = None
            if repo_name == 'tx-manager-test-data/echo_prodn_to_dev_on':
                echo_prodn_to_dev_flag = True
                logger.info("TURNED ON 'echo_prodn_to_dev_flag'!\n")
                stats_client.incr('webhook.posts.succeeded')
                return jsonify({'success': True, 'status': 'echo ON'})
            if repo_name == 'tx-manager-test-data/echo_prodn_to_dev_off':
                echo_prodn_to_dev_flag = False
                logger.info("Turned off 'echo_prodn_to_dev_flag'.\n")
                stats_client.incr('webhook.posts.succeeded')
                return jsonify({'success': True, 'status': 'echo off'})

        # Add our fields
        # In case we want to retry failed jobs
        response_dict['door43_webhook_retry_count'] = 0
        # Used to calculate total elapsed time
        response_dict['door43_webhook_received_at'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

        # NOTE: No ttl specified on the next line -- this seems to cause unrun jobs to be just silently dropped
        #           (For now at least, we prefer them to just stay in the queue if they're not getting processed.)
        #       The timeout value determines the max run time of the worker once the job is accessed
        our_queue.enqueue(
            'webhook.job', response_dict, job_timeout=WEBHOOK_TIMEOUT
        )  # A function named webhook.job will be called by the worker
        # NOTE: The above line can return a result from the webhook.job function. (By default, the result remains available for 500s.)

        # See if we want to echo this job to the dev- queue (used for dev- code testing)
        other_queue = Queue(our_other_adjusted_queue_name,
                            connection=redis_connection)
        if echo_prodn_to_dev_flag:  # Should only be set on production chain (so repo_name should be set)
            if repo_name == 'acceptance_test/test':
                logger.info(
                    f"Not echoing '{repo_name}' to {our_other_adjusted_queue_name} queue."
                )
            else:  # for all others
                logger.info(
                    f"ALSO ECHOING JOB to {our_other_adjusted_queue_name} queue…"
                )
                logger.info(
                    "  (Use https://git.door43.org/tx-manager-test-data/echo_prodn_to_dev_off/settings/hooks/44079 to turn this off.)"
                )
                response_dict['echoed_from_production'] = True
                other_queue.enqueue(
                    'webhook.job', response_dict, job_timeout=WEBHOOK_TIMEOUT
                )  # A function named webhook.job will be called by the worker

        # Find out who our workers are
        #workers = Worker.all(connection=redis_connection) # Returns the actual worker objects
        #logger.debug(f"Total rq workers ({len(workers)}): {workers}")
        #our_queue_workers = Worker.all(queue=our_queue)
        #logger.debug(f"Our {our_adjusted_webhook_queue_name} queue workers ({len(our_queue_workers)}): {our_queue_workers}")

        len_our_queue = len(our_queue)  # Update
        logger.info(f"{prefixed_our_name} queued valid job to {our_adjusted_webhook_queue_name} queue " \
                    f"({len_our_queue} jobs now " \
                        f"for {Worker.count(queue=our_queue)} workers, " \
                    f"{len(other_queue)} jobs in {our_other_adjusted_queue_name} queue " \
                        f"for {Worker.count(queue=other_queue)} workers, " \
                    f"{len_our_failed_queue} failed jobs) at {datetime.utcnow()}\n")

        webhook_return_dict = {
            'success': True,
            'status': 'queued',
            'queue_name': our_adjusted_webhook_queue_name,
            'door43_job_queued_at': datetime.utcnow()
        }
        if echo_prodn_to_dev_flag:
            webhook_return_dict['echoed_queue_name'] = our_other_adjusted_queue_name
        stats_client.incr('webhook.posts.succeeded')
        return jsonify(webhook_return_dict)
    #else:
    stats_client.incr('webhook.posts.invalid')
    response_dict['status'] = 'invalid'
    try:
        detail = request.headers['X-Gitea-Event']
    except KeyError:
        detail = "No X-Gitea-Event"
    logger.error(
        f"{prefixed_our_name} ignored invalid '{detail}' payload; responding with {response_dict}\n"
    )
    return jsonify(response_dict), 400