Example 1
def revoke_p_task(pr_id):
    """
    Revoke the latest async task (name=drm.tasks.run_workflow) for the given process id.
    return status (int): 1 = success, 2 = failure, 0 = task not found
    """
    status = 0
    try:
        task_url = "http://127.0.0.1:5555/api/tasks"

        try:
            task_json_info = requests.get(task_url).text
        except Exception:
            status = 2
        else:
            task_dict_info = json.loads(task_json_info)
            c_control = Control(app=app)

            for key, value in task_dict_info.items():
                try:
                    task_process_id = value["args"][1:-1].split(',')[0][1:-1]
                except Exception:
                    task_process_id = ""
                # Terminate the async task belonging to the specified process
                if task_process_id == pr_id and value[
                        "name"] == "drm.tasks.run_workflow":
                    task_id = key
                    print(key)
                    c_control.revoke(str(task_id), terminate=True)
                    status = 1
    except Exception as e:
        print(e)
        status = 2
    return status
Example 2
def stop(request, task_id=None):
    control = Control(app=app)
    control.revoke(task_id, terminate=True)
    file_obj = FileData.objects.get(task_id=task_id)
    file_obj.status = 'Canceled'
    file_obj.save()
    return Response({'success': True}, status=200)
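A note on the `terminate=True` flag used above: `revoke()` alone only marks the task id so workers discard the task when it arrives, while `terminate=True` additionally signals the process that is already executing it (SIGTERM by default). A minimal sketch, with placeholder broker URL and task id:

from celery import Celery
from celery.app.control import Control

app = Celery('proj', broker='redis://localhost:6379/0')  # placeholder broker

control = Control(app=app)
# Discard the task if still queued; SIGTERM the worker child process if it is already running.
control.revoke('9fbcc18e-45d5-4b9f-b667-bd351568a361', terminate=True, signal='SIGTERM')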
Example 3
 def cancel_celery_task(self, kill=False):
     """
     Make sure we cancel the task (if in queue/scheduled).
     :param kill: Also kill the task if it's running; defaults to False.
     """
     celery_control = Control(current_app)
     celery_control.revoke(task_id=self.celery_task_id, terminate=kill)
Example 4
 def __init__(self):
     path = getattr(settings, 'CELERY_APPLICATION_PATH', None)
     if path is None:
         raise ValueError(
            'You need to define "CELERY_APPLICATION_PATH" in settings.')
     self._application = import_object(path)
     self._control = Control(self._application)
     self._default_queue = self._application.amqp.default_queue.name
     self._routes = getattr(settings, 'CELERY_ROUTES', {})
Example 5
    def check(self, **config):
        timeout = config.get('timeout', self.timeout)

        app = CeleryApp('birder', loglevel='info', broker=self.broker)
        c = Control(app)
        insp = c.inspect(timeout=timeout)
        d = insp.stats()
        # d = insp.ping()
        return bool(d)
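The check above works because `Inspect` methods return `None` when no worker replies within the timeout, so `bool(d)` doubles as an up/down probe. A minimal sketch of the same idea using `ping()`, with a placeholder broker URL:

from celery import Celery
from celery.app.control import Control

app = Celery('birder', broker='redis://localhost:6379/0')  # placeholder broker

insp = Control(app).inspect(timeout=2)
replies = insp.ping()  # e.g. {'celery@host': {'ok': 'pong'}}, or None if nobody replied
print('workers up' if replies else 'no workers responding')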
Example 6
 def run(self):
     time.sleep(3)
     app = Celery(self.app_name,
                  broker=self.cfg.celery_broker,
                  backend=self.cfg.celery_backend)
     c = Control(app)
     self.up.up.logger.debug(
         c.ping(destination=[self.worker_name]))
     self.event.wait()
     c.shutdown(destination=[self.worker_name])
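`shutdown()` is itself a broadcast command: the targeted workers finish their current tasks and then exit (a warm shutdown). A minimal standalone sketch, with placeholder broker and worker names:

from celery import Celery
from celery.app.control import Control

app = Celery('proj', broker='amqp://guest@localhost//')  # placeholder broker

c = Control(app)
# Ask one specific worker to shut down warmly; omit destination to target all workers.
c.shutdown(destination=['celery@worker1'])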
Example 7
def stop_process():
    try:
        global do_process
        if do_process:
            print('Terminating task')
            ctrl = Control(app=app)
            ctrl.revoke(str(do_process.id), terminate=True)
            do_process = None
    except Exception as e:
        logger.error(e)
Example 8
def redis_status(app):
    """
    Raises ServiceDown if the Redis server used as a celery broker is down.
    Since our application should not have access to the Redis server, we test
    this by instantiating a Celery Control and attempting to ping it.
    """
    try:
        Control(app=app).ping(timeout=1)
    except RedisConnectionError:
        raise ServiceDown()
Example 9
def worker_abort(arguments):
    nodename = f"celery@{airflow.utils.get_hostname()}"

    control = Control(app=airflow.app.celery)
    results = control.broadcast("stats", reply=True, destination=[nodename])

    proc_id = results[0][nodename]["pid"]

    logging.info("Sending SIGABRT (%s) to worker process ID: %s",
                 Signals.SIGABRT, proc_id)
    os.kill(proc_id, Signals.SIGABRT)
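`broadcast("stats", reply=True, destination=[...])` returns a list of `{nodename: reply}` dicts, and a worker's `stats` reply carries its main process id under the top-level `pid` key, which is what `results[0][nodename]["pid"]` extracts above. A minimal sketch, with a placeholder broker URL:

from celery import Celery
from celery.app.control import Control

app = Celery('proj', broker='amqp://guest@localhost//')  # placeholder broker

replies = Control(app=app).broadcast('stats', reply=True)
# replies looks roughly like: [{'celery@myhost': {'pid': 12345, 'uptime': 42, ...}}]
for reply in replies:
    for nodename, stats in reply.items():
        print(nodename, 'pid:', stats['pid'])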
Example 10
def stop_calibrate():
    try:
        global calibrate_process
        print(calibrate_process)
        if calibrate_process:
            print('Terminating task')
            ctrl = Control(app=app)
            ctrl.revoke(str(calibrate_process.id), terminate=True)
            calibrate_process = None
    except Exception as e:
        logger.error(e)
Example 11
def celery_status(app):
    """
    Raises ServiceDown if no workers respond to the ping, if any cluster reply
    is empty, or if any worker answers with something other than a pong.
    """
    clusters = Control(app=app).ping(timeout=1)
    if not clusters:
        raise ServiceDown()
    for cluster in clusters:
        if not cluster:
            raise ServiceDown()
        for host, status in cluster.items():
            if 'ok' not in status or status['ok'] != 'pong':
                raise ServiceDown()
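For reference, `Control.ping()` returns a list with one `{hostname: reply}` dict per responding worker, which is the shape the nested loops above walk; an empty list means no worker answered within the timeout. A reply from two healthy workers would look roughly like:

[{'celery@worker1': {'ok': 'pong'}},
 {'celery@worker2': {'ok': 'pong'}}]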
Example 12
def stats(arguments):
    control = Control(app=airflow.app.celery)

    nodename = f"celery@{airflow.utils.get_hostname()}"

    logging.info("Getting node stats")
    logging.info(control.broadcast("stats", reply=True,
                                   destination=[nodename]))

    logging.info("Getting active queues")
    logging.info(
        control.broadcast("active_queues", reply=True, destination=[nodename]))

    logging.info("Getting active tasks")
    logging.info(
        control.broadcast("active", reply=True, destination=[nodename]))
Example 13
def revoke_p_task(pr_id):
    """
    Revoke the latest async task (name=faconstor.tasks.exec_process) for the given process id.
    return status (int): 1 = success, 2 = failure, 0 = task not found
    """
    status = 0
    try:
        task_url = "http://127.0.0.1:5555/api/tasks"

        try:
            task_json_info = requests.get(task_url).text
        except Exception:
            status = 2
        else:
            task_dict_info = json.loads(task_json_info)
            c_control = Control(app=app)

            for key, value in task_dict_info.items():
                try:
                    task_process_id = int(value["args"][1:-1])
                except Exception:
                    task_process_id = ""
                # Terminate the async task belonging to the specified process
                if task_process_id == pr_id and value["name"] == "faconstor.tasks.exec_process":
                    task_id = key
                    print(key)
                    c_control.revoke(str(task_id), terminate=True)
                    status = 1
    except Exception as e:
        print(e)
        status = 2
    if status == 1:  # Update ProcessRun.walkthroughstate
        try:
            ProcessRun.objects.filter(id=pr_id).update(**{
                "walkthroughstate": "STOP"
            })
        except Exception:
            pass
    return status
Example 14
    def __init__(self, worker):
        self.worker = worker
        self.nodename = worker.hostname
        self.app = worker.app

        self.control = Control(app=self.app)
Example 15
RESULTS_BACKEND_HOST_IP = os.getenv(POSTGRES_HOST_IP_ENV, '172.17.0.2')
RESULTS_BACKEND_HOST_PORT = os.getenv(POSTGRES_HOST_PORT_ENV,
                                      DEFAULT_POSTGRES_PORT)
RESULTS_BACKEND_USERNAME = os.getenv(POSTGRES_USERNAME_ENV, 'postgres')
RESULTS_BACKEND_PASSWORD = os.getenv(POSTGRES_PASSWORD_ENV, 'postgres')

broker_url = 'amqp://{0}:{1}@{2}:{3}//'.format(MESSAGE_BROKER_USERNAME,
                                               MESSAGE_BROKER_PASSWORD,
                                               MESSAGE_BROKER_HOST_IP,
                                               MESSAGE_BROKER_HOST_PORT)
backend_url = 'db+postgresql://{0}:{1}@{2}:{3}/{4}'.format(
    RESULTS_BACKEND_USERNAME, RESULTS_BACKEND_PASSWORD,
    RESULTS_BACKEND_HOST_IP, RESULTS_BACKEND_HOST_PORT, POSTGRES_RESULTS_DB)

app = Celery(broker=broker_url, backend=backend_url)
remote_control = Control(app=app)

tasks_queue = Queue(name=MINION_TASKS_QUEUE,
                    exchange=Exchange(''),
                    routing_key=MINION_TASKS_QUEUE,
                    no_declare=True)


def send_task_message(message, producer=None):
    with app.producer_or_acquire(producer) as producer:
        producer.publish(
            message,
            serializer='json',
            exchange=tasks_queue.exchange,
            routing_key=tasks_queue.routing_key,
            declare=[tasks_queue])
Example 16
def drop_celery_tasks(
    task_name: str,
    queue_name: str,
    celery_app: Celery,
    redis_client: StrictRedis,
    in_workers: bool = False,
):
    """
    Drop all **queued tasks** that match the `task_name` and `queue_name` passed as parameters. There is
    currently no Celery command available for this purpose, therefore we need to
    read the Celery queue (Redis backend), identify the IDs of the tasks and then revoke them.

    Params:
    :param task_name: Full dotted path of the Celery task.
    :param queue_name: Name of the queue from which you wish to delete the queued tasks.
    :param celery_app: Main Celery application.
    :param redis_client: Redis client.
    :param in_workers: Specify whether the tasks pre-fetched or fetched by the workers should be revoked. If set
        to `True`, it will revoke active, scheduled, and reserved tasks fetched by the workers.
        Tasks that are currently executing will not be terminated; revoked tasks are instead discarded by the
        workers when received. Use with caution: this option might take a while to execute and is not
        recommended for production environments.
        More information in: https://docs.celeryproject.org/en/stable/userguide/monitoring.html.

    For reference, a Redis item on the queue looks like:
    "{\"body\": \"gAIpfXEAfXEBKFgJAAAAY2FsbGJhY2tzcQJOWAgAAABlcnJiYWNrc3EDTlgFAAAAY2hhaW5xBE5YBQAAAGNob3JkcQ
    VOdYdxBi4=\", \"content-encoding\": \"binary\", \"content-type\": \"application/x-python-serialize\",
    \"headers\": {\"lang\": \"py\", \"task\": \"hi.tasks.on\",
    \"id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"shadow\": null, \"eta\": null,
    \"expires\": null, \"group\": null, \"retries\": 0, \"timelimit\": [null, null],
    \"root_id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"parent_id\": null, \"argsrepr\": \"()\",
    \"kwargsrepr\": \"{}\", \"origin\": \"gen1@c60fdf6f1554\", \"span_map\":
    {\"uber-trace-id\": \"635914c782f0c52f:8a07796eaedf05d1:0:1\"}},
    \"properties\": {\"correlation_id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"reply_to\":
    \"ac8ee0ea-4d30-3065-97da-5a527f7a1fc5\", \"delivery_mode\": 2, \"delivery_info\":
    {\"exchange\": \"\", \"routing_key\": \"default\"}, \"priority\": 0,
        \"body_encoding\": \"base64\", \"delivery_tag\": \"5626fd36-bfc6-4ac5-b137-943a6067fcf1\"}}"
    """
    def _get_tasks_id(workers: dict, tasks_ids: list, task_name: str):
        """
        Collect the ids of tasks with the given name from the given `workers` mapping, e.g.:
        {'worker1.example.com': [
             {'name': 'tasks.sleeptask', 'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf',
              'args': '(8,)', 'kwargs': '{}'}]
        }
        """
        for worker in workers:
            if not workers[worker]:
                continue
            for _task in workers[worker]:
                if _task["name"].split(".")[-1] == task_name:
                    tasks_ids.append(_task["id"])

    i = Inspect(app=celery_app)  # Inspect all nodes.
    registered = i.registered()
    if not registered:
        raise Exception("No registered tasks found")

    if not any(task_name == _task
               for _task in chain(*list(registered.values()))):
        logging.error(
            f"Command could not be executed, because task is not registered: {task_name}"
        )
        return

    tasks_ids = []

    # Revoke tasks already in the broker.
    if in_workers:
        _get_tasks_id(i.active(), tasks_ids, task_name)
        _get_tasks_id(i.scheduled(), tasks_ids, task_name)
        _get_tasks_id(i.reserved(), tasks_ids, task_name)

        if tasks_ids:
            for task_id in tasks_ids:
                Control(app=celery_app).revoke(task_id)
        else:
            logging.info(
                f"No active/scheduled/registered task found with the name {task_name}"
            )

    # Revoke tasks in the Redis queue.
    queue_length = redis_client.llen(queue_name)
    if queue_length == 0:
        logging.info(f"No items found in queue: {queue_name}")
        return

    n = 0
    batch_size = 10
    while True:
        items = redis_client.lrange(queue_name, n, n + batch_size - 1)  # lrange bounds are inclusive
        n += batch_size
        if not items:
            break

        for item in items:
            try:
                queued_item = json.loads(item)
            except JSONDecodeError as e:
                logging.info(f"Error decoding item from queue: {e.msg}")
                continue
            header = queued_item.get("headers")
            if header and header["task"] == task_name:
                task_id = queued_item["headers"]["id"]
                logging.info(f"revoking task id {task_id}")
                Control(app=celery_app).revoke(task_id)
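A hypothetical invocation of `drop_celery_tasks`, assuming a local Redis broker, a `default` queue, and a task registered as `proj.tasks.send_email` (all placeholder names):

from celery import Celery
from redis import StrictRedis

app = Celery('proj', broker='redis://localhost:6379/0')  # placeholder broker
redis_client = StrictRedis(host='localhost', port=6379, db=0)

# Drop queued copies of the task and, with in_workers=True, also revoke copies
# already fetched by the workers (active/scheduled/reserved).
drop_celery_tasks('proj.tasks.send_email', 'default', app, redis_client, in_workers=True)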
Example 17
import time

from celery.app.control import Control
from celery.result import AsyncResult

# app, add and mul are assumed to be defined in the project's Celery module.

# Get queue info for the app
queue_info = app.connection().channel().queue_declare('proj', passive=True)
print('message count:', queue_info.message_count)
# Purge the queue
app.connection().channel().queue_purge('proj')

result = add.delay(4, 4)
# Get the task id
print("task id: ", str(result.id))
# Build an AsyncResult handle for the task
task = AsyncResult(str(result.id))
# Check the task status; by now the task has started executing
time.sleep(1)
print("task status: ", task.status)

celery_control = Control(app=app)
# Forced termination; the worker will log an error in the background
celery_control.revoke(str(result.id), terminate=True, signal='SIGKILL')
# The task enters the revoked state
time.sleep(1)
print("task done: ", task.status)

# # Block synchronously waiting for the result
# print("result: ", result.get(timeout=1))

# Create a task signature with bound arguments
s1 = mul.s(2, 2)
res = s1.delay()
print("signature result: ", res.get())
Example 18
from __future__ import absolute_import, unicode_literals

import os

from celery import Celery
from celery.app.control import Control

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parking_project.settings')

from django.conf import settings  # noqa

app = Celery('parking_project')

# Used for revoking tasks.
control = Control(app)

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
Example 19
            try:
                client = pymongo.MongoClient(port=self.mongod_port)
                client.server_info()
                break
            except:
                print("Mongo d not online yet")


# mm = MongoManager()
# mm.start()
# mm.wait_for_mongo_online()

print("Everythinh started. Waiting for task finishing")
ans = tasks.hello.delay()
import time
print("Waiting 5 seconds before revoking")
time.sleep(5)

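# Each call below is an equivalent way of revoking the same task (first with
# SIGHUP, then again with SIGKILL); any single one would suffice.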
celery_control = Control(tasks.app)
celery_control.revoke(ans.id, terminate=True, signal=signal.SIGHUP)
revoke(ans.id, terminate=True, signal=signal.SIGHUP)
tasks.app.control.revoke(ans.id, terminate=True, signal=signal.SIGHUP)
ans.revoke(terminate=True, signal=signal.SIGHUP)
tasks.app.control.revoke(ans.id, terminate=True, signal='SIGKILL')
AsyncResult(ans.id, app=tasks.app).revoke(terminate=True, signal='SIGKILL')
ans.revoke(terminate=True, signal='SIGKILL')
print("Revoked function")
print(ans.get())

# mm.kill()
Example 20
from flask import Flask
from pymongo import MongoClient
from .Celery import make_celery
from .config import MONGODB
from flask_cors import *
from celery.app.control import Control, Inspect

app = Flask(__name__)
CORS(app, supports_credentials=True)
app.config.from_pyfile('config.py')
celery = make_celery(app)
control = Control(app=celery)
inspect = Inspect(app=celery)
mongo = MongoClient(MONGODB)
db = mongo.celery
tasks = db.tasks
mirrors = db.mirrors

from .view import *