Example #1
def revoke_p_task(pr_id):
    """
    中止指定流程所有taskid:name=faconstor.tasks.exec_process的最新任务中止
    return status{bool}: 1 成功 2 失败 0 任务不存在
    """
    status = 0
    try:
        task_url = "http://127.0.0.1:5555/api/tasks"

        try:
            task_json_info = requests.get(task_url).text
        except Exception:
            status = 2
        else:
            task_dict_info = json.loads(task_json_info)
            c_control = Control(app=app)

            for key, value in task_dict_info.items():
                try:
                    # value["args"] is a string repr like "('42',)"; take the first arg
                    task_process_id = value["args"][1:-1].split(',')[0][1:-1]
                except Exception:
                    task_process_id = ""
                # Terminate the async task belonging to the specified process
                if (task_process_id == pr_id
                        and value["name"] == "drm.tasks.run_workflow"):
                    task_id = key
                    print(key)
                    c_control.revoke(str(task_id), terminate=True)
                    status = 1
    except Exception as e:
        print(e)
        status = 2
    return status
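For reference, the URL queried above is Flower's HTTP monitoring API: GET /api/tasks returns a JSON object keyed by task id, which is why the loop iterates over key/value pairs. A sketch of the shape the parsing relies on (the id and field values here are illustrative, not from the source):

sample_response = {
    "9fbcc18e-45d5-4b9f-b667-bd351568a361": {   # task id, passed to revoke()
        "name": "drm.tasks.run_workflow",
        "args": "('42',)",   # args arrive as a string repr, hence the slicing above
        "kwargs": "{}",
        "state": "STARTED",
    }
}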
Example #2
def stop(request, task_id=None):
    control = Control(app=app)
    control.revoke(task_id, terminate=True)
    file_obj = FileData.objects.get(task_id=task_id)
    file_obj.status = 'Canceled'
    file_obj.save()
    return Response({'success': True}, status=200)
Example #3
 def cancel_celery_task(self, kill=False):
     """
     Make sure we cancel the task (if in queue/scheduled).
     :param kill: Also kill the task if it's running; defaults to False.
     """
     celery_control = Control(current_app)
     celery_control.revoke(task_id=self.celery_task_id, terminate=kill)
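With terminate=True, revoke delivers SIGTERM to the process running the task by default; the signal argument can escalate this. A sketch of a harder variant of the same call (attribute names as in the method above):

celery_control.revoke(task_id=self.celery_task_id, terminate=kill, signal='SIGKILL')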
Example #4
 def __init__(self):
     path = getattr(settings, 'CELERY_APPLICATION_PATH', None)
     if path is None:
         raise ValueError(
             'You need to define "CELERY_APPLICATION_PATH" on settings.')
     self._application = import_object(path)
     self._control = Control(self._application)
     self._default_queue = self._application.amqp.default_queue.name
     self._routes = getattr(settings, 'CELERY_ROUTES', {})
Example #5
    def check(self, **config):
        timeout = config.get('timeout', self.timeout)

        app = CeleryApp('birder', loglevel='info', broker=self.broker)
        c = Control(app)
        insp = c.inspect(timeout=timeout)
        d = insp.stats()
        # d = insp.ping()
        return bool(d)
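The same probe works standalone; a minimal sketch, assuming a local Redis broker (the broker URL is an assumption, not taken from the snippet):

from celery import Celery
from celery.app.control import Control

probe_app = Celery('probe', broker='redis://localhost:6379/0')
replies = Control(probe_app).inspect(timeout=2.0).stats()
# stats() returns None when no worker answers within the timeout
print('workers reachable:', bool(replies))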
Example #6
 def run(self):
     time.sleep(3)
     app = Celery(self.app_name,
                  broker=self.cfg.celery_broker,
                  backend=self.cfg.celery_backend)
     c = Control(app)
     self.up.up.logger.debug(
         c.ping(destination=[self.worker_name]))
     self.event.wait()
     c.shutdown(destination=[self.worker_name])
Example #7
def stop_process():
    try:
        global do_process
        if do_process:
            print('Terminating task')
            ctrl = Control(app=app)
            ctrl.revoke(str(do_process.id), terminate=True)
            do_process = None
    except Exception as e:
        logger.error(e)
Example #8
def stop_calibrate():
    try:
        global calibrate_process
        print(calibrate_process)
        if calibrate_process:
            print('Terminating task')
            ctrl = Control(app=app)
            ctrl.revoke(str(calibrate_process.id), terminate=True)
            calibrate_process = None
    except Exception as e:
        logger.error(e)
Example #9
def worker_abort(arguments):
    nodename = f"celery@{airflow.utils.get_hostname()}"

    control = Control(app=airflow.app.celery)
    results = control.broadcast("stats", reply=True, destination=[nodename])

    proc_id = results[0][nodename]["pid"]

    logging.info("Sending SIGABRT (%s) to worker process ID: %s",
                 Signals.SIGABRT, proc_id)
    os.kill(proc_id, Signals.SIGABRT)
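The "stats" broadcast replies with a list of {nodename: stats} mappings, and the worker's main process id sits under the 'pid' key, which is what results[0][nodename]["pid"] extracts before the os.kill call. Roughly (values illustrative):

# [{'celery@host1': {'pid': 12345, 'uptime': 3600, ...}}]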
Example #10
def task_e(tid):
    control = Control(app)
    inspect = control.inspect()
    active = inspect.active()
    running_jobs = []
    if active is not None:
        for key, value in active.items():
            running_jobs.extend(value)
        if len(running_jobs) == 1:
            control.revoke(tid, terminate=True)
            print('revoke', tid)
Example #11
def stats(arguments):
    control = Control(app=airflow.app.celery)

    nodename = f"celery@{airflow.utils.get_hostname()}"

    logging.info("Getting node stats")
    logging.info(control.broadcast("stats", reply=True,
                                   destination=[nodename]))

    logging.info("Getting active queues")
    logging.info(
        control.broadcast("active_queues", reply=True, destination=[nodename]))

    logging.info("Getting active tasks")
    logging.info(
        control.broadcast("active", reply=True, destination=[nodename]))
Example #12
class WorkerShutdownHandler(object):
    def __init__(self, worker):
        self.worker = worker
        self.nodename = worker.hostname
        self.app = worker.app

        self.control = Control(app=self.app)

    def __call__(self, *args):
        print(f"Status of: {self.nodename}")
        with self.app.connection_for_read() as connection:
            print(self.control.broadcast("stats",
                connection=connection,
                destination=[self.nodename],
                reply=True
            ))

        print(f"Consumers closed for: {self.nodename}")
        self.halt()
        print(f"Marking tasks failed under: {self.nodename}")
        self.terminate()

    def halt(self):
        with self.app.connection_for_read() as connection:
            result = self.control.broadcast("active_queues",
                connection=connection,
                destination=[self.nodename],
                reply=True
            )

            print(result)

            result = self.control.broadcast("active",
                connection=connection,
                destination=[self.nodename],
                reply=True
            )

            print(result)

        # self.worker.pool.stop()

    def terminate(self):
        print("******************")
Example #13
def redis_status(app):
    """
    Raises ServiceDown if the Redis server used as a celery broker is down.
    Since our application should not have access to the Redis server, we test
    this by instantiating a Celery Control and attempting to ping it.
    """
    try:
        Control(app=app).ping(timeout=1)
    except RedisConnectionError:
        raise ServiceDown()
Example #14
def revoke_p_task(pr_id):
    """
    中止指定流程所有taskid:name=faconstor.tasks.exec_process的最新任务中止
    return status{bool}: 1 成功 2 失败 0 任务不存在
    """
    status = 0
    try:
        task_url = "http://127.0.0.1:5555/api/tasks"

        try:
            task_json_info = requests.get(task_url).text
        except Exception:
            status = 2
        else:
            task_dict_info = json.loads(task_json_info)
            c_control = Control(app=app)

            for key, value in task_dict_info.items():
                try:
                    task_process_id = int(value["args"][1:-1])
                except Exception:
                    task_process_id = ""
                # Terminate the async task belonging to the specified process
                if task_process_id == pr_id and value["name"] == "faconstor.tasks.exec_process":
                    task_id = key
                    print(key)
                    c_control.revoke(str(task_id), terminate=True)
                    status = 1
    except Exception as e:
        print(e)
        status = 2
    if status == 1:  # update ProcessRun.walkthroughstate
        try:
            ProcessRun.objects.filter(id=pr_id).update(**{
                "walkthroughstate": "STOP"
            })
        except Exception:
            pass
    return status
Example #15
def celery_status(app):
    """
    Raises ServiceDown if any Celery worker servers are down, if any clusters
    have no workers, or if any workers are down.
    """
    clusters = Control(app=app).ping(timeout=1)
    if not clusters:
        raise ServiceDown()
    for cluster in clusters:
        if not cluster:
            raise ServiceDown()
        for host, status in cluster.items():
            if 'ok' not in status or status['ok'] != 'pong':
                raise ServiceDown()
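For reference, ping() returns a list of single-entry mappings, one per responding worker, which is the structure the nested loops above walk. A healthy reply from two workers looks roughly like this (hostnames illustrative):

sample_clusters = [
    {'celery@worker1': {'ok': 'pong'}},
    {'celery@worker2': {'ok': 'pong'}},
]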
Example #16
def clear_twitter():
    control = Control(app)
    #control.cancel_consumer('streamapi') # queue name; most probably must be specified once per queue, but my app uses a single queue
     
    inspect = control.inspect()
    active = inspect.active()
    print(active)
    running_jobs = []
    if active is not None:
        for key, value in active.items():
            running_jobs.extend(value)
        if len(running_jobs) > 0:
            print("{} jobs running: {}".format(len(running_jobs), ", ".join(job["name"] for job in running_jobs)))
            for job in running_jobs:
                #if job['name'] == 'fetch':
                control.revoke(job["id"], terminate=True)
            discard_all()
            #time.sleep(10)
        else:
            print("No running jobs")

    app2 = state.State()
    cont = Control()
    i = Inspect()
    query = app2.tasks_by_type('fetch')
    print('query', query)

    for uuid, task in query:
        #cont.revoke(uuid, terminate=True)
        print(uuid, task)

    #cont.purge()
    a = discard_all()
    print(a)
Example #17
class CeleryClient(object):
    _application = None
    _control = None
    _default_queue = None

    def __init__(self):
        path = getattr(settings, 'CELERY_APPLICATION_PATH', None)
        if path is None:
            raise ValueError(
                'You need to define "CELERY_APPLICATION_PATH" on settings.')
        self._application = import_object(path)
        self._control = Control(self._application)
        self._default_queue = self._application.amqp.default_queue.name
        self._routes = getattr(settings, 'CELERY_ROUTES', {})

    @property
    def application(self):
        return self._application

    @property
    def default_queue(self):
        return self._default_queue

    @property
    def routes(self):
        return self._routes

    def enable_events(self):
        self._control.enable_events()

    def disable_events(self):
        self._control.disable_events()

    def workers(self):
        response = self._control.inspect().stats()
        if not response:
            return []
        statuses = self.worker_statuses()
        queues = self.active_queues()
        workers = []
        for name, info in response.items():
            worker = dict()
            worker['name'] = name
            worker['status'] = statuses[worker['name']]
            worker['concurrency'] = info['pool']['max-concurrency']
            worker['broker'] = {'transport': info['broker']['transport'],
                                'hostname': info['broker']['hostname'],
                                'port': info['broker']['port']}
            worker['queues'] = queues[worker['name']]
            workers.append(worker)
        return workers

    def worker_statuses(self):
        """
        Get worker statuses.
        :return:
        """
        response = self._control.ping()
        if not response:
            return []
        workers = {}
        for w in response:
            for k, v in w.items():
                for k_inner, v_inner in v.items():
                    if k_inner == 'ok' and v_inner == 'pong':
                        workers[k] = 'Active'
                    else:
                        workers[k] = 'Passive'
                    break
        return workers

    def active_queues(self):
        """

        get queue mappings with workers
        :return:
        """
        response = self._control.inspect().active_queues()
        if not response:
            return []
        workers = {}
        for w, queues in response.items():
            workers[w] = list()
            for q in queues:
                workers[w].append(q['name'])
        return workers

    def registered_tasks(self):
        """
        Get the registered task list.
        :return:
        """
        response = self._control.inspect().registered()
        if not response:
            return []
        all_tasks = set()
        for worker, tasks in response.items():
            for task in tasks:
                all_tasks.add(task)

        registered_tasks = {}
        for task in all_tasks:
            if task in self.routes:
                queue = self.routes[task].get('queue', self.default_queue)
            else:
                queue = self.default_queue
            registered_tasks[task] = queue
        return registered_tasks

    def active_tasks(self):
        """
        Get active tasks that are currently running.
        :return:
        """
        response = self._control.inspect().active()
        if not response:
            return []
        tasks = []
        for worker, task_list in response.items():
            for task in task_list:
                t = dict()
                t['queue'] = task['delivery_info']['routing_key']
                t['name'] = task['name']
                t['id'] = task['id']
                t['worker'] = worker
                tasks.append(t)
        return tasks

    def reserved_tasks(self):
        """
        Get reserved tasks that are queued but still waiting to be executed.
        :return:
        """

        response = self._control.inspect().reserved()
        if not response:
            return []
        tasks = []
        for worker, task_list in response.items():
            for task in task_list:
                t = dict()
                t['queue'] = task['delivery_info']['routing_key']
                t['name'] = task['name']
                t['id'] = task['id']
                t['worker'] = worker
                tasks.append(t)
        return tasks

    def execute(self, command, parameter):

        def run(*args):
            task_verbose = args[1]
            task = import_object(task_verbose)
            task.delay()

        def revoke(*args):
            ctrl = args[0]
            task_id = args[1]
            ctrl.revoke(task_id, terminate=True, signal="SIGKILL")

        control = self._control
        nested = nested_method(self, 'execute', command)
        return nested(*(control, parameter))
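A possible usage sketch, assuming Django settings define CELERY_APPLICATION_PATH and at least one worker is running (the dictionary keys follow workers() and active_tasks() above):

client = CeleryClient()
for worker in client.workers():
    print(worker['name'], worker['status'], worker['queues'])
for task in client.active_tasks():
    print(task['id'], task['name'], 'on', task['worker'])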
Example #18
def drop_celery_tasks(
    task_name: str,
    queue_name,
    celery_app: Celery,
    redis_client: StrictRedis,
    in_workers: bool = False,
):
    """
    Drop all **queued tasks** that match the `task_name` and `queue_name` passed as parameters. There is no
    celery command available at the moment for this purpose, therefore we need to
    read the celery queue (Redis backend), identify the IDs of the tasks and then revoke them.

    :param task_name: Path to the celery task.
    :param queue_name: Name of the queue from which you wish to delete the queued tasks.
    :param celery_app: Main celery application.
    :param redis_client: Redis client.
    :param in_workers: Specify whether the tasks pre-fetched or fetched by the workers should be revoked. If the value
        is set to `True`, it will revoke active, scheduled, and reserved tasks fetched by the workers.
        Tasks that are currently executing will not be terminated; instead, new tasks in the queue will not be
        accepted. Use with caution: this option might take a while to execute and is not recommended for prod env.
        More information in: https://docs.celeryproject.org/en/stable/userguide/monitoring.html.

    For reference a Redis item on the queue looks like:
    "{\"body\": \"gAIpfXEAfXEBKFgJAAAAY2FsbGJhY2tzcQJOWAgAAABlcnJiYWNrc3EDTlgFAAAAY2hhaW5xBE5YBQAAAGNob3JkcQ
    VOdYdxBi4=\", \"content-encoding\": \"binary\", \"content-type\": \"application/x-python-serialize\",
    \"headers\": {\"lang\": \"py\", \"task\": \"hi.tasks.on\",
    \"id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"shadow\": null, \"eta\": null,
    \"expires\": null, \"group\": null, \"retries\": 0, \"timelimit\": [null, null],
    \"root_id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"parent_id\": null, \"argsrepr\": \"()\",
    \"kwargsrepr\": \"{}\", \"origin\": \"gen1@c60fdf6f1554\", \"span_map\":
    {\"uber-trace-id\": \"635914c782f0c52f:8a07796eaedf05d1:0:1\"}},
    \"properties\": {\"correlation_id\": \"9fbcc18e-45d5-4b9f-b667-bd351568a361\", \"reply_to\":
    \"ac8ee0ea-4d30-3065-97da-5a527f7a1fc5\", \"delivery_mode\": 2, \"delivery_info\":
    {\"exchange\": \"\", \"routing_key\": \"default\"}, \"priority\": 0,
        \"body_encoding\": \"base64\", \"delivery_tag\": \"5626fd36-bfc6-4ac5-b137-943a6067fcf1\"}}"
    """
    def _get_tasks_id(workers: list, tasks_ids: list, task_name: str):
        """
        Collect the ids of tasks matching `task_name` from the given `workers` mapping, e.g.:
        {'worker1.example.com': [
             {'name': 'tasks.sleeptask', 'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf',
              'args': '(8,)', 'kwargs': '{}'}]
        }
        """
        for worker in workers:
            if not workers[worker]:
                continue
            for _task in workers[worker]:
                if _task["name"].split(".")[-1] == task_name:
                    tasks_ids.append(_task["id"])

    i = Inspect(app=celery_app)  # Inspect all nodes.
    registered = i.registered()
    if not registered:
        raise Exception("No registered tasks found")

    if not any(task_name == _task
               for _task in chain(*list(registered.values()))):
        logging.error(
            f"Command could not be executed, because task is not registered: {task_name}"
        )
        return

    tasks_ids = []

    # Revoke tasks already in the broker.
    if in_workers:
        _get_tasks_id(i.active(), tasks_ids, task_name)
        _get_tasks_id(i.scheduled(), tasks_ids, task_name)
        _get_tasks_id(i.reserved(), tasks_ids, task_name)

        if tasks_ids:
            for task_id in tasks_ids:
                Control(app=celery_app).revoke(task_id)
        else:
            logging.info(
                f"No active/scheduled/reserved task found with the name {task_name}"
            )

    # Revoke tasks in the Redis queue.
    queue_length = redis_client.llen(queue_name)
    if queue_length == 0:
        logging.info(f"No items found in queue: {queue_name}")
        return

    n = 0
    batch_size = 10
    while True:
        items = redis_client.lrange(queue_name, n, n + batch_size - 1)  # lrange bounds are inclusive
        n += batch_size
        if not items:
            break

        for item in items:
            try:
                queued_item = json.loads(item)
            except JSONDecodeError as e:
                logging.info(f"Error decoding item from queue: {e.msg}")
                continue
            header = queued_item.get("headers")
            if header and header["task"] == task_name:
                task_id = queued_item["headers"]["id"]
                logging.info(f"revoking task id {task_id}")
                Control(app=celery_app).revoke(task_id)
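A hypothetical invocation, for illustration only; the broker URL, the Redis connection parameters, and the queue name are assumptions, and the task name reuses the one from the docstring sample above:

app = Celery('proj', broker='redis://localhost:6379/0')
redis_client = StrictRedis(host='localhost', port=6379, db=0)
drop_celery_tasks('hi.tasks.on', 'default', celery_app=app, redis_client=redis_client, in_workers=False)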
Example #19
from flask import Flask
from pymongo import MongoClient
from .Celery import make_celery
from .config import MONGODB
from flask_cors import CORS
from celery.app.control import Control, Inspect

app = Flask(__name__)
CORS(app, supports_credentials=True)
app.config.from_pyfile('config.py')
celery = make_celery(app)
control = Control(app=celery)
inspect = Inspect(app=celery)
mongo = MongoClient(MONGODB)
db = mongo.celery
tasks = db.tasks
mirrors = db.mirrors

from .view import *  # noqa: E402,F401,F403 -- imported last so the views see the objects above
Example #20
    def __init__(self, worker):
        self.worker = worker
        self.nodename = worker.hostname
        self.app = worker.app

        self.control = Control(app=self.app)
Example #21
from __future__ import absolute_import, unicode_literals

import os

from celery import Celery
from celery.app.control import Control

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parking_project.settings')

from django.conf import settings  # noqa

app = Celery('parking_project')

# Used for revoking tasks.
control = Control(app)

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
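Elsewhere in the project, the module-level control can then be imported to revoke tasks by id; a hedged usage sketch (the module path is an assumption based on the settings module above, and the task id is illustrative):

from parking_project.celery import control

control.revoke('9fbcc18e-45d5-4b9f-b667-bd351568a361', terminate=False)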
Example #22
            try:
                client = pymongo.MongoClient(port=self.mongod_port)
                client.server_info()
                break
            except Exception:
                print("MongoDB not online yet")


# mm = MongoManager()
# mm.start()
# mm.wait_for_mongo_online()

print("Everythinh started. Waiting for task finishing")
ans = tasks.hello.delay()
import time
print("Waiting 5 seconds before revoking")
time.sleep(5)

celery_control = Control(tasks.app)
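# The statements below are interchangeable ways of revoking the same task;
# any single one of them would be sufficient. They are listed together purely
# to compare the available revoke APIs. (The bare revoke() call assumes an
# old-style import such as `from celery.task.control import revoke`.)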
celery_control.revoke(ans.id, terminate=True, signal=signal.SIGHUP)
revoke(ans.id, terminate=True, signal=signal.SIGHUP)
tasks.app.control.revoke(ans.id, terminate=True, signal=signal.SIGHUP)
ans.revoke(terminate=True, signal=signal.SIGHUP)
tasks.app.control.revoke(ans.id, terminate=True, signal='SIGKILL')
AsyncResult(ans.id, app=tasks.app).revoke(terminate=True, signal='SIGKILL')
ans.revoke(terminate=True, signal='SIGKILL')
print("Revoked function")
print(ans.get())

# mm.kill()
Example #23
RESULTS_BACKEND_HOST_IP = os.getenv(POSTGRES_HOST_IP_ENV, '172.17.0.2')
RESULTS_BACKEND_HOST_PORT = os.getenv(POSTGRES_HOST_PORT_ENV,
                                      DEFAULT_POSTGRES_PORT)
RESULTS_BACKEND_USERNAME = os.getenv(POSTGRES_USERNAME_ENV, 'postgres')
RESULTS_BACKEND_PASSWORD = os.getenv(POSTGRES_PASSWORD_ENV, 'postgres')

broker_url = 'amqp://{0}:{1}@{2}:{3}//'.format(MESSAGE_BROKER_USERNAME,
                                               MESSAGE_BROKER_PASSWORD,
                                               MESSAGE_BROKER_HOST_IP,
                                               MESSAGE_BROKER_HOST_PORT)
backend_url = 'db+postgresql://{0}:{1}@{2}:{3}/{4}'.format(
    RESULTS_BACKEND_USERNAME, RESULTS_BACKEND_PASSWORD,
    RESULTS_BACKEND_HOST_IP, RESULTS_BACKEND_HOST_PORT, POSTGRES_RESULTS_DB)

app = Celery(broker=broker_url, backend=backend_url)
remote_control = Control(app=app)

tasks_queue = Queue(name=MINION_TASKS_QUEUE,
                    exchange=Exchange(''),
                    routing_key=MINION_TASKS_QUEUE,
                    no_declare=True)


def send_task_message(message, producer=None):
    with app.producer_or_acquire(producer) as producer:
        producer.publish(
            message,
            serializer='json',
            exchange=tasks_queue.exchange,
            routing_key=tasks_queue.routing_key,
            declare=[tasks_queue],
        )
Example #24
def tasksrevoke(task_id):
    control = Control(celery)
    control.revoke(task_id, terminate=True)
    return jsonify({}), 202, {'terminated': True}
Example #25
import time

from celery.result import AsyncResult
from celery.app.control import Control

# `app`, `add` and `mul` are assumed to come from the project's own celery module

# Get queue info for the app
queue_info = app.connection().channel().queue_declare('proj', passive=True)
print('message count:', queue_info.message_count)
# Purge the queue
app.connection().channel().queue_purge('proj')

result = add.delay(4, 4)
# Get the task id
print("task id: ", str(result.id))
# Get the task object
task = AsyncResult(str(result.id))
# Give the task a moment to enter the started state, then read its status
time.sleep(1)
print("task status: ", task.status)

celery_control = Control(app=app)
# Forced termination; the worker will log an error in the background
celery_control.revoke(str(result.id), terminate=True, signal='SIGKILL')
# The task enters the revoked state
time.sleep(1)
print("task done: ", task.status)

# # Synchronously block waiting for the result
# print("result: ", result.get(timeout=1))

# Partial signature with pre-bound arguments
s1 = mul.s(2, 2)
res = s1.delay()
print("signature result: ", res.get())