Example #1
def queue_poll(request):
    task_list = []
    if request.is_ajax():
        # Query active tasks:
        active_tasks = inspect().active() or inspect().scheduled()
        print active_tasks
        for t in active_tasks['celery@solus']:  # TODO: get queue name dynamically.
            task_id = t.get('id')
            task = AsyncResult(task_id)

            # Associate ID to task_state so the client end of the polling
            # mechanism can identify task one by one, like this:
            # [{task_stateN('task_idN')}]
            # Fall back to a dict wrapping the bare state string when the task
            # has no result payload yet (e.g. still PENDING), so update() works.
            state = task.result if isinstance(task.result, dict) else {'state': task.state}
            state.update({'id': task_id})
            task_list.append(state)
        print 'TASK_LIST'
        print task_list
    else:
        task_list = 'Not an AJAXed task list.'

    json_data = json.dumps(task_list, ensure_ascii=False, encoding='utf-8')
    print '\nJSON_DATA'
    print json_data
    print '\n'
    return HttpResponse(json_data, content_type='application/json')
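
The hard-coded 'celery@solus' worker name above (see the TODO) can be avoided by iterating every worker the inspector reports. A minimal sketch, assuming a configured Celery app named app (the broker URL is illustrative):

from celery import Celery

app = Celery('proj', broker='redis://localhost:6379/0')  # hypothetical broker URL

def iter_active_tasks():
    """Yield (worker_name, task_dict) for every active task on every worker."""
    active = app.control.inspect().active() or {}  # active() is None when no workers reply
    for worker, tasks in active.items():
        for task in tasks:
            yield worker, task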

Example #2
def check_broker_status():
    sleep_time = 5
    total_wait_time = 15
    total_tries = total_wait_time / sleep_time
    current_try = 0

    logging.info("About to check broker at: {0}".format(celery.current_app.conf['BROKER_URL']))

    while True:
        try:
            inspect().stats()

        except IOError as e:
            current_try += 1

            logging.info("Broker down, try: {0}, exception: {1}".format(current_try, e))
            if current_try >= total_tries:
                logging.info("Broker unreachable for {0} seconds.".format(total_wait_time))
                return False, e, traceback.format_exc()

            time.sleep(sleep_time)
            continue

        logging.info("Broker {0} up!".format(celery.current_app.conf['BROKER_URL']))
        break

    return True, None, None
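
A possible call site for the helper above, unpacking its three-element return value (a sketch; the error handling is illustrative):

broker_up, error, trace = check_broker_status()
if not broker_up:
    logging.error("Giving up on the broker: %s\n%s", error, trace)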
Example #3
def health_check_celery():
    """
    Check health status of celery and redis broker
    :return:
    """
    try:
        d = inspect().stats()
        if not d:
            sentry.captureMessage('No running Celery workers were found.')
            return False, 'No running Celery workers were found.'
    except ConnectionError as e:
        sentry.captureException()
        return False, 'cannot connect to redis server'
    except IOError as e:
        msg = "Error connecting to the backend: " + str(e)
        if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED':
            msg += ' Check that the Redis server is running.'
        sentry.captureException()
        return False, msg
    except ImportError as e:
        sentry.captureException()
        return False, str(e)
    except Exception:
        sentry.captureException()
        return False, 'celery not ok'
    return True, 'celery ok'
Example #4
def update_tasks():
    ''' Enable the use of memcache to save tasks and queues when possible '''
    i = inspect()
    if memcache:
        mc = memcache.Client(['127.0.0.1:11211'])
        tasks = 'REGISTERED_TASKS'
        queues = 'AVAILABLE_QUEUES'
        REGISTERED_TASKS = mc.get(tasks)
        AVAILABLE_QUEUES = mc.get(queues)
        if not REGISTERED_TASKS:
            REGISTERED_TASKS = set()
            for item in i.registered().values():
                REGISTERED_TASKS.update(item)
            mc.set(tasks, REGISTERED_TASKS, 10)
            REGISTERED_TASKS = mc.get('REGISTERED_TASKS')
        if not AVAILABLE_QUEUES:
            mc.set(queues, set([ item[0]['exchange']['name'] for item in i.active_queues().values() ]), 10)
            AVAILABLE_QUEUES = mc.get('AVAILABLE_QUEUES')
    else:
        REGISTERED_TASKS = set()
        for item in i.registered().values():
            REGISTERED_TASKS.update(item)

        AVAILABLE_QUEUES = set([ item[0]['exchange']['name'] for item in i.active_queues().values() ])
    return (REGISTERED_TASKS, AVAILABLE_QUEUES)
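
The memcache branch above is a small read-through cache with a 10-second TTL. The same pattern in isolation, as a hedged sketch (the helper name is illustrative):

def cached(mc, key, compute, ttl=10):
    """Return the memcached value for key, recomputing and storing it on a miss."""
    value = mc.get(key)
    if not value:
        value = compute()
        mc.set(key, value, ttl)
    return value

# e.g. registered = cached(mc, 'REGISTERED_TASKS',
#                          lambda: set().union(*(inspect().registered() or {}).values()))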
Example #5
def get_worker_status():
    status_message = None
    try:
        status = WORKER_READY

        from celery.task.control import inspect
        insp = inspect()

        if insp.active():
            status = WORKER_READY
        else:
            status = WORKER_OFFLINE
            status_message = "No running Celery workers were found."
    except IOError as e:
        from errno import errorcode
        status_message = "Error connecting to the backend: " + str(e)
        if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED':
            status_message += ' Check that the RabbitMQ server is running.'
        status = WORKER_OFFLINE
    except ImportError as e:
        status = WORKER_ERROR
        status_message = e.message

    d = {
        'status_code': status,
        'status': get_worker_status_display(status)
    }
    if status_message:
        d['status_message'] = status_message
    return d
Example #6
def get_task_running_count(task):
    """ Determine number of running tasks matching task name

        :param task: celery Task object or Name of task string

        :returns: number of instances running
    """
    inspector = inspect()
    active = inspector.active() or {}  # active() returns None when no workers reply

    if isinstance(task, basestring):
        task_name = task
    else:
        task_name = task.name

    found = 0
    for host_tasks in active.values():
        for cur_task in host_tasks:
            ixiacrlogger.debug('Found running task: {0}'.
                             format(cur_task['name']))
            if cur_task['name'] == task_name:
                found += 1

    ixiacrlogger.debug('Found {0} matching tasks running for task name={1}'.
                     format(found, task_name))
    return found
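
A typical use for this counter is a duplicate-launch guard. A hedged usage sketch, where my_task stands in for a real task object:

if get_task_running_count(my_task.name) == 0:
    my_task.apply_async()  # only queue another instance when none is running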
Example #7
    def test_prepare_reply(self):
        self.assertDictEqual(
            self.i._prepare([{"w1": {"ok": 1}}, {"w2": {"ok": 1}}]), {"w1": {"ok": 1}, "w2": {"ok": 1}}
        )

        i = control.inspect(destination="w1")
        self.assertEqual(i._prepare([{"w1": {"ok": 1}}]), {"ok": 1})
Example #8
def inspect_workers():
    """Display information about workers and queues"""
    
    i = inspect()
    
    print i.scheduled()
    print i.active()
Example #9
    def run(self, *args, **kwargs):
        self.quiet = kwargs.get("quiet", False)
        if not args:
            raise Error("Missing inspect command. See --help")
        command = args[0]
        if command not in self.choices:
            raise Error("Unknown inspect command: %s" % command)
        from celery.task.control import inspect

        destination = kwargs.get("destination")
        timeout = kwargs.get("timeout") or self.choices[command]
        if destination and isinstance(destination, basestring):
            destination = map(str.strip, destination.split(","))

        def on_reply(message_data):
            node = message_data.keys()[0]
            reply = message_data[node]
            status, preply = prettify(reply)
            self.say("->", t.cyan(node, ": ") + t.reset() + status,
                    indent(preply))

        self.say("<-", command)
        i = inspect(destination=destination,
                    timeout=timeout,
                    callback=on_reply)
        replies = getattr(i, command)()
        if not replies:
            raise Error("No nodes replied within time constraint.")
        return replies
Example #10
    def is_waiting_to_run(self):
        if self.status != self.WAITING:
            LOG.info("Migration: {} has already run!".format(self))
            return False

        inspect = control.inspect()
        scheduled_tasks = inspect.scheduled()
        try:
            hosts = scheduled_tasks.keys()
        except Exception as e:
            LOG.info("Could not retrieve celery scheduled tasks: {}".format(e))
            return False

        for host in hosts:
            try:
                host_tasks = scheduled_tasks[host]
            except TypeError:
                LOG.warn("There are no scheduled tasks")
                LOG.info(scheduled_tasks)
                continue

            for task in host_tasks:
                if task['request']['id'] == self.celery_task_id:
                    return True

        return False
Example #11
def get_task_status(system_id, system_ip, task_list):
    """
    Check if there is any task within the 'task_list' running or pending for the given system.
    
    The format of the list of tasks to check is the following:
    {
        <Name of the task>: {'task': <name of the celery task>, 'process': <name of the process>, 'param_value': <task condition>, 'param_argnum': <position of the condition>}
    }   
    
    Args:
        system_id (str) : The system_id where you want to check if it's running
        system_ip (str) : The system_ip where you want to check if it's running
        task_list (dict): The list of tasks to check.
    
    Returns:
        success (bool) : True if successful, False otherwise
        result (dict)  : Dict with the status and the job id for each task.

    """
    result = {}
    
    try:
        i = inspect()
        # Retrieve the list of active tasks.
        running_tasks = i.active().values()
        # Retrieve the list of pending tasks.
        pending_tasks = i.scheduled().values() + i.reserved().values()
    except Exception, e:
        logger.error("[celery.utils.get_task_status]: An error occurred: %s" % (str(e)))
        return False, {}
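
For reference, a hypothetical task_list argument in the format the docstring describes (every name below is illustrative, not taken from the original project):

task_list = {
    'Reconfigure': {
        'task': 'celery_tasks.alienvault_reconfigure',  # illustrative celery task name
        'process': 'alienvault-reconfig',               # illustrative process name
        'param_value': '192.168.5.134',                 # the condition to match
        'param_argnum': 0,                              # position of the condition
    },
}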
Example #12
 def run(self, *args, **kwargs):
     replies = inspect().run("ping", quiet=True)
     if not replies:
         raise Error("No nodes replied within time constraint")
     nodecount = len(replies)
     print("\n%s %s online." % (nodecount,
                                nodecount > 1 and "nodes" or "node"))
Example #13
def ajax_get_queue():
    labels = {
        'platform_flask.backend_git.git_clone_task': 'Cloning git repository',
        'platform_flask.platform.python.create_platform_python27': 'Creating new Python 2.7 platform',
        'platform_flask.platform.python.create_platform_python34': 'Creating new Python 3.4 platform',
    }
    celery_inspector = inspect()
    active = celery_inspector.active()
    if active is None:
        return jsonify(error="No celery workers active")
    response = []
    for host in active:
        for message in active[host]:
            message_id = message['id']
            name = message['name']
            label = labels.get(name, name)  # fall back to the raw task name for unknown tasks
            args = message['args']
            response.append({
                'id': message_id,
                'name': name,
                'args': args,
                'label': label,
                'host': host
            })
    return jsonify(response=response)
Example #14
def node_active_list(request):
    check_permissions(request.user, [PERMISSION_OCR_DOCUMENT])

    i = inspect()
    active_tasks = []
    try:
        active_nodes = i.active()
        if active_nodes:
            for node, tasks in active_nodes.items():
                for task in tasks:
                    task_info = {
                        'node': node,
                        'task_name': task['name'],
                        'task_id': task['id'],
                        'related_object': None,
                    }
                    if task['name'] == u'ocr.tasks.task_process_queue_document':
                        task_info['related_object'] = QueueDocument.objects.get(pk=eval(task['args'])[0]).document
                    active_tasks.append(task_info)
    except socket.error:
        active_tasks = []

    return render_to_response('generic_list.html', {
        'object_list': active_tasks,
        'title': _(u'active tasks'),
        'hide_links': True,
        'hide_object': True,
        'extra_columns': [
            {'name': _(u'node'), 'attribute': 'node'},
            {'name': _(u'task id'), 'attribute': 'task_id'},
            {'name': _(u'task name'), 'attribute': 'task_name'},
            {'name': _(u'related object'), 'attribute': lambda x: display_link(x['related_object']) if x['related_object'] else u''}
        ],
    }, context_instance=RequestContext(request))
Example #15
    def get(self, request, nnid, ver):
        """
        We can execute whole process from data extraction > data preprocessing > train model > eval
        The process of executing a single graph flow is as below
        (1) Set Network Id  \n
        (2) Set Version Id  \n
        (3) Set set graph flow  \n
        (4) Set each nodes params on graph  \n
        (5) Run graph flow of certain version defined on (2)   <-- here .. this step    \n
        (6) Service output model    \n
        ---
        # Class Name : RunManagerTrainRequest

        # Description:
            get status of process (scheduled, active, reserved, done.. )
        """
        try:
            return_data = {}
            return_data['scheduled'] = []
            return_data['active'] = []
            return_data['reserved'] = []

            i = inspect()

            # note: each call is indexed with its own worker key; scheduled()
            # entries nest their task fields under 'request'
            for req in i.scheduled()[list(i.scheduled().keys())[0]]:
                return_data['scheduled'].append(req.get('args'))
            for req in i.active()[list(i.active().keys())[0]]:
                return_data['active'].append(req.get('args'))
            for req in i.reserved()[list(i.reserved().keys())[0]]:
                return_data['reserved'].append(req.get('args'))
            return Response(json.dumps(return_data))
        except Exception as e:
            return_data = {"status": "404", "result": str(e)}
            return Response(json.dumps(return_data))
Example #16
 def get_celery_tasks(self, active=False):
     items = []
     try:
         i = inspect()
         info = i.active() if active else i.scheduled()
         if info:
             # iterate queues
             for queue in info:
                 tasks = []
                 # iterate tasks in queues
                 for task in info.get(queue):
                     tasks.append(dict({"queue": queue,
                                        "name": task['name'],
                                        "id": task['id'],
                                        "args": task['args']}))
                 if tasks:
                     items.append(tasks)
         else:
             raise Exception('Unable to return task queue information')
     except Exception, e:
         self.messages.append({
             'is_error': True,
             'header': 'Failed',
             'content': str(e)
         })
         return {'result': 'FAILURE', 'messages': self.messages}
Example #17
def is_task_in_celery(task_id):
    """Look whether a task is scheduled, reserved or active
    Args:
        The task id
    Returns the task dictionary or None
    """
    task = None
    try:
        i = inspect()
        task_list = {}
        active = i.active().copy()
        scheduled = i.scheduled().copy()
        reserved = i.reserved().copy()
        for node, tasks in active.iteritems():
            for t in tasks:
                task_list[str(t['id'])] = t
        for node, tasks in reserved.iteritems():
            for t in tasks:
                task_list[str(t['id'])] = t
        for node, tasks in scheduled.iteritems():
            for t in tasks:
                task_list[str(t['id'])] = t
        if task_id in task_list:
            found = task_list[task_id].copy()
            return found

    except Exception as exp:
        api_log.error("[is_task_in_celery] An error occurred while reading the task list: %s" % str(exp))
    return task
Example #18
def inspect_workers(workers):
    i = control.inspect(workers)
    return {
        'registered_tasks': i.registered_tasks(),
        'active': i.active(),
        'scheduled': i.scheduled(),
        'reserved': i.reserved()}
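
A usage sketch for the snapshot helper above (the worker names are illustrative):

snapshot = inspect_workers(['celery@worker1', 'celery@worker2'])
print(snapshot['active'])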
Example #19
 def check_rabbitmq_ports(result_handler):
     """
     Checks all ports of Open vStorage components rabbitMQ and celery
     :param result_handler: logging object
     :type result_handler: ovs.extensions.healthcheck.result.HCResults
     :return: None
     :rtype: NoneType
     """
     # Check Celery and RabbitMQ
     if OpenvStorageHealthCheck.LOCAL_SR.node_type != 'MASTER':
         result_handler.skip('RabbitMQ is not running/active on this server!')
         return
     result_handler.info('Checking Celery.', add_to_result=False)
     from errno import errorcode
     try:
         # noinspection PyUnresolvedReferences
         from celery.task.control import inspect
         stats = inspect().stats()
         if stats:
             result_handler.success('Successfully connected to Celery on all nodes.', code=ErrorCodes.port_celery)
         else:
             result_handler.failure('No running Celery workers were found.', code=ErrorCodes.port_celery)
     except IOError as ex:
         msg = 'Could not connect to Celery. Got {0}.'.format(ex)
         if len(ex.args) > 0 and errorcode.get(ex.args[0]) == 'ECONNREFUSED':
             msg += ' Check that the RabbitMQ server is running.'
         result_handler.failure(msg, code=ErrorCodes.port_celery)
     except ImportError as ex:
         result_handler.failure('Could not import the celery module. Got {}'.format(str(ex)), code=ErrorCodes.port_celery)
Example #20
 def getworkerNodes(self):
     i = inspect()
     temp = i.stats() or {}  # stats() returns None when no workers reply
     workernodes = []
     for k,v in temp.iteritems():
         workernodes.append(k)
     return workernodes
Example #21
    def check_celery_backend(self):
        """Checks if Celery backend is running and configured properly."""

        print "Checking Celery Backend......",
        if 'celeryd' not in commands.getoutput('ps -ef'):
            self._set_status(0, "[%s]Error: celery is not running" % self.NAME)
            return True

        if not os.path.exists('/etc/compass/celeryconfig'):
            self._set_status(
                0,
                "[%s]Error: No celery config file found for Compass"
                % self.NAME)
            return True

        try:
            insp = inspect()
            celery_stats = insp.stats()
            print celery_stats,
        except IOError as error:
            self._set_status(
                0,
                "[%s]Error: Failed to connect to the backend: %s"
                % (self.NAME, str(error)))
            from errno import errorcode
            if (
                len(error.args) > 0 and
                errorcode.get(error.args[0]) == 'ECONNREFUSED'
            ):
                self.messages.append(
                    "[%s]Error: RabbitMQ server isn't running"
                    % self.NAME)
        return True
Example #22
 def setUp(self):
     """ 
     Check for at least one active Celery worker else skip the
     test.
     @param self Object reference.
     """
     self.__inspection = inspect()
     try:
         active_nodes = self.__inspection.active()
     except Exception: # pylint:disable = W0703
         unittest.TestCase.skipTest(self, 
                                    "Skip - RabbitMQ seems to be down")
     if active_nodes is None:
         unittest.TestCase.skipTest(self, 
                                    "Skip - No active Celery workers")
     # Get the current MAUS version.
     configuration  = Configuration()
     self.config_doc = configuration.getConfigJSON()
     config_dictionary = json.loads(self.config_doc)
     self.__version = config_dictionary["maus_version"]
     # Reset the worker. Invoke twice in case the first attempt
     # fails due to mess left by previous test.
     self.reset_worker()
     self.reset_worker()
     if maus_cpp.globals.has_instance():
         maus_cpp.globals.death()
     maus_cpp.globals.birth(self.config_doc)
Example #23
def startRequest(req):
	if 'connId' in req.GET.keys() and 'timeOut' in req.GET.keys() and len(req.GET) == 2:
		
		flag = True
		i = inspect()
		hostname = str(i.active().keys()[0])	
		for conn in i.active()[hostname]:
			if req.GET['connId'] == str(conn['id']):
				flag = False
				break

		if flag == True:		
			try:
				currentTimeStamp = datetime.datetime.now()
				endingTimeStamp = currentTimeStamp + datetime.timedelta(seconds = int(req.GET['timeOut']))
				res = test.apply_async(args = [str(endingTimeStamp)] , task_id = req.GET['connId'])
				try:
					res.get(timeout = int(req.GET['timeOut']))
					return JsonResponse({'status' : 'OK'})
				except TimeoutError:
					revoke(res.id, terminate=True)
					return JsonResponse({'status' : 'OK'})	
			except Exception:
					return JsonResponse({'Exception' : 'The request was either hard killed or was already processed'})	
		
		else:
			return JsonResponse({'Exception' : 'Request is already running'})

	else:
		return JsonResponse({'Exception' : 'Invalid API call'})
Example #24
def traffic_lines_geojson_async(request):
    
    deprecate_date = timedelta(minutes=10)
    
    try:
        last = Traffic.objects.all().latest()
        
        dt = last.datetime.replace(tzinfo=None) # Remove time zone
        # Celery task inspector
        celery_inspector = inspect()
        workers = celery_inspector.active()
        name, value = workers.popitem()
    
        if not last: # Empty Cache
            running_task = process_traffic.apply_async()
            logging.info('running task: ' + running_task.id)
            
        elif ((datetime.datetime.utcnow() - dt) > deprecate_date) and not value: # Deprecated Cache
            running_task = process_traffic.apply_async()
            logging.info('running task: ' + running_task.id)
    
    except ObjectDoesNotExist:
        # Get traffic on demand
        traffic_job.get_trafic()
    
    result_all = client.get_bcn_traffic_current_geojson_async()
    
    st = 200
    mimetype = 'application/json'
    return HttpResponse(json.dumps(result_all), mimetype, st)
Example #25
def alienvault_reconfig(system_ip,operation,jobid):
    current_job_id = None
    is_finished = False
    job_status = None
    job_data = None
    jobs_active = None
    job = None
    msg = ""

    if operation == "start":
        print "Starting a new job..."
        job = alienvault_reconfigure.delay(system_ip)
        msg = "Job launched!"
    elif operation == "status":
        print "Status..."
        job = AsyncResult(jobid,backend=alienvault_reconfigure.backend)
    elif operation == "list":
        i = inspect()
        jobs_active = i.active()
    else:
        print "operation (%s) not allowed!!" % operation
    if job:
        current_job_id = job.id
        job_data = job.info
        job_status = job.status

    return make_ok(job_id=current_job_id, finished=is_finished, status=job_status, task_data=job_data,
                   active_jobs=jobs_active, message=msg)
Example #26
def killRequest(req):
	if req.method == 'GET':
		pid = req.GET['connId']
	elif req.method == 'PUT':
		data = json.loads(req.body)
		pid = str(data['connId'])
	
	flag = True
	i = inspect()
	hostname = str(i.active().keys()[0])	
	for conn in i.active()[hostname]:
		if pid == str(conn['id']):
			flag = False
			break

			
	reqResult = AsyncResult(pid)
	if reqResult.result != 'OK' and str(reqResult.result) != 'revoked' and str(reqResult.result) != 'terminated':
		if flag == False:
			revoke(pid, terminate=True)
			return JsonResponse({'status' : 'Killed'})
		else:	
			return JsonResponse({'Exception' : 'Invalid connectionID : ' + pid})
	else:
		if reqResult.result == 'OK':
			return JsonResponse({'Exception' : 'Task was already completed'})
		else:
			return JsonResponse({'Exception' : 'Task was already revoked'})		
Example #27
 def can_run():
     """
     Checks whether a task is running/scheduled/reserved.
     The check is executed in stages, as querying the inspector is a slow call.
     """
     global reason
     if tasknames:
         inspector = inspect()
         active = inspector.active()
         if active:
             for taskname in tasknames:
                 for worker in active.values():
                     for task in worker:
                         if task['id'] != task_id and taskname == task['name']:
                             reason = 'active'
                             return False
         scheduled = inspector.scheduled()
         if scheduled:
             for taskname in tasknames:
                 for worker in scheduled.values():
                     for task in worker:
                         request = task['request']
                         if request['id'] != task_id and taskname == request['name']:
                             reason = 'scheduled'
                             return False
         reserved = inspector.reserved()
         if reserved:
             for taskname in tasknames:
                 for worker in reserved.values():
                     for task in worker:
                         if task['id'] != task_id and taskname == task['name']:
                             reason = 'reserved'
                             return False
     return True
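
The three stages above are nearly identical. A compact sketch of the same staged check as a standalone function (the name is illustrative; scheduled() entries nest their fields under 'request', which the original also accounts for):

def find_conflict(inspector, tasknames, task_id):
    """Return which stage holds a conflicting task, or None."""
    for stage, fetch in (('active', inspector.active),
                         ('scheduled', inspector.scheduled),
                         ('reserved', inspector.reserved)):
        replies = fetch() or {}  # None when no workers reply
        for worker_tasks in replies.values():
            for task in worker_tasks:
                info = task.get('request', task)  # scheduled() nests under 'request'
                if info['id'] != task_id and info['name'] in tasknames:
                    return stage
    return None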
Example #28
 def info(self):
     ins = inspect()
     return {
         'workers': ins.ping(),
         'active': ins.active(),
         'reserved': ins.reserved(),
         'stats': ins.stats()
     }
Example #29
    def test_prepare_reply(self):
        self.assertDictEqual(self.i._prepare([{"w1": {"ok": 1}},
                                              {"w2": {"ok": 1}}]),
                             {"w1": {"ok": 1}, "w2": {"ok": 1}})

        i = control.inspect(destination="w1")
        self.assertEqual(i._prepare([{"w1": {"ok": 1}}]),
                         {"ok": 1})
Example #30
def exist_task_running(task_type, current_task_request, param_to_compare=None, argnum=0):
    """Check if there is any task of type 'task_type' running for the given system.
       If param_to_compare is None, it only checks whether a task of type <task_type> is running.
       In order to find a task running on a <param_to_compare>, you should specify which argument of
       the task represents the <param_to_compare>, for example:
       if the task was launched by running alienvault_reconfigure("192.168.5.134") -> args[0] will be
       the system ip, so you should specify argnum=0

       Args:
         task_type (str): The kind of task to look for (usually the method name)
         current_task_request(): The current task request (current_task.request from the caller)
         param_to_compare (str or None): Parameter to compare within the task, for example the system ip or the system id.
         argnum (int): The argument number where we can find the system ip if needed.
       Returns:
         rt (True or False): True when a task matching the given criteria is running, False otherwise.
    """
    rt = True
    try:
        # Get the current task_id
        current_task_id = current_task_request.id  # e.g. alienvault_reconfigure.request.id
        i = inspect()
        current_task_start_time = time.time()
        task_list = []
        # Retrieve the list of active tasks.
        active_tasks = i.active()
        for node, tasks_list in active_tasks.iteritems():
            for task in tasks_list:
                # Is this task of the given type?
                if task['id'] == current_task_id:
                    current_task_start_time = float(task['time_start'])
                if task['name'].find(task_type) > 0:
                    task_list.append(task)

        previous_task_running = False
        for task in task_list:
            #1 - Is my own task?
            if task['id'] == current_task_id:
                continue
            task_start_time = task['time_start']
            #2 - If not, did the task start before the current one?
            started_before_the_current_one = task_start_time < current_task_start_time
            if started_before_the_current_one and param_to_compare is None:  # An existing task is running
                previous_task_running = True
                break
                 
            #3 - Is the task running on the same system?
            task_param_value = ast.literal_eval(task['args'])[argnum]
            if str(task_param_value) == str(param_to_compare) and started_before_the_current_one:
                previous_task_running = True
                break


        if previous_task_running:
            logger.info("A %s is running....waiting [%s]" % (task_type, current_task_id))
        rt = previous_task_running

    except Exception, e:
        logger.error("An error occurred %s" % (str(e)))
        return True
    return rt
Example #31
def jobs_running():
    i = inspect()

    try:
        active = sum(len(x) for x in i.active().values())
    except AttributeError:
        active = 0

    return active
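
The same pattern extends to reserved tasks; a sketch that mirrors the AttributeError guard above for the no-workers case:

def jobs_reserved():
    i = inspect()
    try:
        return sum(len(x) for x in i.reserved().values())
    except AttributeError:  # reserved() returned None: no workers replied
        return 0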
Example #32
def dataden(self):
    """
    run dataden jar via command line, in a single task and make sure
    we get a lock so it would be impossible to run two dataden
    instances at the same time.

    :return:
    """

    # create anonymous functions basically for getting and relinquishing a lock
    acquire_lock = lambda: cache.add(TASK_LOCK_DATADEN, 'true', LOCK_EXPIRE)
    release_lock = lambda: cache.delete(TASK_LOCK_DATADEN)

    if acquire_lock():  # we want a lock on the thing that fires the task so we never duplicate it
        try:
            i = inspect()
            active_tasks = i.active()
            # {'celery@vagrant-ubuntu-trusty-64':
            #      [
            #        {'worker_pid': 17882,
            #        'args': '[]',
            #        'kwargs': '{}',
            #        'time_start': 1575959.732842815,
            #        'acknowledged': True,
            #        'delivery_info': {'priority': 0,
            #         'exchange': 'celery',
            #         'redelivered': None,
            #         'routing_key': 'celery'},
            #        'name': 'dataden.tasks.dataden_runner',
            #        'hostname': 'celery@vagrant-ubuntu-trusty-64',
            #        'id': '9eb99e90-d862-4f2c-87de-27893ede9713'
            #         }
            #      ]
            # }
            found = False
            for worker, running in active_tasks.items():
                for t in running:
                    if t.get('name') == dataden_runner.name:  # compare with the name of the task
                        found = True
                        break
                if found:
                    break  # get out of the outer loop too, if it was found
            #
            # only fire it if it was not running
            if not found:
                # START DATADEN PROCESS
                dataden_runner.apply_async(queue='q_dataden')
            else:
                print('... monitoring dataden: its running.')

        finally:
            release_lock()
    else:
        pass  # the lock couldn't be acquired
Example #33
 def run(self, *args, **kwargs):
     replies = inspect(no_color=kwargs.get("no_color", False)) \
                         .run("ping", **dict(kwargs, quiet=True))
     if not replies:
         raise Error("No nodes replied within time constraint")
     nodecount = len(replies)
     if not kwargs.get("quiet", False):
         self.out("\n%s %s online." %
                  (nodecount, nodecount > 1 and "nodes" or "node"))
Example #34
def list_active_task():
    i = inspect()
    active = i.active()
    if active is not None:
        for v in active.values():
            for t in v:
                r = BaseAsyncResult(t['id'])
                r.task_name = t['name']
                tasks_info.append({'result': r})
Example #35
def is_harvest_running(id, task_id):
    """Check harvest running."""
    actives = inspect().active()
    for worker in actives:
        for task in actives[worker]:
            if task['name'] == 'invenio_oaiharvester.tasks.run_harvesting':
                if task['args'][0] == str(id) and task['id'] != task_id:
                    return True
    return False
Example #36
 def get(self, request):
     i_obj = inspect()
     try:
         registered_tasks_list = list(
             set(chain.from_iterable(i_obj.registered_tasks().values())))
     except Exception as e:
         registered_tasks_list = []
         wslog_error().error("Failed to fetch registered task info via the API; error: %s" % (e.args))
     return JsonResponse(registered_tasks_list, safe=False)
Example #37
def celery_worker_check():
    try:
        insp = inspect()
        d = insp.stats()
        if not d:
            raise Exception('No running celery workers were found')
    except Exception as e:
        return {'worker': {'ok': False}}
    return {'worker': {'ok': True}}
Example #38
def get_all_values_in_celery():
    insp = inspect()
    active = None
    scheduled = None
    while active is None:
        active = insp.active()
    while scheduled is None:
        scheduled = insp.scheduled()
    return active.values() + scheduled.values()
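
The while-None loops above spin forever if no worker ever replies. A bounded variant, as a sketch (the retry count and delay are arbitrary):

import time

def poll_inspector(fetch, retries=5, delay=1.0):
    """Call an inspector method until it returns a dict or retries run out."""
    for _ in range(retries):
        reply = fetch()
        if reply is not None:
            return reply
        time.sleep(delay)
    return {}

# e.g. active = poll_inspector(inspect().active)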
Example #39
def celery_has_workers():
    """
    The ``stats()`` call will return different stats/metadata information about
    celery worker(s).  An empty/None result will mean that there aren't any
    celery workers in use.
    """
    stats = inspect().stats()
    if not stats:
        raise SystemCheckError('No running Celery worker was found')
Example #40
def celery_has_workers():
    """
    The ``stats()`` call will return different stats/metadata information about
    celery worker(s).  An empty/None result will mean that there aren't any
    celery workers in use.
    """
    stats = inspect().stats()
    if not stats:
        raise SystemCheckError('No running Celery workers were found')
Example #41
def update_job_json(all_tasks, loc_user):
    completed = {}
    completed2 = {}
    jobnames = []
    jobstatus = []
    jobdate = []
    jobelapsed = []
    today = datetime.datetime.now()
    for t in all_tasks:
        if t.find(loc_user) > -1:
            t = t.replace("celery-task-meta-", '')
            stime = t[t.find('{') + 1:-1]
            cc = AsyncResult(t)
            completed[t] = cc.status
            jobnames.append(t)
            jobstatus.append(cc.status)
            jobdate.append(stime)
            ftime = datetime.datetime.strptime(stime, '%a %b %d %H:%M:%S %Y')
            jobelapsed.append((today - ftime).total_seconds())
    i = inspect()
    aa = i.active()
    running = {}
    aa = aa[aa.keys()[0]]
    if len(aa) > 0:
        for i in range(len(aa)):
            t = aa[i]['id']
            stime = t[t.find('{') + 1:-1]

            cc = AsyncResult(t)
            running[t] = cc.status
            jobnames.append(t)
            jobstatus.append(cc.status)
            jobdate.append(stime)
            ftime = datetime.datetime.strptime(stime, '%a %b %d %H:%M:%S %Y')
            jobelapsed.append((today - ftime).total_seconds())
    jobnames = numpy.array(jobnames)
    jobstatus = numpy.array(jobstatus)
    jobdate = numpy.array(jobdate)
    jobelapsed = numpy.array(jobelapsed)
    sort_time = numpy.argsort(jobelapsed)
    completed2['job'] = jobnames
    completed2['status'] = jobstatus
    username = loc_user
    user_folder = os.path.join(Settings.UPLOADS, loc_user) + '/'
    myjobs = user_folder + 'jobs.json'
    with open(myjobs, "w") as outfile:
        json.dump([
            dict(job=jobnames[sort_time[i]],
                 status=jobstatus[sort_time[i]],
                 time=jobdate[sort_time[i]],
                 elapsed=humantime(jobelapsed[sort_time[i]]))
            for i in range(len(jobnames))
        ],
                  outfile,
                  indent=4)
    return username, running, completed, completed2
Example #42
    def get_task_list(cls):
        from celery.task.control import inspect
        from itertools import chain

        if cls._task_list is None:
            i = inspect()
            cls._task_list = set(
                chain.from_iterable(i.registered_tasks().values()))
        return cls._task_list
Example #43
 def _is_update_in_progress(cls, device_pk):
     active = inspect().active()
     if not active:
         return False
     # check if there's any other running task before adding it
     for task_list in active.values():
         for task in task_list:
             if task['name'] == _TASK_NAME and str(device_pk) in task['args']:
                 return True
     return False
Example #44
def check_basicbot2_running(review_id):
    """ determine if basicbot2 is already running for the specified review """
    i = inspect()
    active_tasks = i.active()
    if active_tasks:
        for task in active_tasks[config.CELERY_WORKER_ADDR]:
            if task['name'] == 'bot.basicbot2':
                if 'review_id' in task['kwargs'] and str(review_id) in task['kwargs']:
                    return True
    return False
Example #45
def get_running_tasks(system_ip):
    try:
        i = inspect()
        tasks = i.active()
    except Exception as e:
        error_msg = "[celery.utils.get_running_tasks]: " + \
                    "An error occurred: %s" % (str(e))
        logger.error(error_msg)
        return False, {}
    return (True, tasks)
Example #46
def check_celery_tasks(QUEUE_NAME):
    """
    Checks for scheduled tasks in queue.
    """
    inspector = inspect([QUEUE_NAME])
    scheduled_tasks = inspector.scheduled()
    return not scheduled_tasks  # True when nothing is scheduled
Example #47
def get_celery_jobs(job_name):
    jobs = []
    active_celery_tasks = inspect().active()
    if active_celery_tasks:
        for k in active_celery_tasks.keys():
            host_tasks = active_celery_tasks[k]
            for host_task in host_tasks:
                if host_task.get('name') == job_name:
                    jobs.append(host_task.get('id'))
    return jobs
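
A hedged usage sketch: revoking every running instance of a named job (the job name is illustrative; revoke lives in the same old-style celery.task.control module as inspect):

from celery.task.control import revoke

for job_id in get_celery_jobs('proj.tasks.nightly_sync'):
    revoke(job_id, terminate=True)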
Example #48
 def list(self):
     """
     Overview of active, scheduled, reserved and revoked tasks
     """
     inspector = inspect()
     data = {'active'   : inspector.active(),
             'scheduled': inspector.scheduled(),
             'reserved' : inspector.reserved(),
             'revoked'  : inspector.revoked()}
     return Response(data, status=status.HTTP_200_OK)
Example #49
 def test_post_triggers_generate_medications_task(self):
     """
     Ensure POSTing a correct csv file triggers the generate_medications task.
     This test has a workaround because I was unable to find an easier way
     to check whether the pertinent task was triggered. First I look in the
     celery stats for the task that is supposed to be triggered and get
     how many times it has been run. After making a request to a view
     that triggers the task, I check the total number of runs of this
     task again. If everything works as it should, there should be one more run.
     TODO: Search for a better way to do this.
     """
     pre_celery_stats = inspect().stats()
     pre_celery_key = [key for key in pre_celery_stats.keys()][0]
     pre_task_count = pre_celery_stats.get(
         pre_celery_key
     ).get(
         'total'
     ).get(
         'medications.tasks.generate_medications'
     )
     with open('temporal.csv', 'w+', newline='') as csv_file:
         filewriter = csv.writer(
             csv_file,
             delimiter=',',
             quotechar='|',
             quoting=csv.QUOTE_MINIMAL,
         )
         filewriter.writerow(field_rows)
         csv_file.seek(0)
         auth = self.header_prefix + self.token
         response = self.factory.post(
             self.path, {'csv_file': csv_file}, HTTP_AUTHORIZATION=auth
         )
         post_celery_stats = inspect().stats()
         post_celery_key = [key for key in post_celery_stats.keys()][0]
         post_task_count = post_celery_stats.get(
             post_celery_key
         ).get(
             'total'
         ).get(
             'medications.tasks.generate_medications'
         )
         assert pre_task_count + 1 == post_task_count
Example #50
 def info(self):
     """
     List task queues, as reported by Celery.
     """
     i = inspect()
     return jsonify({
         "scheduled": i.scheduled(),
         "active": i.active(),
         "queued": i.reserved(),
     })
Example #51
    def get(self):

        res = inspect()
        rdata = {
            'active': res.active(),
            'scheduled': res.scheduled(),
            'reserved': res.reserved(),
            'stats': res.stats()
        }
        return rdata
Example #52
def create_records(records):
    """Records creation and indexing."""
    n_updated = 0
    n_created = 0
    uuids = []
    for record in records:
        record['$schema'] = \
            'https://ils.rero.ch/schema/documents/document-minimal-v0.0.1.json'

        # check if already harvested
        pid = None
        for identifier in record.get('identifiedBy'):
            if identifier.get('source') == 'cantook':
                harvested_id = identifier.get('value')
                query = DocumentsSearch().filter(
                    'term',
                    identifiedBy__value=harvested_id
                ).source(includes=['pid'])
                try:
                    pid = [r.pid for r in query.scan()].pop()
                except IndexError:
                    pid = None
        if pid:
            # update the record
            existing_record = Document.get_record_by_pid(pid)
            existing_record.clear()
            existing_record['pid'] = pid
            existing_record.update(
                record,
                dbcommit=True,
                reindex=False
            )
            n_updated += 1
            uuids.append(existing_record.id)
        else:
            # create a new record
            new_record = Document.create(
                record,
                dbcommit=True,
                reindex=False
            )
            n_created += 1
            uuids.append(new_record.id)
    bulk_index(uuids, process=True)
    # wait for bulk index task to finish
    inspector = inspect()
    reserved = inspector.reserved()
    if reserved:
        while any(a != [] for a in reserved.values()):
            reserved = inspector.reserved()
            sleep(1)

    current_app.logger.info('create_records: {} updated, {} new'
                            .format(n_updated, n_created))
    return n_created, n_updated
Example #53
def is_task_in_celery(task_id):
    """Look whether a task is scheduled, reserved or active
    Args:
        The task id
    Returns the task dictionary or None
    """
    try:
        # When celery is down, inspect will be None, in this case we will wait for a while.
        i = None
        tries = 0
        while tries < 3:
            try:
                i = inspect(timeout=10)
                if i is not None:
                    break
            except Exception as exp:
                api_log.warning("Cannot inspect the celery queue.. let's wait for while... %s" % str(exp))
            finally:
                tries = tries + 1
                time.sleep(5)
        if i is None:
            return None
        active = i.active()
        scheduled = i.scheduled()
        reserved = i.reserved()
        if active is not None:
            for node, tasks in active.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()
        if reserved is not None:
            for node, tasks in reserved.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()

        if scheduled is not None:
            for node, tasks in scheduled.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()
        del i
        # Wow, we have reached this point...
        # Maybe celery is to busy to get tasks from the queue, let's see whether the task is in rabbit.
        task_in_rabbit = is_task_in_rabbit(task_id)
        if task_in_rabbit is not None:
            return task_in_rabbit

    except Exception as exp:
        api_log.error("[is_task_in_celery] An error occurred while reading the task list %s" % str(exp))

    return None
Example #54
def get_tasks_old():
    _list = {
        "active": [],
        "active_number": 0,
        "reserved": [],
        "reserved_number": 0,
        "succeed": [],
        "succeed_number": 0,
        "data": []
    }
    try:
        _list["succeed_number"], _list["succeed"], temp_list = get_succeed()
        _list["data"] = get_stats(temp_list)
    except Exception:
        pass
    try:
        active_jobs = inspect().active()
        for hostname in active_jobs.keys():
            for task in active_jobs[hostname]:
                _list["active"].append("ID: {} Start Time: {}".format(
                    task["id"],
                    datetime.fromtimestamp(
                        task["time_start"]).strftime("%Y-%m-%d %H:%M:%S")))
        if len(_list["active"]) == 0:
            _list["active"].append("None")
        else:
            _list["active_number"] = len(_list["active"])
    except Exception:
        pass
    try:
        reserved_jobs = inspect().reserved()
        for hostname in reserved_jobs.keys():
            for task in reserved_jobs[hostname]:
                _list["reserved"].append("ID: {} Start Time: {}".format(
                    task["id"], "None"))
        if len(_list["reserved"]) == 0:
            _list["reserved"].append("None")
        else:
            _list["reserved_number"] = len(_list["reserved"])
    except Exception:
        pass
    return jsonify(_list)
Example #55
def celery_is_alive():
    try:
        from celery.task.control import inspect
        insp = inspect()
        d = insp.stats()
        if d:
            return True
    except IOError:
        return False

    return False
Example #56
def is_task_active(fun, task_id, args):
    from celery.task.control import inspect
    i = inspect()
    active_tasks = i.active() or {}  # active() returns None when no workers reply
    for _, tasks in active_tasks.items():
        for task in tasks:
            if task.get("id") == task_id:
                continue
            if task.get("name") == fun and task.get("args") == str(args):
                return True
    return False
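
A hypothetical dedupe guard built on the helper above, placed at the top of a task body (assumes a Celery app named app; the task and its argument are illustrative):

@app.task(bind=True)
def import_feed(self, feed_url):
    # Skip the work if an identical invocation is already active elsewhere.
    if is_task_active('tasks.import_feed', self.request.id, (feed_url,)):
        return 'duplicate invocation, skipped'
    ...  # the actual import would go here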
Example #57
def is_task_scheduled(fun, args):
    from celery.task.control import inspect

    i = inspect()
    for _, tasks in (i.scheduled() or {}).items():  # guard the no-workers case
        for task in tasks:
            task_req = task.get("request", {})
            if task_req.get("name") == fun and task_req.get("args") == list(
                    args):
                return True
    return False
Example #58
 def is_waiting(service_name):
     """
     Check if a task is waiting.
     """
     scheduled_tasks = list(inspect().scheduled().values())[0]
     for task in scheduled_tasks:
         if 'kwargs' in task:
             args = eval(task['kwargs'])
             if service_name == args.get('service_name',None):
                 return True
     return False
Example #59
def get_celery_worker_status():
    info = inspect()
    d = info.stats()  # type: dict
    if not d:
        d = {
            'message': 'No running Celery workers were found.',
            'status': False
        }
    else:
        d['status'] = True

    return d
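
A possible way to surface this status dict in a health endpoint; Flask is used purely for illustration (the original snippet does not name a web framework):

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/health/celery')
def celery_health():
    status = get_celery_worker_status()
    code = 200 if status.get('status') else 503
    return jsonify(status), code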