Example #1
# requires csv and a celery task, e.g.: import csv; from tasks import task
def __init__(self, filename):
    """Read the CSV file and enqueue a background task for each row."""
    with open(filename) as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            name, email = row
            message = {"name": name, "email": email}
            task.delay(message)
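
Note: task.delay(*args) is Celery shorthand for task.apply_async(args). The sketch below shows the equivalent explicit call; apply_async additionally accepts routing and scheduling options such as countdown.

# .delay() and .apply_async() enqueue the same task; apply_async also
# takes options like countdown (delay in seconds) and queue.
task.delay(message)
task.apply_async(args=[message])                # equivalent to the line above
task.apply_async(args=[message], countdown=10)  # run ~10 seconds from now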
Example #2
    def executeTask(self, params):
        """
        This method instantiates celery tasks in the cloud.
        Returns the result of the celery async call and the task ID.
        """
        logging.info("inside execute task for cloud : Params - %s", str(params))
        try:
            from tasks import task, updateEntry

            # This is a celery task in tasks.py: @celery.task(name='stochss')

            taskid = str(uuid.uuid4())
            # create a celery task
            logging.debug("executeTask : executing task with uuid : %s ", taskid)
            timenow = datetime.now()
            data = {
                "status": "pending",
                "start_time": timenow.strftime("%Y-%m-%d %H:%M:%S"),
                "Message": "Task sent to Cloud",
            }
            updateEntry(taskid, data, backendservices.TABLENAME)
            # celery async task execution http://ask.github.io/celery/userguide/executing.html
            tmp = task.delay(taskid, params)  # calls task(taskid,params)

            logging.debug("executeTask :  result of task : %s", str(tmp))
            return tmp.id, taskid
        except Exception as e:
            # Note: falls through and returns None on failure.
            logging.error("executeTask : error - %s", str(e))
Example #3
def enqueue_task(self, params):
    '''
    Enqueue a celery task and return its pending status and task id.
    '''
    cloud_task = task.delay(params)
    result = {
        "status": self.TASK_STATE_PENDING,
        "task_pid": cloud_task.task_id
    }
    return result
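
The task_pid returned here can later be fed to Celery's AsyncResult API to poll the task's state; a minimal sketch, assuming celery_app is the application instance defined in tasks.py:

def check_task(celery_app, task_pid):
    # AsyncResult queries the result backend for the task's current state.
    res = celery_app.AsyncResult(task_pid)
    return {
        "status": res.state,  # e.g. PENDING, STARTED, SUCCESS, FAILURE
        "result": res.result if res.ready() else None,
    }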
Example #4
def executeTask(self, params):
    '''
    This method instantiates celery tasks in the cloud.
    '''
    logging.info('inside execute task for cloud : Params - %s', str(params))
    try:
        from tasks import task, updateEntry
        taskid = str(uuid.uuid4())
        # create a celery task
        logging.debug("executeTask : executing task with uuid : %s", taskid)
        timenow = datetime.now()
        data = {
            'status': "pending",
            "start_time": timenow.strftime('%Y-%m-%d %H:%M:%S'),
            'Message': "Task sent to Cloud"
        }
        updateEntry(taskid, data, self.tablename)
        tmp = task.delay(taskid, params)
        logging.debug("executeTask : result of task : %s", str(tmp))
        return tmp, taskid
    except Exception as e:
        logging.error("executeTask : error - %s", str(e))
Example #5
    def executeTask(self, params):
        '''
        This method instantiates celery tasks in the cloud.
        Returns the result of the celery async call and the task ID.
        '''
        logging.info('inside execute task for cloud : Params - %s', str(params))
        result = {}
        try:
            from tasks import task, updateEntry
            # This is a celery task in tasks.py: @celery.task(name='stochss')
            
            # Need to make sure that the queue is actually reachable because
            # we don't want the user to try to submit a task and have it
            # timeout because the broker server isn't up yet.
            sleep_time = 5
            total_wait_time = 15
            total_tries = total_wait_time // sleep_time  # integer division, same result on Py2 and Py3
            current_try = 0
            logging.info("About to check broker at: {0}".format(celery.current_app.conf['BROKER_URL']))
            while True:
                try:
                    insp = inspect().stats()
                except IOError as e:
                    current_try += 1
                    logging.info("Broker down, try: {0}, exception: {1}".format(current_try, e))
                    if current_try >= total_tries:
                        logging.info("Broker unreachable for {0} seconds.".format(total_wait_time))
                        return {
                            "success": False,
                            "reason": "Cloud instances unavailable. Please wait a minute for their initialization to complete.",
                            "exception": str(e),
                            "traceback": traceback.format_exc()
                        }
                    time.sleep(sleep_time)
                    continue
                logging.info("Broker up")
                break

            taskid = str(uuid.uuid4())
            result["db_id"] = taskid
            #create a celery task
            logging.info("executeTask : executing task with uuid : %s ", taskid)
            timenow = datetime.now()
            data = {
                'status': "pending",
                "start_time": timenow.strftime('%Y-%m-%d %H:%M:%S'),
                'Message': "Task sent to Cloud"
            }
            
            tmp = None
            if params["job_type"] == "mcem2":
                queue_name = taskid
                result["queue"] = queue_name
                data["queue"] = queue_name
                # How many cores?
                requested_cores = -1
                if "cores" in params:
                    requested_cores = int(params["cores"])
                
                ##################################################################################################################
                # The master task can run on any node...
                #TODO: master task might need to run on node with at least 2 cores...
                # launch_params["instance_type"] = "c3.large"
                # launch_params["num_vms"] = 1
                ##################################################################################################################
                
                celery_info = CelerySingleton().app.control.inspect()
                # How many active workers are there?
                active_workers = celery_info.active()
                # We will keep around a dictionary of the available workers, where
                # the key will be the workers name and the value will be how many
                # cores that worker has (i.e. how many tasks they can execute 
                # concurrently).
                available_workers = {}
                core_count = 0
                if active_workers:
                    for worker_name in active_workers:
                        # active_workers[worker_name] will be a list of dictionaries representing
                        # tasks that the worker is currently executing, so if it doesn't exist
                        # then the worker isn't busy
                        if not active_workers[worker_name]:
                            available_workers[worker_name] = celery_info.stats()[worker_name]['pool']['max-concurrency']
                            core_count += int(available_workers[worker_name])
                logging.info("All available workers:".format(available_workers))
                # We assume that at least one worker is already consuming from the main queue
                # so we just need to find that one worker and remove it from the list, since
                # we need one worker on the main queue for the master task.
                done = False
                for worker_name in available_workers:
                    worker_queues = celery_info.active_queues()[worker_name]
                    for queue in worker_queues:
                        if queue["name"] == "celery":
                            popped_cores = int(available_workers.pop(worker_name))
                            done = True
                            core_count -= popped_cores
                            break
                    if done:
                        break
                if core_count <= 0:
                    # Then there's only one worker available
                    return {
                        "success": False,
                        "reason": "You need to have at least two workers in order to run a parameter estimation job in the cloud."
                    }
                logging.info("Found {0} cores that can be used as slaves on the following workers: {1}".format(
                    core_count,
                    available_workers
                ))
                if requested_cores == -1:
                    params["paramstring"] += " --cores {0}".format(core_count)
                    # Now just use all available cores since the user didn't request
                    # a specific amount, i.e. re-route active workers to the new queue
                    worker_names = []
                    for worker_name in available_workers:
                        worker_names.append(worker_name)
                    logging.info("Rerouting all available workers: {0} to queue: {1}".format(worker_names, queue_name))
                    rerouteWorkers(worker_names, queue_name)
                else:
                    params["paramstring"] += " --cores {0}".format(requested_cores)
                    # Now loop through available workers and see if we have enough free to meet
                    # requested core count.
                    worker_names = []
                    unmatched_cores = requested_cores
                    if available_workers:
                        for worker_name in available_workers:
                            # We need to find out what the concurrency of the worker is.
                            worker_cores = available_workers[worker_name]
                            # Subtract this from our running count and save the workers name
                            unmatched_cores -= worker_cores
                            worker_names.append(worker_name)
                            if unmatched_cores <= 0:
                                # Then we have enough
                                break
                    # Did we get enough?
                    if unmatched_cores > 0:
                        # Nope...
                        return {
                            "success": False,
                            "reason": "Didn't find enough idle cores to meet requested core count of {0}. Still need {1} more.".format(
                                requested_cores,
                                unmatched_cores
                            )
                        }
                    logging.info("Found enough idle cores to meet requested core count of {0}".format(requested_cores))
                    # We have enough, re-route active workers to the new queue
                    logging.info("Rerouting workers: {0} to queue: {1}".format(worker_names, queue_name))
                    rerouteWorkers(worker_names, queue_name)
                
                # Update DB entry just before sending to worker
                updateEntry(taskid, data, backendservices.TABLENAME)
                params["queue"] = queue_name
                tmp = master_task.delay(taskid, params)
                #TODO: This should really be done as a background_thread as soon as the task is sent
                #      to a worker, but this would require an update to GAE SDK.
                # call the poll task process
                poll_task_path = os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "poll_task.py"
                )
                logging.info("Task sent to cloud with celery id {0}...".format(tmp.id))
                poll_task_string = "python {0} {1} {2} > poll_task_{1}.log 2>&1".format(
                    poll_task_path,
                    tmp.id,
                    queue_name
                )
                # shell=True so the redirection in the command string takes effect;
                # shlex.split() would pass ">" and "2>&1" as literal arguments.
                p = subprocess.Popen(poll_task_string, shell=True)
                result["celery_pid"] = tmp.id
            else:
                updateEntry(taskid, data, backendservices.TABLENAME)
                #celery async task execution http://ask.github.io/celery/userguide/executing.html
                tmp = task.delay(taskid, params)  #calls task(taskid,params)
                result["celery_pid"] = tmp.id

            logging.info("executeTask :  result of task : %s", str(tmp))
            result["success"] = True
            return result
        except Exception as e:
            logging.error("executeTask : error - %s", str(e))
            return {
                "success": False,
                "reason": str(e),
                "exception": str(e),
                "traceback": traceback.format_exc()
            }
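
rerouteWorkers is called above but not shown in this excerpt. Functionally it has to tell each named worker to start consuming from the job-specific queue, which Celery's remote control API exposes as add_consumer; a sketch under that assumption:

def rerouteWorkers(worker_names, queue_name):
    # Sketch only -- the real implementation may differ. Ask the listed
    # workers to also consume from queue_name via the remote control API.
    from celery import current_app
    current_app.control.add_consumer(queue_name, destination=worker_names, reply=True)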
from tasks import task
from time import sleep
p_list = ['Big Data', 'Data Analyst', 'Data Engineer']

for p in p_list:
    task.delay(p)
    sleep(10)

# To start a worker (Windows virtualenv example):
#   cd Documents\env\myenv\Scripts
#   celery -A tasks worker -l info -P gevent
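
For the driver above to work, tasks.py must define a task that accepts a single string (the -A tasks flag in the worker command fixes the module name); a minimal sketch, with the broker URL as an assumption:

# tasks.py for the driver above -- minimal sketch; broker URL is an assumption.
from celery import Celery

app = Celery('tasks', broker='redis://localhost:6379/0')

@app.task
def task(profile):
    # Placeholder: the real task would process the given job profile string.
    print("Processing: %s" % profile)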