import logging

from multiprocessing import Process, Queue
from Queue import Empty

# Project-specific helpers and constants referenced below are assumed to be
# imported elsewhere in this module.


def process_responses(response_queue, msg_in):
    """ Pulls responses off of the response queue, rebuilds the originating
        request, and persists the response data. """

    log_name = '{0} :: {1}'.format(__name__, process_responses.__name__)
    logging.debug(log_name + ' - STARTING...')

    while 1:
        stream = ''

        # Block on the response queue
        try:
            res = response_queue.get(True)
            request_meta = rebuild_unpacked_request(res)
        except Exception as e:
            logging.error(log_name + ' - Could not get request meta: {0}'.format(e))
            continue

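        # Accumulate the streamed response payload chunk by chunk until the
        # queue briefly runs dry (Empty on the 1 second timeout ends the stream)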
        data = response_queue.get(True)
        while data:
            stream += data
            try:
                data = response_queue.get(True, timeout=1)
            except Empty:
                break

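        # The accumulated stream is expected to be the repr of a response object
        # (e.g. an OrderedDict); evaluating it verifies that it parses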
        try:
            data = eval(stream)
        except Exception as e:

            # Report a fraction of the failed response data directly in the
            # logger
            if len(unicode(stream)) > 2000:
                excerpt = stream[:1000] + ' ... ' + stream[-1000:]
            else:
                excerpt = stream

            err_msg = str(e)
            logging.error(log_name + ' - Request failed. {0}\n\n'
                                     'data excerpt: {1}'.format(err_msg, excerpt))

            # Format a response that will report on the failed request
            stream = "OrderedDict([('status', 'Request failed.'), " \
                     "('exception', '" + escape(unicode(err_msg)) + "'), " \
                     "('request', '" + escape(unicode(request_meta)) + "'), " \
                     "('data', '" + escape(unicode(stream)) + "')])"

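        # Hashed key signature uniquely identifying this request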
        key_sig = build_key_signature(request_meta, hash_result=True)

        # Set request in list to "not alive"
        req_cb_flag_job_complete(key_sig, REQ_NCB_LOCK)

        logging.debug(log_name + ' - Setting data for {0}'.format(
            str(request_meta)))
        set_data(stream, request_meta)

    logging.debug(log_name + ' - SHUTTING DOWN...')
def job_control(request_queue, response_queue):
    """
        Controls the execution of user metrics requests

        Parameters
        ~~~~~~~~~~

        request_queue : multiprocessing.Queue
           Queues incoming API requests.

    """

    # Store executed and pending jobs respectively
    job_queue = list()
    wait_queue = list()

    # Global job ID number
    job_id = 0

    # Tallies the number of concurrently running jobs
    concurrent_jobs = 0

    log_name = '{0} :: {1}'.format(__name__, job_control.__name__)

    logging.debug('{0} - STARTING...'.format(log_name))

    while 1:

        # Request Queue Processing
        # ------------------------

        try:
            # Pull an item off of the queue

            req_item = request_queue.get(timeout=QUEUE_WAIT)

            logging.debug(log_name + ' :: PULLING item from request queue -> ' \
                                     '\n\tCOHORT = {0} - METRIC = {1}'
                .format(req_item['cohort_expr'], req_item['metric']))

        except Exception:
            req_item = None
            #logging.debug('{0} :: {1}  - Listening ...'
            #.format(__name__, job_control.__name__))


        # Process complete jobs
        # ---------------------

        # Iterate over a copy since completed jobs are removed from job_queue
        for job_item in job_queue[:]:

            # Look for completed jobs
            if not job_item.queue.empty():

                # Put request creds on res queue -- this goes to
                # response_handler asynchronously
                response_queue.put(unpack_fields(job_item.request),
                                   block=True)

                # Pull data off of the queue and add it to response queue
                while not job_item.queue.empty():
                    data = job_item.queue.get(True)
                    if data:
                        response_queue.put(data, block=True)

                job_queue.remove(job_item)

                concurrent_jobs -= 1

                logging.debug(log_name + ' :: RUN -> RESPONSE - Job ID {0}' \
                                         '\n\tConcurrent jobs = {1}'
                    .format(str(job_item.id), concurrent_jobs))


        # Process pending jobs
        # --------------------

        # Iterate over a copy since started jobs are removed from wait_queue
        for wait_req in wait_queue[:]:
            if concurrent_jobs < MAX_CONCURRENT_JOBS:
                # prepare job from item

                req_q = Queue()
                proc = Process(target=process_metrics, args=(req_q, wait_req))
                proc.start()

                job_item = job_item_type(job_id, proc, wait_req, req_q)
                job_queue.append(job_item)

                wait_queue.remove(wait_req)

                concurrent_jobs += 1
                job_id += 1

                logging.debug(log_name + ' :: WAIT -> RUN - Job ID {0}' \
                                         '\n\tConcurrent jobs = {1}, ' \
                                         'COHORT = {2} - METRIC = {3}'
                    .format(str(job_item.id), concurrent_jobs,
                            wait_req.cohort_expr, wait_req.metric))


        # Add newest job to the queue
        # ---------------------------

        if req_item:

            # Build the request item
            rm = rebuild_unpacked_request(req_item)

            logging.debug(log_name + ' :: REQUEST -> WAIT ' \
                                     '\n\tCOHORT = {0} - METRIC = {1}'
                .format(rm.cohort_expr, rm.metric))
            wait_queue.append(rm)

            # Communicate with request notification callback about new job
            key_sig = build_key_signature(rm, hash_result=True)
            url = get_url_from_keys(build_key_signature(rm), REQUEST_PATH)
            req_cb_add_req(key_sig, url, REQ_NCB_LOCK)


    logging.debug('{0} - FINISHING.'.format(log_name))
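

# A minimal wiring sketch, not part of the original module: job_control feeds the
# shared response queue while process_responses drains it from a separate worker
# process. Passing None for the msg_in argument (unused in the body shown above)
# and the logging setup are assumptions made purely for illustration.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    requests = Queue()
    responses = Queue()

    # Drain and persist responses asynchronously in a worker process
    responder = Process(target=process_responses, args=(responses, None))
    responder.start()

    # Run the job controller loop in the main process
    job_control(requests, responses)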