Code example #1
0
def monitor_func(**kwargs):
    """Periodic analyzer monitor: drain analyzer tasks from the simplequeue.

    Skips the first 5 invocations (warm-up), then rate-limits itself so it
    fires at most once every ``kwargs['kick_timer']`` seconds.  When it
    fires, it dequeues up to ``max_analyze_threads`` tasks from the analyzer
    queue and processes each in its own thread, joining them all before the
    cycle ends.

    Always returns True so the calling monitor loop keeps scheduling it.
    """
    global click, running, last_run, queuename, system_user_auth

    timer = int(time.time())
    # Warm-up: let the service settle for a few cycles before doing work.
    if click < 5:
        click = click + 1
        logger.debug("Analyzer starting in: " + str(5 - click))
        return True

    # Rate limit: skip this cycle if kick_timer seconds haven't elapsed.
    if round(time.time() - last_run) < kwargs['kick_timer']:
        logger.spew("timer hasn't kicked yet: " +
                    str(round(time.time() - last_run)) + " : " +
                    str(kwargs['kick_timer']))
        return True

    try:
        running = True
        last_run = time.time()
        logger.debug("FIRING: analyzer")

        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig['system_user_auth']

        queues = simplequeue.get_queues(system_user_auth)
        if not queues:
            logger.warn(
                "could not get any queues from simplequeue client, cannot do any work"
            )
        elif queuename not in queues:
            logger.error(
                "connected to simplequeue, but could not find queue (" +
                queuename + "), cannot do any work")
        else:
            try:
                # Narrowed from a bare except: fall back to a single worker
                # thread only when the analyzer config is missing/malformed,
                # without masking unrelated failures.
                try:
                    myconfig = localconfig['services']['analyzer']
                    max_analyze_threads = int(myconfig['max_threads'])
                except (KeyError, TypeError, ValueError):
                    max_analyze_threads = 1

                logger.debug("max threads: " + str(max_analyze_threads))
                threads = []
                for i in range(0, max_analyze_threads):
                    # Dequeue directly instead of checking qlen() first: the
                    # length check races with other consumers, so a task seen
                    # by qlen() may be gone by the time we dequeue.
                    qobj = simplequeue.dequeue(system_user_auth, queuename)
                    if qobj:
                        myqobj = copy.deepcopy(qobj)
                        logger.spew("incoming queue object: " +
                                    str(myqobj))
                        logger.debug("incoming queue task: " +
                                     str(myqobj.keys()))
                        logger.debug("starting thread")
                        athread = threading.Thread(
                            target=process_analyzer_job,
                            args=(
                                system_user_auth,
                                myqobj,
                            ))
                        athread.start()
                        threads.append(athread)
                        logger.debug("thread started")
                    else:
                        logger.debug(
                            "analyzer queue is empty - no work this cycle")

                # Wait for every worker before ending the cycle.
                for athread in threads:
                    logger.debug("joining thread")
                    athread.join()
                    logger.debug("thread joined")

            except Exception as err:
                logger.error(str(err))
    except Exception as err:
        logger.error(str(err))
    finally:
        # Always clear the running flag, even on error.
        running = False
        logger.debug("FIRING DONE: analyzer: " + str(int(time.time()) - timer))

    return True
Code example #2
0
def monitor_func(**kwargs):
    """Periodic analyzer monitor: drain analyzer tasks from the simplequeue.

    Skips the first 5 invocations (warm-up), then rate-limits itself so it
    fires at most once every ``kwargs['kick_timer']`` seconds.  When it
    fires, it dequeues up to ``max_analyze_threads`` tasks, processes each in
    its own thread, joins them all, and finally runs layer-cache maintenance
    if the cache is enabled and any job may have dirtied it.

    Always returns True so the calling monitor loop keeps scheduling it.
    """
    global click, running, last_run, queuename, system_user_auth

    timer = int(time.time())
    # Warm-up: let the service settle for a few cycles before doing work.
    if click < 5:
        click = click + 1
        logger.debug("Analyzer starting in: " + str(5 - click))
        return True

    # Rate limit: skip this cycle if kick_timer seconds haven't elapsed.
    if round(time.time() - last_run) < kwargs['kick_timer']:
        logger.spew("timer hasn't kicked yet: " +
                    str(round(time.time() - last_run)) + " : " +
                    str(kwargs['kick_timer']))
        return True

    try:
        running = True
        last_run = time.time()
        logger.debug("FIRING: analyzer")

        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig['system_user_auth']

        # (The former "if True:" wrapper and the redundant inner
        # try/except, which duplicated this handler, were removed.)
        myconfig = localconfig['services']['analyzer']
        max_analyze_threads = int(myconfig.get('max_threads', 1))
        layer_cache_enable = myconfig.get('layer_cache_enable', False)
        layer_cache_dirty = False

        logger.debug("max threads: " + str(max_analyze_threads))
        threads = []
        for i in range(0, max_analyze_threads):
            # Dequeue directly; a falsy result means no work available.
            qobj = simplequeue.dequeue(system_user_auth, queuename)
            if qobj:
                myqobj = copy.deepcopy(qobj)
                logger.spew("incoming queue object: " + str(myqobj))
                logger.debug("incoming queue task: " +
                             str(myqobj.keys()))
                logger.debug("starting thread")
                athread = threading.Thread(target=process_analyzer_job,
                                           args=(system_user_auth,
                                                 myqobj,
                                                 layer_cache_enable))
                athread.start()
                threads.append(athread)
                logger.debug("thread started")
                # Any dispatched job may have touched the layer cache.
                layer_cache_dirty = True
            else:
                logger.debug(
                    "analyzer queue is empty - no work this cycle")

        # Wait for every worker before ending the cycle.
        for athread in threads:
            logger.debug("joining thread")
            athread.join()
            logger.debug("thread joined")

        # Perform cache maintenance only after all analyzer threads have
        # finished, and only if a job may have dirtied the cache.
        if layer_cache_enable and layer_cache_dirty:
            logger.debug("running layer cache handler")
            try:
                handle_layer_cache()
            except Exception as err:
                logger.warn(
                    "layer cache management failed - exception: " +
                    str(err))

    except Exception as err:
        logger.error(str(err))
    finally:
        # Always clear the running flag, even on error.
        running = False
        logger.debug("FIRING DONE: analyzer: " + str(int(time.time()) - timer))

    return True
Code example #3
0
def handle_image_analyzer(*args, **kwargs):
    """Long-running analyzer worker loop.

    Every ``cycle_timer`` seconds: dispatch a new analyzer job into a worker
    thread when a slot is free and the queue has work, reap finished worker
    threads, and run layer-cache maintenance once all workers are idle and
    the cache may be dirty.

    Reads cycle_timer from kwargs['mythread']['cycle_timer'].  Loops forever;
    the trailing ``return True`` is unreachable in practice.
    """
    global system_user_auth, queuename, servicename

    cycle_timer = kwargs['mythread']['cycle_timer']

    localconfig = anchore_engine.configuration.localconfig.get_config()
    system_user_auth = localconfig['system_user_auth']

    threads = []
    # Start dirty so cache maintenance runs on the first idle cycle.
    layer_cache_dirty = True
    while True:
        logger.debug("analyzer thread cycle start")
        try:
            myconfig = localconfig['services']['analyzer']
            max_analyze_threads = int(myconfig.get('max_threads', 1))
            layer_cache_enable = myconfig.get('layer_cache_enable', False)

            logger.debug("max threads: " + str(max_analyze_threads))

            if len(threads) < max_analyze_threads:
                logger.debug("analyzer has free worker threads {} / {}".format(
                    len(threads), max_analyze_threads))
                qobj = simplequeue.dequeue(system_user_auth, queuename)
                if qobj:
                    logger.debug("got work from queue task Id: {}".format(
                        qobj.get('queueId', 'unknown')))
                    myqobj = copy.deepcopy(qobj)
                    logger.spew("incoming queue object: " + str(myqobj))
                    logger.debug("incoming queue task: " + str(myqobj.keys()))
                    logger.debug("starting thread")
                    athread = threading.Thread(target=process_analyzer_job,
                                               args=(system_user_auth, myqobj,
                                                     layer_cache_enable))
                    athread.start()
                    threads.append(athread)
                    logger.debug("thread started")
                    # Any dispatched job may have touched the layer cache.
                    layer_cache_dirty = True
                else:
                    logger.debug(
                        "analyzer queue is empty - no work this cycle")
            else:
                logger.debug("all workers are busy")

            # Reap finished worker threads; keep the live ones for next cycle.
            alive_threads = []
            while (threads):
                athread = threads.pop()
                # Thread.isAlive() was removed in Python 3.9; the PEP 8
                # name is_alive() works on all supported versions.
                if not athread.is_alive():
                    try:
                        logger.debug("thread completed - joining")
                        athread.join()
                        logger.debug("thread joined")
                    except Exception as err:
                        logger.warn("cannot join thread - exception: " +
                                    str(err))
                else:
                    alive_threads.append(athread)
            threads = alive_threads

            # Cache maintenance only runs when no analyzer thread is active
            # and at least one job has run since the last maintenance pass.
            if layer_cache_enable and layer_cache_dirty and len(threads) == 0:
                logger.debug("running layer cache handler")
                try:
                    handle_layer_cache()
                    layer_cache_dirty = False
                except Exception as err:
                    logger.warn("layer cache management failed - exception: " +
                                str(err))

        except Exception as err:
            import traceback
            traceback.print_exc()
            logger.error(str(err))

        logger.debug("analyzer thread cycle complete: next in " +
                     str(cycle_timer))
        time.sleep(cycle_timer)
    return True