Example 1
def initialize(config, prefix, _logger):
    """Set up the worker subsystem: pool, storages, urls and templates.

    Everything is stored in module-level globals so later worker calls
    can reach it. `_logger` is the application logger to reuse here.
    """

    global pool
    global storages
    global urls
    global jinjaEnv
    global logger

    # keep a reference to the caller-supplied logger
    logger = _logger

    # worker pool, sized from configuration (default: a single thread)
    threads = config.getMulti(prefix, "daemon.threads", 1)
    pool = workerpool.WorkerPool(size = threads)
    logger.info("Worker pool size %d (# threads)", threads)

    # storages (repositories) configured under "<prefix>.repositories"
    storages = ecommerce.storage.getStorages(config, prefix + ".repositories")
    logger.info("Repositories: %s", ", ".join(storages.keys()))

    # base urls; an empty mapping when the config has none
    urls = config.getMulti(prefix, "urls", { })

    # Jinja2 environment rooted at the configured template folder
    folder = config.getMulti(prefix, "templates.folder", "./templates")
    jinjaEnv = jinja2.Environment(loader = jinja2.FileSystemLoader(folder))
    logger.info("Templates folder: %s", folder)
Example 2
def get_logger(app_prefix, log_prefix, config = None):
    """Return (lazily creating) the logger registered under `log_prefix`.

    The first call configures the logging system: basicConfig defaults,
    then the dictConfig found at "<app_prefix>.logging" in the
    configuration, if any. Created loggers are cached in the
    module-level `_loggers` dict so repeated calls return the same
    object.

    `config` defaults to the application's global configuration.
    """

    global _loggers
    global _logconf

    # be sure to have a config
    if config is None:
        config = ecommerce.config.getConfig()

    # one-time logging setup (first call only)
    if _loggers is None:

        # set basic parameters
        logging.basicConfig(level = logging.DEBUG, datefmt = '%Y-%m-%d %H:%M:%S')

        # apply the dictConfig from the app configuration, when present
        _logconf = config.getMulti(app_prefix, "logging")
        if _logconf is not None:
            logging.config.dictConfig(_logconf)

        _loggers = { }

    # single dict lookup instead of an "in" test plus a subscript
    logger = _loggers.get(log_prefix)
    if logger is None:

        # with an explicit logging config use a named logger,
        # otherwise fall back to the root logger
        name = log_prefix if _logconf is not None else ''
        logger = logging.getLogger(name)

        # cache it for subsequent calls
        _loggers[log_prefix] = logger

    return logger
Example 3
def daemon(config, prefix, log_prefix, queue):
    """The main daemon function.

    It gets some options from the configuration and keeps pulling jobs
    from `queue`, dispatching each one to its handler in `jobTypes`,
    until either termination is requested or the configured maximum run
    time ("daemon.maxrun" seconds, default 3600) elapses.

    Returns 0 on normal termination.
    """

    set_logger(prefix, log_prefix)

    # get some config options
    maxrun = config.getMulti(prefix, "daemon.maxrun", 3600)
    endBy  = time.time() + maxrun

    logger.info("Daemon started - maxrun %d", maxrun)

    # initialize the work
    work.initialize(config, prefix, logger)

    # set dataset application and preprocessing
    ecommerce.db.dataset.configApplication(application)
    ecommerce.db.dataset.setPreProcess(preprocess.preProcess)

    # start looping
    now  = time.time()
    logger.info("Daemon will end by %s", endBy)
    while not terminating() and now < endBy:

        # try to fetch a job from the queue (non blocking)
        item = queue.next()
        while item is None and now < endBy:
            time.sleep(1)
            item = queue.next()
            now  = time.time()

        # if we have a job, process it
        if item is not None:

            # decode the job
            job = jobs.decode(item.content)

            logger.info("Got job %s, job type %s", item.id, job.get("type", "unknown"))

            # get the job type and dispatch it
            # BUGFIX: tStart/tEnd were previously only assigned in the
            # known-job-type branch, so the timing log below raised
            # UnboundLocalError for unknown job types; time the dispatch
            # unconditionally instead.
            jobType = job.get("type")
            tStart  = time.time()
            if jobType not in jobTypes:
                handled = False
            else:
                handled = jobTypes[jobType](item.id, job)
            tEnd    = time.time()

            # mark as done or error
            if handled:
                logger.info("Job %s processed ok", item.id)
                queue.done(item)
            else:
                logger.info("Job %s processed with ERROR", item.id)
                queue.error(item)
            logger.info("Took %3f seconds to complete job", tEnd - tStart)
        else:
            logger.debug("Job Queue empty")

        # sleep for 1 second (if queue is empty)
        if queue.isEmpty():
            logger.debug("Waiting 1 second...")
            time.sleep(1)

        # get current time
        now = time.time()

    logger.info("Daemon terminating - reason: %s",
                "command" if terminating() else "maxrun reached")

    # deinitialize the work
    work.deinitialize()

    return 0