Example #1
0
 def setUp(self):
     """Build a fixture registry with one due and one pending periodic
     task, driven by a beat.Scheduler polling at a very short interval."""
     self.registry = TaskRegistry()
     self.registry.register(DuePeriodicTask)
     self.registry.register(PendingPeriodicTask)
     # Tiny max_interval so the scheduler ticks immediately in tests.
     self.scheduler = beat.Scheduler(self.registry,
                                     max_interval=0.0001,
                                     logger=log.get_default_logger())
Example #2
0
def make_thumbnail(in_path, out_path):
    """
    Generate a thumbnail from a video using ffmpeg.

    Returns a list: [duration of the video in whole seconds,
    [width, height]].

    Raises Exception when the duration cannot be parsed from the ffmpeg
    output (or the generated thumbnail is empty), or when the resolution
    cannot be determined.
    """
    logger = get_default_logger()

    ffmpeg_path = getattr(settings, "FFMPEG_PATH", 'ffmpeg')

    ffmpeg_cmd = '%s %s -i "%s" "%s"' % (ffmpeg_path,
                                         settings.ALBUMS_VIDEO_THUMB_ARGS,
                                         in_path,
                                         out_path)

    logger.info(ffmpeg_cmd)
    ffmpeg_output = pexpect.run(ffmpeg_cmd)
    logger.info(ffmpeg_output)

    # Extract the length of the video.  Raw strings keep the \d escapes
    # literal for the regex engine (non-raw "\d" is a deprecated escape).
    m = re.search(r"Duration:.*(\d\d):(\d\d):(\d\d\.\d\d)", ffmpeg_output)
    if (m is None or
            os.stat(out_path).st_size == 0):
        raise Exception("unable to find duration of video")
    groups = m.groups()
    # timedelta normalizes h/m/s; .seconds is the duration in whole seconds.
    duration = datetime.timedelta(hours=int(groups[0]),
                                  minutes=int(groups[1]),
                                  seconds=float(groups[2])).seconds

    # Resolution appears in the stream line, e.g. "Stream ... Video ... 640x480".
    m = re.search(r"Stream.*Video.* (\d+)x(\d+)", ffmpeg_output)
    if m is None:
        raise Exception("Unable to determine resolution of video")
    resolution = [int(x) for x in m.groups()]
    logger.info("Resolution: %s" % resolution)

    return [duration, resolution]
 def setUp(self):
     """Register a due and a pending periodic task and create the beat
     Scheduler under test with an aggressive polling interval."""
     self.registry = TaskRegistry()
     self.registry.register(DuePeriodicTask)
     self.registry.register(PendingPeriodicTask)
     self.scheduler = beat.Scheduler(self.registry,
                                     max_interval=0.0001,
                                     logger=log.get_default_logger())
Example #4
0
 def __init__(self, state, freq=1.0, maxrate=None,
         cleanup_freq=3600.0, logger=None):
     """Store snapshot options (logger name suggests the celery "cam"
     component); ``freq``/``cleanup_freq`` are in seconds."""
     self.state = state
     self.freq = freq
     self.cleanup_freq = cleanup_freq
     self.logger = logger or log.get_default_logger(name="celery.cam")
     # maxrate=None/"" leaves rate limiting disabled (falsy short-circuit),
     # otherwise the rate string is wrapped in a TokenBucket.
     self.maxrate = maxrate and TokenBucket(rate(maxrate))
Example #5
0
def video_convert(format, size_arg, in_path, out_path):
    """
    Convert a video with ffmpeg using the configured ``format`` settings.

    Runs one ffmpeg pass per argument set in
    settings.ALBUMS_VIDEO_FORMATS[format]['args'].

    Raises Exception on a hard ffmpeg failure (exit status 1); exit
    status 255 is logged and treated as converted-with-errors.
    """
    logger = get_default_logger()
    format_settings = settings.ALBUMS_VIDEO_FORMATS[format]

    logger.info("Starting conversion of %s" % in_path)

    threads_arg = '-threads %s' % settings.ALBUMS_CONVERSION_THREADS

    ffmpeg_path = getattr(settings, "FFMPEG_PATH", 'ffmpeg')

    for args in format_settings['args']:
        command = '%s -y -i "%s" %s %s %s "%s"' % (ffmpeg_path,
                                                   in_path,
                                                   args,
                                                   size_arg,
                                                   threads_arg,
                                                   out_path)

        logger.info(command)
        # Run from the input's directory so relative paths in args resolve.
        child = pexpect.spawn(command,
                              maxread=1,
                              timeout=settings.ALBUMS_CONVERSION_TIMEOUT,
                              cwd=os.path.dirname(in_path))
        child.expect(pexpect.EOF)

        exit_status = child.exitstatus

        if exit_status == 1:
            raise Exception("Failed to convert %s to %s (ffmpeg exit status %s)" %
                            (in_path, format_settings['ext'], exit_status))
        elif exit_status == 255:
            # Bug fix: this previously referenced the undefined name
            # ``in_file`` and raised NameError instead of logging.
            logger.info("Convert %s to %s with errors" %
                    (in_path, format_settings['ext']))

    logger.info("Successfully converted %s to %s" % (in_path, format_settings['ext']))
Example #6
0
    def __init__(
        self,
        task_name,
        task_id,
        args,
        kwargs,
        on_ack=noop,
        retries=0,
        delivery_info=None,
        hostname=None,
        email_subject=None,
        email_body=None,
        logger=None,
        eventer=None,
        **opts
    ):
        """Capture everything needed to execute one task request.

        Falls back to class-level defaults for the email subject/body,
        to the local hostname, and to the default logger.  The task
        callable is looked up in the ``tasks`` registry, so an
        unregistered name raises a lookup error here.
        """
        self.task_name = task_name
        self.task_id = task_id
        self.retries = retries
        self.args = args
        self.kwargs = kwargs
        self.on_ack = on_ack
        self.delivery_info = delivery_info or {}
        self.hostname = hostname or socket.gethostname()
        self.logger = logger or log.get_default_logger()
        self.eventer = eventer
        self.email_subject = email_subject or self.email_subject
        self.email_body = email_body or self.email_body

        self.task = tasks[self.task_name]
    def __init__(self,
                 task_name,
                 task_id,
                 args,
                 kwargs,
                 on_ack=noop,
                 retries=0,
                 delivery_info=None,
                 hostname=None,
                 email_subject=None,
                 email_body=None,
                 logger=None,
                 eventer=None,
                 **opts):
        """Store the state of a single task invocation.

        ``hostname``, ``logger`` and the email subject/body default to
        the local host, the default logger and the class-level values
        respectively; the task callable comes from the ``tasks``
        registry (unknown names raise a lookup error).
        """
        self.task_name = task_name
        self.task_id = task_id
        self.retries = retries
        self.args = args
        self.kwargs = kwargs
        self.on_ack = on_ack
        self.delivery_info = delivery_info or {}
        self.hostname = hostname or socket.gethostname()
        self.logger = logger or log.get_default_logger()
        self.eventer = eventer
        self.email_subject = email_subject or self.email_subject
        self.email_body = email_body or self.email_body

        self.task = tasks[self.task_name]
Example #8
0
 def __init__(self, state, freq=1.0, maxrate=None,
         cleanup_freq=3600.0, logger=None):
     """Record snapshot intervals (seconds) and an optional rate limit
     for the "celery.cam" component."""
     self.state = state
     self.freq = freq
     self.cleanup_freq = cleanup_freq
     self.logger = logger or log.get_default_logger(name="celery.cam")
     # A falsy maxrate disables the TokenBucket rate limiter entirely.
     self.maxrate = maxrate and TokenBucket(rate(maxrate))
Example #9
0
 def run_periodic_tasks(self):
     """Trigger all due periodic tasks via the status backend, logging
     each task that was applied."""
     logger = get_default_logger()
     for task, task_id in default_periodic_status_backend.run_periodic_tasks():
         logger.debug(
             "PeriodicWorkController: Periodic task %s applied (%s)" % (
                 task.name, task_id))
Example #10
0
 def __init__(self, eta_schedule, logger=None,
         precision=None):
     """Controller thread that steps through the ETA schedule iterator
     at the configured precision."""
     super(ScheduleController, self).__init__()
     self.logger = logger or log.get_default_logger()
     self._scheduler = iter(eta_schedule)
     self.precision = precision or conf.CELERYD_ETA_SCHEDULER_PRECISION
     # Rate-limit repeated identical debug output (see log.SilenceRepeated).
     self.debug = log.SilenceRepeated(self.logger.debug, max_iterations=10)
Example #11
0
 def __init__(self, logger=None, hostname=None, listener=None):
     """Wire up the control panel with this node's logger, hostname and
     broker listener."""
     self.logger = logger or log.get_default_logger()
     self.hostname = hostname
     self.listener = listener
     self.panel = self.panel_cls(self.logger,
                                 hostname=self.hostname,
                                 listener=self.listener)
Example #12
0
 def redirect_stdouts_to_logger(self):
     """Configure the logging subsystem and, when nothing else already
     handled it, route stdout/stderr through the default logger."""
     from celery import log
     handled = log.setup_logging_subsystem(loglevel=self.loglevel,
                                           logfile=self.logfile)
     # Redirect stdout/stderr to our logger.
     if not handled:
         logger = log.get_default_logger()
         log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
Example #13
0
 def __init__(self, ready_queue, callback, logger=None):
     """Daemon thread that pulls tasks off ``ready_queue`` and hands
     them to ``callback``."""
     threading.Thread.__init__(self)
     self.logger = logger or log.get_default_logger()
     self.ready_queue = ready_queue
     self.callback = callback
     # Events used to request shutdown and to acknowledge it.
     self._shutdown = threading.Event()
     self._stopped = threading.Event()
     self.setDaemon(True)
Example #14
0
    def setup_logging(self):
        """Configure the logging subsystem and return the "celery.beat"
        logger, redirecting stdouts to it when redirection is enabled and
        logging was not already handled elsewhere."""
        from celery import log

        handled = log.setup_logging_subsystem(loglevel=self.loglevel, logfile=self.logfile)
        logger = log.get_default_logger(name="celery.beat")
        if self.redirect_stdouts and not handled:
            log.redirect_stdouts_to_logger(logger, loglevel=self.redirect_stdouts_level)
        return logger
 def __init__(self,
              ready_queue,
              logger=None,
              max_interval=DEFAULT_MAX_INTERVAL):
     """Hold scheduling state; ``max_interval`` (seconds) caps how long
     the scheduler may sleep between polls."""
     self.max_interval = float(max_interval)
     self.ready_queue = ready_queue
     self.logger = logger or log.get_default_logger()
     # Internal list of pending entries (managed by the rest of the class).
     self._queue = []
Example #16
0
    def __init__(self, registry=None, schedule=None, logger=None, max_interval=None):
        """Scheduler backed by a task registry; after initialising state
        it runs cleanup() and schedule_registry() (defined elsewhere in
        this class)."""
        self.registry = registry or _registry.TaskRegistry()
        self.data = schedule or {}
        self.logger = logger or log.get_default_logger()
        self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL

        self.cleanup()
        self.schedule_registry()
Example #17
0
 def __init__(self, schedule=None, logger=None, max_interval=None,
         **kwargs):
     """Dict-like schedule store; a fresh empty dict is used when no
     schedule is given (avoids a shared mutable default)."""
     UserDict.__init__(self)
     if schedule is None:
         schedule = {}
     self.data = schedule
     self.logger = logger or log.get_default_logger(name="celery.beat")
     self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL
     self.setup_schedule()
Example #18
0
 def on_iteration(self):
     """Run due periodic tasks, logging (never propagating) any error
     so the controller loop keeps running."""
     logger = get_default_logger()
     logger.debug("PeriodicWorkController: Running periodic tasks...")
     try:
         self.run_periodic_tasks()
     except Exception, exc:  # Python 2 except syntax.
         logger.error(
             "PeriodicWorkController got exception: %s\n%s" % (
                 exc, traceback.format_exc()))
Example #19
0
 def __init__(self, ready_queue, callback, logger=None):
     """Named daemon thread feeding ready tasks to ``callback``."""
     threading.Thread.__init__(self)
     self.logger = logger or log.get_default_logger()
     self.ready_queue = ready_queue
     self.callback = callback
     # Shutdown request / acknowledgement events.
     self._shutdown = threading.Event()
     self._stopped = threading.Event()
     self.setDaemon(True)
     self.setName(self.__class__.__name__)
Example #20
0
 def redirect_stdouts_to_logger(self):
     """Set up logging; when it was not already handled and redirection
     is enabled, route stdout/stderr to the default logger at the
     configured level."""
     from celery import log
     handled = log.setup_logging_subsystem(loglevel=self.loglevel,
                                           logfile=self.logfile)
     # Redirect stdout/stderr to our logger.
     if not handled:
         logger = log.get_default_logger()
         if self.redirect_stdouts:
             log.redirect_stdouts_to_logger(
                 logger, loglevel=self.redirect_stdouts_level)
Example #21
0
File: beat.py Project: clayg/celery
    def __init__(self, schedule=None, logger=None,
            max_interval=None):
        """Initialise the schedule store, then prune stale entries and
        install the configured schedule."""
        # Fresh dict per instance when no schedule is supplied.
        self.data = {} if schedule is None else schedule
        self.logger = logger or log.get_default_logger()
        self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL

        self.cleanup()
        self.setup_schedule()
Example #22
0
 def __init__(self, schedule=None, logger=None, max_interval=None,
         lazy=False, **kwargs):
     """Dict-like schedule store; ``lazy=True`` defers setup_schedule()
     to the caller."""
     UserDict.__init__(self)
     if schedule is None:
         schedule = {}
     self.data = schedule
     self.logger = logger or log.get_default_logger(name="celery.beat")
     self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL
     if not lazy:
         self.setup_schedule()
Example #23
0
 def __init__(self, limit, logger=None, initializer=None,
         maxtasksperchild=None, timeout=None, soft_timeout=None,
         putlocks=True):
     """Record worker-pool options; the pool itself is created later
     (``self._pool`` stays None until then)."""
     self.limit = limit
     self.logger = logger or log.get_default_logger()
     self.initializer = initializer
     self.maxtasksperchild = maxtasksperchild
     self.timeout = timeout
     self.soft_timeout = soft_timeout
     self.putlocks = putlocks
     self._pool = None
Example #24
0
 def test_decrement(self):
     """decrement() lowers both the QoS value and the consumer's
     prefetch count; decrement_eventually() lowers only the internal
     value, leaving the consumer's prefetch count untouched."""
     consumer = self.MockConsumer()
     qos = QoS(consumer, 10, log.get_default_logger())
     qos.update()
     self.assertEqual(int(qos.value), 10)
     self.assertEqual(consumer.prefetch_count, 10)
     qos.decrement()
     self.assertEqual(int(qos.value), 9)
     self.assertEqual(consumer.prefetch_count, 9)
     qos.decrement_eventually()
     self.assertEqual(int(qos.value), 8)
     self.assertEqual(consumer.prefetch_count, 9)
Example #25
0
 def test_decrement(self):
     """Verify immediate vs. eventual decrement: only decrement()
     propagates the new value to the consumer's prefetch count."""
     consumer = self.MockConsumer()
     qos = QoS(consumer, 10, log.get_default_logger())
     qos.update()
     self.assertEqual(int(qos.value), 10)
     self.assertEqual(consumer.prefetch_count, 10)
     qos.decrement()
     self.assertEqual(int(qos.value), 9)
     self.assertEqual(consumer.prefetch_count, 9)
     qos.decrement_eventually()
     self.assertEqual(int(qos.value), 8)
     self.assertEqual(consumer.prefetch_count, 9)
Example #26
0
 def on_iteration(self):
     """Get tasks from bucket queue and apply the task callback."""
     logger = get_default_logger()
     try:
         # Blocks for up to one second waiting for a message.
         task = self.ready_queue.get(timeout=1)
     except QueueEmpty:
         # Nothing ready -- back off briefly before the next poll.
         time.sleep(1)
     else:
         logger.debug("Mediator: Running callback for task: %s[%s]" % (
             task.task_name, task.task_id))
         self.callback(task)
Example #27
0
 def __init__(self, limit, logger=None, initializer=None,
         maxtasksperchild=None, timeout=None, soft_timeout=None,
         putlocks=True, initargs=()):
     """Record worker-pool options, including ``initargs`` passed to the
     per-child initializer; the pool is created later (``_pool`` None)."""
     self.limit = limit
     self.logger = logger or log.get_default_logger()
     self.initializer = initializer
     self.maxtasksperchild = maxtasksperchild
     self.timeout = timeout
     self.soft_timeout = soft_timeout
     self.putlocks = putlocks
     self.initargs = initargs
     self._pool = None
Example #28
0
def s3_delete_folder(path):
    """Delete every key under the ``path`` prefix in the configured S3
    bucket (S3 has no real directories, so this removes each key)."""
    logger = get_default_logger()

    logger.info("Deleting S3 directory %s %s" % (settings.ALBUMS_AWS_S3_BUCKET,
                                                 path))

    conn = S3Connection(settings.ALBUMS_AMAZON_KEY, settings.ALBUMS_AMAZON_SECRET_KEY)
    bucket = conn.get_bucket(settings.ALBUMS_AWS_S3_BUCKET)

    # prefix=path matches everything "inside" the pseudo-directory.
    for key in bucket.get_all_keys(prefix=path):
        logger.info("Deleting key %s %s" % (settings.ALBUMS_AWS_S3_BUCKET,
                                           key.name))
        key.delete()
Example #29
0
 def on_iteration(self):
     """Pop a single task from the bucket queue (waiting up to one
     second) and run the mediator callback on it; an empty queue is
     just logged."""
     logger = get_default_logger()
     try:
         logger.debug("Mediator: Trying to get message from bucket_queue")
         # Blocks until a message arrives or the timeout expires.
         task = self.bucket_queue.get(timeout=1)
     except QueueEmpty:
         logger.debug("Mediator: Bucket queue is empty.")
     else:
         logger.debug("Mediator: Running callback for task: %s[%s]" % (
             task.task_name, task.task_id))
         self.callback(task)
Example #30
0
def process_individual_file(tag_file, game_file):
    """
    Create a GameLog from a tag/game file pair, then remove the sources.

    Returns True when the GameLog was created, False otherwise.  Source
    cleanup is best-effort and per-file: a file that is missing or not
    removable is skipped without aborting removal of the other.
    """
    logger = get_default_logger()
    gl = GameLog.create_new(game_file, tag_file)
    if gl:
        logger.debug("Game %u created." % gl.number)
    else:
        logger.debug("Game not created.")
    for path in (tag_file, game_file):
        try:
            os.remove(path)
        except OSError:
            # Narrowed from a bare except: only swallow filesystem errors,
            # not KeyboardInterrupt/SystemExit or programming errors.
            pass
    return bool(gl)
Example #31
0
def albumitem_generate_thumbnails(image, sizes, send_notifications=True, delete_on_fail=True):
    """Create resized thumbnails of ``image`` and upload them to CloudFront.

    Skips a size when the file already exists locally and on CloudFront.
    On success, marks the image's preview ready and (unless the item is a
    Video) notifies the submitter; on failure, optionally notifies and
    optionally deletes the image.  An image with no preview is logged and
    ignored.
    """
    logger = get_default_logger()

    try:
        old_dir, filename = os.path.split(image.preview.path)
    except ValueError:
        logger.info("Image ID %d has no preview associated with it." % image.id)
        return

    try:
        for size in sizes:
            sized_path = image._resized_path(size)
            new_file = os.path.join(settings.MEDIA_ROOT, sized_path)
            new_dir = os.path.dirname(new_file)
            
            # Already converted and already on the CDN -- nothing to do.
            if(image.preview.storage.exists(new_file) and
               key_on_cloudfront(sized_path)):
                logger.info("Skipping conversion of %s" % new_file)
                continue
            
            try:
                os.makedirs(new_dir)
            except OSError as e:
                # errno 17 == EEXIST: the directory already exists, fine.
                if(e.errno != 17):
                    raise e
                
            logger.info("About to create %s" % new_file)
                
            thumb = Image.open(image.preview.path)
            thumb.thumbnail([size, size], Image.ANTIALIAS)
            thumb.save(new_file, thumb.format, quality=90, optimize=1)
                
            logger.info("Created thumbnail %s (%d bytes)" % (new_file, os.stat(new_file).st_size))

            image_to_cloudfront(new_file, sized_path)

        # Videos get their notifications elsewhere; only notify for images.
        if (send_notifications and
            len(albums.models.Video.objects.filter(id=image.id)) == 0):
            notification.send([image.submitter,], "albums_conversion", {'success': True,
                                                                        'object': image})

        image.preview_ready = True
        image.save()
    except Exception as e:
        logger.info("Failed to convert image %s, %s" % (image.title, e))
        if len(albums.models.Video.objects.filter(id=image.id)) == 0:
            if(send_notifications):
                notification.send([image.submitter,], "albums_conversion", {'success': False,
                                                                            'title': image.title})
            if(delete_on_fail):
                image.delete()
Example #32
0
File: beat.py Project: mulka/celery
 def __init__(self, logger=None,
         max_interval=conf.CELERYBEAT_MAX_LOOP_INTERVAL,
         schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME):
     """Clock-service state; the schedule and scheduler objects are
     created lazily elsewhere (``_schedule``/``_scheduler`` start None)."""
     self.logger = logger or log.get_default_logger()
     self.max_interval = max_interval
     self.schedule_filename = schedule_filename
     self._shutdown = threading.Event()
     self._stopped = threading.Event()
     self._schedule = None
     self._scheduler = None
     self._in_sync = False
     # Suppress repeated debug lines more aggressively when polling fast
     # (interval < 60s allows 10 repeats before logging, else 1).
     silence = self.max_interval < 60 and 10 or 1
     self.debug = log.SilenceRepeated(self.logger.debug,
                                      max_iterations=silence)
Example #33
0
File: job.py Project: kmike/celery
    def __init__(self, task_name, task_id, args, kwargs, on_ack=noop, retries=0, delivery_info=None, **opts):
        """Store one task request; the task callable is looked up in the
        ``tasks`` registry (unknown names raise a lookup error)."""
        self.task_name = task_name
        self.task_id = task_id
        self.retries = retries
        self.args = args
        self.kwargs = kwargs
        self.on_ack = on_ack
        self.delivery_info = delivery_info or {}
        self.task = tasks[self.task_name]

        # Optional attributes fall back to any existing class-level value.
        for opt in ("success_msg", "fail_msg", "fail_email_subject", "fail_email_body", "logger", "eventer"):
            setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
        if not self.logger:
            self.logger = get_default_logger()
Example #34
0
 def __init__(self, task_name, task_id, task_func, args, kwargs,
         on_ack=noop, retries=0, **opts):
     """Store one task invocation; message-level options in ``opts``
     fall back to any existing class-level values."""
     self.task_name = task_name
     self.task_id = task_id
     self.task_func = task_func
     self.retries = retries
     self.args = args
     self.kwargs = kwargs
     # A logger may arrive inside the task kwargs themselves.
     self.logger = kwargs.get("logger")
     self.on_ack = on_ack
     self.executed = False
     for opt in ("success_msg", "fail_msg", "fail_email_subject",
             "fail_email_body"):
         setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
     if not self.logger:
         self.logger = get_default_logger()
Example #35
0
def file_to_cloudfront(path_on_disk, key_name, distribution):
    '''
    Upload the file to cloudfront. Will actually wind up in S3, but boto is
    smart enough to give it all the permissions we need when we do it this
    way.
    '''
    logger = get_default_logger()

    logger.info("Uploading %s to CloudFront %s %s" % (path_on_disk,
                                                      distribution.domain_name,
                                                      key_name))

    # Fix: the file handle was previously opened inline and never closed
    # (leaked until GC); a context manager closes it deterministically.
    # The unused ``obj`` return value is also dropped.
    with open(path_on_disk, 'rb') as source:
        distribution.add_object(key_name, source)

    logger.info("Uploaded S3 %s %s" % (distribution.domain_name,
                                       key_name))
Example #36
0
    def __init__(self, logger=None,
            max_interval=conf.CELERYBEAT_MAX_LOOP_INTERVAL,
            schedule=conf.CELERYBEAT_SCHEDULE,
            schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
            scheduler_cls=None):
        """Beat service state: schedule source, polling cap, and the
        scheduler class to instantiate lazily (``_scheduler`` starts
        None)."""
        self.max_interval = max_interval
        self.scheduler_cls = scheduler_cls or self.scheduler_cls
        self.logger = logger or log.get_default_logger(name="celery.beat")
        self.schedule = schedule
        self.schedule_filename = schedule_filename

        self._scheduler = None
        self._shutdown = threading.Event()
        self._stopped = threading.Event()
        # Allow 10 repeated debug lines when polling fast, else only 1.
        silence = self.max_interval < 60 and 10 or 1
        self.debug = log.SilenceRepeated(self.logger.debug,
                                         max_iterations=silence)
Example #37
0
 def on_iteration(self):
     """Wake-up scheduler: sleep until the next schedule event, logging
     the wake-up only every 10th idle iteration to avoid log spam."""
     logger = get_default_logger()
     # Python 2 iterator protocol; delay is seconds until the next event.
     delay = self._scheduler.next()
     debug_log = True
     if delay is None:
         # Idle: poll every second, but only log once per 10 iterations.
         delay = 1
         if self.iterations == 10:
             self.iterations = 0
         else:
             debug_log = False
             self.iterations += 1
     if debug_log:
         logger.debug("ScheduleController: Scheduler wake-up")
         logger.debug(
             "ScheduleController: Next wake-up eta %s seconds..." % (
                 delay))
     time.sleep(delay)
Example #38
0
def file_to_s3(path_on_disk, key_name):
    """Upload a single local file to the configured S3 bucket under
    ``key_name``."""
    logger = get_default_logger()

    logger.info("Uploading %s to S3 %s %s" % (path_on_disk,
                                              settings.ALBUMS_AWS_S3_BUCKET,
                                              key_name))

    connection = S3Connection(settings.ALBUMS_AMAZON_KEY,
                              settings.ALBUMS_AMAZON_SECRET_KEY)
    target_bucket = connection.get_bucket(settings.ALBUMS_AWS_S3_BUCKET)

    # Create the key and push the file contents in one go.
    target_bucket.new_key(key_name).set_contents_from_filename(path_on_disk)

    logger.info("Uploaded S3 %s %s" % (settings.ALBUMS_AWS_S3_BUCKET,
                                       key_name))
Example #39
0
File: job.py Project: dmishe/celery
 def __init__(self, task_name, task_id, args, kwargs,
         on_ack=noop, retries=0, **opts):
     """Store one task request; raises NotRegistered when the task name
     is not in the ``tasks`` registry."""
     self.task_name = task_name
     self.task_id = task_id
     self.retries = retries
     self.args = args
     self.kwargs = kwargs
     self.logger = opts.get("logger")
     self.eventer = opts.get("eventer")
     self.on_ack = on_ack
     self.executed = False
     self.time_start = None
     # Message-level options fall back to class-level values when unset.
     for opt in ("success_msg", "fail_msg", "fail_email_subject",
             "fail_email_body"):
         setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
     if not self.logger:
         self.logger = get_default_logger()
     if self.task_name not in tasks:
         raise NotRegistered(self.task_name)
     self.task = tasks[self.task_name]
Example #40
0
def albumitem_delete_directory(directory, **kwargs):
    """Remove an album item's media directory, its parent album
    directory when that becomes empty, and (when S3 is configured) the
    matching S3 keys."""
    logger = get_default_logger()

    item_dir = os.path.join(settings.MEDIA_ROOT, directory)
    if os.path.exists(item_dir):
        logger.info("Deleting directory %s" % item_dir)
        shutil.rmtree(item_dir)
    else:
        logger.info("Skipping delete of directory %s" % item_dir)

    # Parent album directory: everything before the last path separator.
    album_dir = item_dir[:item_dir.rfind(os.path.sep)]
    logger.info("Album path %s %s" % (album_dir, os.path.exists(album_dir)))
    if os.path.exists(album_dir) and not os.listdir(album_dir):
        logger.info("Deleting album directory %s" % album_dir)
        shutil.rmtree(album_dir)

    if hasattr(settings, 'ALBUMS_AWS_S3_BUCKET'):
        s3_delete_folder(directory)
    else:
        logger.info("Skipping delete of AWS keys")
Example #41
0
File: job.py Project: jokar/minion
    def __init__(self, task_name, task_id, args, kwargs,
            on_ack=noop, retries=0, delivery_info=None, hostname=None,
            email_subject=None, email_body=None, logger=None,
            eventer=None, eta=None, expires=None, **opts):
        """Store one task request with ETA/expiry support.

        The task callable comes from the ``tasks`` registry; error
        storage is disabled for ignore_result tasks unless the task
        opts in via store_errors_even_if_ignored.
        """
        self.task_name = task_name
        self.task_id = task_id
        self.retries = retries
        self.args = args
        self.kwargs = kwargs
        self.eta = eta
        self.expires = expires
        self.on_ack = on_ack
        self.delivery_info = delivery_info or {}
        self.hostname = hostname or socket.gethostname()
        self.logger = logger or log.get_default_logger()
        self.eventer = eventer
        self.email_subject = email_subject or self.email_subject
        self.email_body = email_body or self.email_body

        self.task = tasks[self.task_name]
        self._store_errors = True
        if self.task.ignore_result:
            self._store_errors = self.task.store_errors_even_if_ignored
 def __init__(self, ready_queue, callback, logger=None):
     """Mediator: pulls tasks from ``ready_queue`` and hands each one
     to ``callback``."""
     super(Mediator, self).__init__()
     self.logger = logger or log.get_default_logger()
     self.callback = callback
     self.ready_queue = ready_queue
Example #43
0
from celery.task import task
from celery.log import get_default_logger
log = get_default_logger()
from couchdb import Database
from redis import StrictRedis
from threading import Thread, active_count
from time import sleep
# Field names coerced to unicode (Python 2) so they compare equal to the
# unicode keys of CouchDB documents; presumably consumed elsewhere to strip
# these metadata fields -- not referenced in the code visible here.
keys_to_remove = [
    unicode(x) for x in [
        'leaf', 'dcterms_language', "text", 'dcterms_educationLevel',
        'skos_exactMatch', 'dcterms_description', 'dcterms_subject',
        'asn_indexingStatus', 'asn_authorityStatus', 'asn_statementLabel',
        'asn_statementNotation', 'asn_altStatementNotation', 'cls',
        'asn_comment'
    ]
]


def process_doc(doc, client):
    """Normalize a standards document dict in place.

    Zeroes the ``count``/``childCount`` counters, derives a lowercase
    ``id`` from the ASN identifier (the final URI path segment), and
    mirrors ``text`` into ``title``.  ``client`` is part of the call
    interface but unused in this step.
    """
    doc['count'] = 0
    doc['childCount'] = 0
    if "asn_identifier" in doc:
        identifier = doc["asn_identifier"]
        # The identifier is either a {'uri': ...} mapping or a plain string.
        if 'uri' in identifier:
            doc['id'] = identifier['uri'].strip()
        else:
            doc['id'] = identifier.strip()
    if 'id' in doc:
        url = doc['id']
        # Keep only the final path segment, lowercased.
        doc['id'] = url[url.rfind("/") + 1:].lower()
    if "text" in doc:
        doc['title'] = doc['text']
Example #44
0
 def __init__(self, logger=None, hostname=None, listener=None):
     """Wire up the control Panel with this node's logger, listener and
     hostname (note the positional argument order of self.Panel)."""
     self.logger = logger or log.get_default_logger()
     self.hostname = hostname
     self.listener = listener
     self.panel = self.Panel(self.logger, self.listener, self.hostname)
Example #45
0
 def __init__(self, limit, logger=None, **kwargs):
     """Record the concurrency limit; the pool itself is created later
     (``self._pool`` stays None until then)."""
     self.limit = limit
     self.logger = logger or log.get_default_logger()
     self._pool = None
Example #46
0
 def setUp(self):
     """Fresh queue/timer fixtures with the logger set fully verbose
     (level 0 lets every record through)."""
     self.ready_queue = FastQueue()
     self.eta_schedule = Timer()
     self.logger = log.get_default_logger()
     self.logger.setLevel(0)
Example #47
0
 def __init__(self, *args, **kwargs):
     """Accept and ignore any arguments; only grab the default logger."""
     self.logger = log.get_default_logger()
Example #48
0
    def __init__(self,
                 concurrency=None,
                 logfile=None,
                 loglevel=None,
                 send_events=conf.SEND_EVENTS,
                 hostname=None,
                 ready_callback=noop,
                 embed_clockservice=False,
                 pool_cls=conf.CELERYD_POOL,
                 listener_cls=conf.CELERYD_LISTENER,
                 mediator_cls=conf.CELERYD_MEDIATOR,
                 eta_scheduler_cls=conf.CELERYD_ETA_SCHEDULER,
                 schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
                 task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
                 task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
                 max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
                 pool_putlocks=conf.CELERYD_POOL_PUTLOCKS,
                 disable_rate_limits=conf.DISABLE_RATE_LIMITS,
                 db=conf.CELERYD_STATE_DB,
                 scheduler_cls=conf.CELERYBEAT_SCHEDULER):
        """Assemble the worker: options, queues, pool, mediator, ETA
        scheduler, optional embedded beat, and the broker listener.

        Component classes are given as dotted names (``*_cls``) and
        instantiated via ``instantiate``; the resulting components list
        is ordered for startup and must be stopped in reverse.
        """

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.logger = log.get_default_logger()
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.ready_callback = ready_callback
        self.send_events = send_events
        self.task_time_limit = task_time_limit
        self.task_soft_time_limit = task_soft_time_limit
        self.max_tasks_per_child = max_tasks_per_child
        self.pool_putlocks = pool_putlocks
        self.timer_debug = log.SilenceRepeated(self.logger.debug,
                                               max_iterations=10)
        self.db = db
        # Ensure stop() runs at interpreter exit.
        self._finalize = Finalize(self, self.stop, exitpriority=1)

        if self.db:
            persistence = state.Persistent(self.db)
            Finalize(persistence, persistence.save, exitpriority=5)

        # Queues
        if disable_rate_limits:
            # No rate limiting: bypass the queue and run tasks directly.
            self.ready_queue = FastQueue()
            self.ready_queue.put = self.process_task
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.pool = instantiate(pool_cls,
                                self.concurrency,
                                logger=self.logger,
                                initializer=process_initializer,
                                initargs=(self.hostname, ),
                                maxtasksperchild=self.max_tasks_per_child,
                                timeout=self.task_time_limit,
                                soft_timeout=self.task_soft_time_limit,
                                putlocks=self.pool_putlocks)

        # The mediator is only needed when rate limits are in play.
        self.mediator = None
        if not disable_rate_limits:
            self.mediator = instantiate(mediator_cls,
                                        self.ready_queue,
                                        callback=self.process_task,
                                        logger=self.logger)
        self.scheduler = instantiate(
            eta_scheduler_cls,
            precision=conf.CELERYD_ETA_SCHEDULER_PRECISION,
            on_error=self.on_timer_error,
            on_tick=self.on_timer_tick)

        # Optional embedded clock (beat) service.
        self.beat = None
        if self.embed_clockservice:
            self.beat = beat.EmbeddedService(
                logger=self.logger,
                schedule_filename=schedule_filename,
                scheduler_cls=scheduler_cls)

        prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
        self.listener = instantiate(listener_cls,
                                    self.ready_queue,
                                    self.scheduler,
                                    logger=self.logger,
                                    hostname=self.hostname,
                                    send_events=self.send_events,
                                    init_callback=self.ready_callback,
                                    initial_prefetch_count=prefetch_count,
                                    pool=self.pool)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None,
                                 (self.pool, self.mediator, self.scheduler,
                                  self.beat, self.listener))
Example #49
0
def set_loglevel(panel, loglevel=None):
    """Remote-control command: change the default logger's level.

    Accepts an int level or a level name; returns {"ok": <level>}
    (``None`` when no level was given).
    """
    if loglevel is None:
        return {"ok": loglevel}
    if not isinstance(loglevel, int):
        # Translate a symbolic name like "INFO" to its numeric level.
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    log.get_default_logger(loglevel=loglevel)
    return {"ok": loglevel}
Example #50
0
 def __init__(self, limit=None, putlocks=True, logger=None, **options):
     """Record pool concurrency options; extra keyword ``options`` are
     kept for later use when the pool is created."""
     self.limit = limit
     self.putlocks = putlocks
     self.logger = logger or log.get_default_logger()
     self.options = options
 def __init__(self, eta_schedule, logger=None, precision=None):
     """Controller thread stepping through the ETA schedule iterator at
     the configured precision."""
     super(ScheduleController, self).__init__()
     self.logger = logger or log.get_default_logger()
     self._scheduler = iter(eta_schedule)
     self.precision = precision or conf.CELERYD_ETA_SCHEDULER_PRECISION
     # Rate-limit repeated identical debug output (see log.SilenceRepeated).
     self.debug = log.SilenceRepeated(self.logger.debug, max_iterations=10)