Example #1
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        Logging._setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # then setup the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
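
The receivers check above means any receiver connected to the setup_logging signal takes over logging configuration entirely. A minimal sketch of such an override (the handler body is illustrative, not from any project listed here):

import logging
from celery import signals

@signals.setup_logging.connect
def configure_logging(loglevel=None, logfile=None, format=None,
                      colorize=None, **kwargs):
    # Any connected receiver makes setup_logging.send() return a
    # non-empty receiver list, so Celery skips its own handler setup.
    logging.basicConfig(level=loglevel or logging.INFO, filename=logfile)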
Example #2
    def __init__(self, *args, **kwargs):
        if hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_DB"):
            db = current_app.conf.CELERY_MONGODB_SCHEDULER_DB
        else:
            db = "celery"
        if (hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_COLLECTION")
                and current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION):
            collection = current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION
        else:
            collection = "schedules"

        if hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_URL"):
            connection = Connection(current_app.conf.CELERY_MONGODB_SCHEDULER_URL)
            get_logger(__name__).info("backend scheduler using %s/%s:%s",
                                      current_app.conf.CELERY_MONGODB_SCHEDULER_URL,
                                      db, collection)
        else:
            connection = Connection()

        self.db = connection[db][collection]

        self._schedule = {}
        self._last_updated = None
        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 300)
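
The scheduler above pulls its connection settings from the Celery configuration; a minimal sketch of the matching settings (all values are placeholders):

# celeryconfig.py -- values are placeholders
CELERY_MONGODB_SCHEDULER_DB = "celery"
CELERY_MONGODB_SCHEDULER_COLLECTION = "schedules"
CELERY_MONGODB_SCHEDULER_URL = "mongodb://localhost:27017"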
Example #3
 def save(self):
     if self.total_run_count > self._task.total_run_count:
         self._task.total_run_count = self.total_run_count
     if self.last_run_at and self._task.last_run_at and self.last_run_at > self._task.last_run_at:
         self._task.last_run_at = self.last_run_at
     self._task.run_immediately = False
     try:
         self._task.save(save_condition={})
     except Exception:
         get_logger(__name__).error(traceback.format_exc())
Example #4
    def __init__(self, *args, **kwargs):
        if hasattr(current_app.conf, 'CELERY_REDIS_SCHEDULER_URL'):
            get_logger(__name__).info('backend scheduler using %s',
                                      current_app.conf.CELERY_REDIS_SCHEDULER_URL)
        else:
            # No URL configured; the scheduler falls back to its default.
            get_logger(__name__).info('backend scheduler using default redis URL')

        self._schedule = {}
        self._last_updated = None
        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 300)
Example #5
    def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
                           colorize=None, propagate=False, **kwargs):
        """Setup the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.task_format
        colorize = self.supports_color(colorize, logfile)

        logger = self.setup_handlers(
            get_logger('celery.task'),
            logfile, format, colorize,
            formatter=TaskFormatter, **kwargs
        )
        logger.setLevel(loglevel)
        logger.propagate = int(propagate)    # this is an int for some reason.
                                             # better to not question why.
        signals.after_setup_task_logger.send(
            sender=None, logger=logger,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        return logger
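
after_setup_task_logger fires at the end of this method, so it is the natural hook for attaching extra handlers to the task logger. A minimal sketch (the handler choice is illustrative):

import logging
from celery.signals import after_setup_task_logger

@after_setup_task_logger.connect
def add_task_handler(logger=None, loglevel=None, format=None, **kwargs):
    # Reuse the loglevel and format Celery just applied.
    handler = logging.StreamHandler()
    handler.setLevel(loglevel)
    handler.setFormatter(logging.Formatter(format))
    logger.addHandler(handler)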
Example #6
 def redirect_stdouts(self, loglevel=None, name='celery.redirected'):
     self.redirect_stdouts_to_logger(
         get_logger(name), loglevel=loglevel
     )
     os.environ.update(
         CELERY_LOG_REDIRECT='1',
         CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
     )
Example #7
    def test_patches(self):
        _patch_logger_class()
        self.assertTrue(logging.getLoggerClass()._signal_safe)
        _patch_logger_class()
        self.assertTrue(logging.getLoggerClass()._signal_safe)

        with in_sighandler():
            logging.getLoggerClass().log(get_logger('test'))
Example #8
 def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, redirect_level="WARNING"):
     handled = self.setup_logging_subsystem(loglevel, logfile)
     if not handled:
         logger = get_logger("celery.redirected")
         if redirect_stdouts:
             self.redirect_stdouts_to_logger(logger, loglevel=redirect_level)
     os.environ.update(
         CELERY_LOG_LEVEL=str(loglevel) if loglevel else "",
         CELERY_LOG_FILE=str(logfile) if logfile else "",
         CELERY_LOG_REDIRECT="1" if redirect_stdouts else "",
         CELERY_LOG_REDIRECT_LEVEL=str(redirect_level),
     )
Example #9
    def __init__(self, *args, **kwargs):
        if hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_DB"):
            db = current_app.conf.CELERY_MONGODB_SCHEDULER_DB
        else:
            db = "celery"
        if hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_URL"):
            self._mongo = mongoengine.connect(db, host=current_app.conf.CELERY_MONGODB_SCHEDULER_URL)
            get_logger(__name__).info("backend scheduler using %s/%s:%s",
                    current_app.conf.CELERY_MONGODB_SCHEDULER_URL,
                    db, self.Model._get_collection().name)
        else:
            self._mongo = mongoengine.connect(db)
            get_logger(__name__).info("backend scheduler using %s/%s:%s",
                    "mongodb://localhost",
                    db, self.Model._get_collection().name)

        self._schedule = {}
        self._last_updated = None
        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 5)
Example #10
    def setup(self):
        logger = self.logger = get_logger('celery.task')
        logger.handlers = []
        logging.root.manager.loggerDict.pop(logger.name, None)
        self.uid = uuid()

        @current_app.task
        def test_task():
            pass
        self.get_logger().handlers = []
        self.task = test_task
        from celery._state import _task_stack
        _task_stack.push(test_task)
Example #11
    def setup(self):
        logger = self.logger = get_logger("celery.task")
        logger.handlers = []
        logging.root.manager.loggerDict.pop(logger.name, None)
        self.uid = uuid()

        @current_app.task
        def test_task():
            pass
        test_task.logger.handlers = []
        self.task = test_task
        from celery.app.state import _tls
        _tls.current_task = test_task
Example #12
    def setup(self):
        logger = self.logger = get_logger("celery.task")
        logger.handlers = []
        logging.root.manager.loggerDict.pop(logger.name, None)
        self.uid = uuid()

        @self.app.task(shared=False)
        def test_task():
            pass

        self.get_logger().handlers = []
        self.task = test_task
        from celery._state import _task_stack

        _task_stack.push(test_task)
Example #13
 def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
         redirect_level='WARNING', colorize=None):
     handled = self.setup_logging_subsystem(
         loglevel, logfile, colorize=colorize,
     )
     if not handled:
         logger = get_logger('celery.redirected')
         if redirect_stdouts:
             self.redirect_stdouts_to_logger(logger,
                             loglevel=redirect_level)
     os.environ.update(
         CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
         CELERY_LOG_FILE=str(logfile) if logfile else '',
         CELERY_LOG_REDIRECT='1' if redirect_stdouts else '',
         CELERY_LOG_REDIRECT_LEVEL=str(redirect_level))
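
A sketch of calling this entry point from application code (the app name is hypothetical):

import logging
from celery import Celery

app = Celery('proj')
# Configures the root and task loggers once, then redirects
# stdout/stderr into the 'celery.redirected' logger.
app.log.setup(loglevel=logging.INFO, logfile=None,
              redirect_stdouts=True, redirect_level='WARNING')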
Example #14
 def test_get_logger_sets_parent(self):
     logger = get_logger('celery.test_get_logger')
     self.assertEqual(logger.parent.name, base_logger.name)
Example #15
__maintainer__ = "LexPredict, LLC"
__email__ = "*****@*****.**"

# add App vars for project-level loggers
for logger_name, logger_data in settings.LOGGING['loggers'].items():
    AppVar.set('Logging', f'logger:{logger_name}:log_level',
               logger_data['level'],
               f'Custom log level for "{logger_name}" logger.')

# add App vars for celery loggers
celery_logger_names = [
    'celery', 'celery.beat', 'celery.task', 'celery.worker',
    'celery.worker.request', 'celery.pool'
]
for logger_name in celery_logger_names:
    logger = get_logger(logger_name)
    AppVar.set('Logging', f'logger:{logger_name}:log_level',
               logging._levelToName.get(logger.level),
               f'Custom log level for "{logger.name}" logger.')

ENABLE_DB_CONSOLE_LOGGING = AppVar.set(
    'Logging', 'logger:django.db.backends:log_to_console', False,
    'Enable logging db sql queries to console.')


def reset_loggers_level():
    """
    Reset loggers' level from AppVar
    """
    def getEffectiveLevel(self):
        """
Example #16
 def setUp(self):
     self.setup_logger = log.setup_logger
     self.get_logger = lambda n=None: get_logger(n) if n else logging.root
     Logging._setup = False
Example #17
# coding=utf-8

import datetime as dt
import pytz

import sqlalchemy as sa
from app import db
from sqlalchemy.event import listen
from sqlalchemy.orm import relationship, foreign, remote
from sqlalchemy.sql import select, insert, update
from celery import schedules
from celery.utils.log import get_logger

from .tzcrontab import TzAwareCrontab

logger = get_logger('celery_sqlalchemy_scheduler.models')


def cronexp(field):
    """Representation of cron expression."""
    return field and str(field).replace(' ', '') or '*'


class ModelMixin(object):
    @classmethod
    def create(cls, **kw):
        return cls(**kw)

    def update(self, **kw):
        for attr, value in kw.items():
            setattr(self, attr, value)
Example #18
"""Celery specific details for routing work requests to Cosmic Ray workers."""
import celery
from celery.utils.log import get_logger
import itertools
import json
import subprocess

from .celery import app
from ..worker import WorkerOutcome
from ..work_record import WorkRecord

LOG = get_logger(__name__)


@app.task(name='cosmic_ray.tasks.worker')
def worker_task(work_record, test_runner, test_args, timeout):
    """The celery task which performs a single mutation and runs a test suite.

    This runs `cosmic-ray worker` in a subprocess and returns the results.

    Returns: An updated WorkRecord
    """
    # The work_record param may come as just a dict (e.g. if it arrives over
    # celery), so we reconstruct a WorkRecord to make it easier to work with.
    work_record = WorkRecord(work_record)

    command = list(
        itertools.chain((
            'cosmic-ray',
            'worker',
            work_record.module,
Example #19
 def get_logger(self, logger_name=None, **kwargs):
     """Get task-aware logger object."""
     logger = get_logger(logger_name or self.name)
     if logger.parent is logging.root:
         logger.parent = get_logger("celery.task")
     return logger
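
The reparenting above (making every task logger a child of celery.task) is what celery.utils.log.get_task_logger does in later Celery versions:

from celery.utils.log import get_task_logger

# A child of the 'celery.task' logger, so it inherits the task
# format (task_id, task_name) applied by setup_task_loggers().
logger = get_task_logger(__name__)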
Example #20
from celery.app.abstract import configurated, from_config
from celery.utils.imports import qualname
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.timeutils import humanize_seconds

STARTUP_INFO_FMT = """
Configuration ->
    . broker -> %(conninfo)s
    . loader -> %(loader)s
    . scheduler -> %(scheduler)s
%(scheduler_info)s
    . logfile -> %(logfile)s@%(loglevel)s
    . maxinterval -> %(hmax_interval)s (%(max_interval)ss)
""".strip()

logger = get_logger("celery.beat")


class Beat(configurated):
    Service = beat.Service

    app = None
    loglevel = from_config("log_level")
    logfile = from_config("log_file")
    schedule = from_config("schedule_filename")
    scheduler_cls = from_config("scheduler")
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self,
                 max_interval=None,
Example #21
    TaskPool interface.

"""
from __future__ import absolute_import

import logging
import os
import time

from kombu.utils.encoding import safe_repr

from celery.utils import timer2
from celery.utils.log import get_logger

logger = get_logger('celery.concurrency')


def apply_target(target,
                 args=(),
                 kwargs={},
                 callback=None,
                 accept_callback=None,
                 pid=None,
                 **_):
    if accept_callback:
        accept_callback(pid or os.getpid(), time.time())
    callback(target(*args, **kwargs))


class BasePool(object):
Example #22
# -*- coding: utf-8 -*-
#
import logging

from celery.signals import after_setup_logger
from celery.utils.log import get_logger
from kombu.utils.encoding import safe_str

from .logger import CeleryTaskFileHandler

safe_str = lambda x: x
logger = get_logger(__file__)


@after_setup_logger.connect
def add_celery_redis_handler(sender=None,
                             logger=None,
                             loglevel=None,
                             format=None,
                             **kwargs):
    if not logger:
        return
    handler = CeleryTaskFileHandler()
    handler.setLevel(loglevel)
    formatter = logging.Formatter(format)
    handler.setFormatter(formatter)
    logger.addHandler(handler)


# @task_failure.connect
# def on_task_failed(sender, task_id, **kwargs):
Example #23
    in :mod:`djcelery.snapshots` in the `django-celery` distribution.

"""
from __future__ import absolute_import

from kombu.utils.limits import TokenBucket

from celery import platforms
from celery.app import app_or_default
from celery.utils import timer2
from celery.utils.dispatch import Signal
from celery.utils.imports import instantiate
from celery.utils.log import get_logger
from celery.utils.timeutils import rate

logger = get_logger('celery.evcam')


class Polaroid(object):
    timer = timer2
    shutter_signal = Signal(providing_args=('state', ))
    cleanup_signal = Signal()
    clear_after = False

    _tref = None
    _ctref = None

    def __init__(self,
                 state,
                 freq=1.0,
                 maxrate=None,
Example #24
#  Otherwise you can read it here: http://www.gnu.org/licenses/gpl-2.0.txt
#
import time
from celery import current_task
from celery.utils.log import get_logger

from apiexceptions.nmap import APINMAPScanException
from apimethods.sensor.nmap import (apimethod_run_nmap_scan,
                                    apimethod_monitor_nmap_scan,
                                    apimethod_nmapdb_add_task,
                                    apimethod_nmapdb_get_task,
                                    apimethod_nmapdb_update_task)
from celerymethods.tasks import celery_instance
from celerymethods.utils import is_task_in_celery

logger = get_logger("celery")


@celery_instance.task
def run_nmap_scan(sensor_id, target, targets_number, scan_type, rdns,
                  scan_timing, autodetect, scan_ports, idm, user):
    """Launches an NMAP scan
    Args:
        sensor_id: The system ID where you want to get the [sensor]/interfaces from ossim_setup.conf
        target: IP address of the component where the NMAP will be executed
        targets_number: Number of hosts to scan
        scan_type: Sets the NMAP scan type
        rdns: Tells Nmap to do reverse DNS resolution on the active IP addresses it finds
        scan_timing: Set the timing template
        autodetect: Aggressive scan options (enable OS detection)
        scan_ports: Only scan specified ports
Example #25
File: base.py Project: zpl/celery
import os
import sys

from billiard.einfo import ExceptionInfo
from billiard.exceptions import WorkerLostError
from kombu.utils.encoding import safe_repr

from celery.exceptions import WorkerShutdown, WorkerTerminate
from celery.five import monotonic, reraise
from celery.utils import timer2
from celery.utils.text import truncate
from celery.utils.log import get_logger

__all__ = ('BasePool', 'apply_target')

logger = get_logger('celery.pool')


def apply_target(target,
                 args=(),
                 kwargs={},
                 callback=None,
                 accept_callback=None,
                 pid=None,
                 getpid=os.getpid,
                 propagate=(),
                 monotonic=monotonic,
                 **_):
    """Apply function within pool context."""
    if accept_callback:
        accept_callback(pid or getpid(), monotonic())
Example #26
 def test_patches(self):
     ensure_process_aware_logger()
     with in_sighandler():
         logging.getLoggerClass().log(get_logger('test'))
Example #27
 def test_get_logger_root(self):
     logger = get_logger(base_logger.name)
     assert logger.parent is logging.root
Example #28
import sys
import calendar
from datetime import datetime, timedelta
import traceback
import redis
from celery import current_app
from celery.five import reraise, monotonic
from celery.utils.timeutils import maybe_make_aware, timezone
from celery.utils.log import LOG_LEVELS, get_logger
from celery.beat import ScheduleEntry, Scheduler, Service, SchedulingError


__version__ = '0.1.0'


logger = get_logger('leek')


def timestamp_from_dt(dt):
    """Convert a datetime to seconds since epoch."""
    return calendar.timegm(maybe_make_aware(dt).utctimetuple()) if dt else 0


class LeekScheduler(Scheduler):
    Entry = ScheduleEntry

    def __init__(self, *args, **kwargs):
        self.data = {}
        self.last_refresh = None

        if 'LEEK_REDIS_URL' not in current_app.conf:
Example #29
 def print_schedule(self):
     schedule = self.fetch_schedule_dict()
     get_logger(__name__).info("**** Current Alert Schedule ****")
     for alert_name, details in schedule.items():
         get_logger(__name__).info("\t{0}: {1} (enabled={2})".format(
             alert_name, details['schedule_string'], details['enabled']))
Example #30
 def test_get_logger_sets_parent(self):
     logger = get_logger('celery.test_get_logger')
     assert logger.parent.name == base_logger.name
Example #31
import os
from functools import wraps

import celery
from celery import Celery
from celery.schedules import crontab
from celery.utils.log import get_logger

logger = get_logger("peerinst-scheduled")

# Set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dalite.settings")

app = Celery("dalite")

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    print(("Request: {0!r}".format(self.request)))


@app.task
Example #32
"""
from __future__ import absolute_import

import atexit

from kombu.utils.limits import TokenBucket

from celery import platforms
from celery.app import app_or_default
from celery.utils import timer2
from celery.utils.dispatch import Signal
from celery.utils.imports import instantiate
from celery.utils.log import get_logger
from celery.utils.timeutils import rate

logger = get_logger("celery.evcam")


class Polaroid(object):
    timer = timer2
    shutter_signal = Signal(providing_args=("state", ))
    cleanup_signal = Signal()
    clear_after = False

    _tref = None
    _ctref = None

    def __init__(self,
                 state,
                 freq=1.0,
                 maxrate=None,
Example #33
import logging
import os
import sys

from billiard.einfo import ExceptionInfo
from billiard.exceptions import WorkerLostError
from kombu.utils.encoding import safe_repr

from celery.five import monotonic, reraise
from celery.utils import timer2
from celery.utils.log import get_logger

__all__ = ['BasePool', 'apply_target']

logger = get_logger('celery.pool')


def apply_target(target, args=(), kwargs={}, callback=None,
                 accept_callback=None, pid=None, getpid=os.getpid,
                 propagate=(), monotonic=monotonic, **_):
    if accept_callback:
        accept_callback(pid or getpid(), monotonic())
    try:
        ret = target(*args, **kwargs)
    except propagate:
        raise
    except Exception:
        raise
    except BaseException as exc:
        try:
Example #34
implementation of this writing the snapshots to a database
in :mod:`djcelery.snapshots` in the `django-celery` distribution.
"""
from __future__ import absolute_import, print_function, unicode_literals
from kombu.utils.limits import TokenBucket
from celery import platforms
from celery.app import app_or_default
from celery.utils.timer2 import Timer
from celery.utils.dispatch import Signal
from celery.utils.imports import instantiate
from celery.utils.log import get_logger
from celery.utils.time import rate

__all__ = ['Polaroid', 'evcam']

logger = get_logger('celery.evcam')


class Polaroid(object):
    """Record event snapshots."""

    timer = None
    shutter_signal = Signal(name='shutter_signal', providing_args={'state'})
    cleanup_signal = Signal(name='cleanup_signal')
    clear_after = False

    _tref = None
    _ctref = None

    def __init__(self, state, freq=1.0, maxrate=None,
                 cleanup_freq=3600.0, timer=None, app=None):
Example #35
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.timeutils import humanize_seconds

__all__ = ["Beat"]

STARTUP_INFO_FMT = """
Configuration ->
    . broker -> {conninfo}
    . loader -> {loader}
    . scheduler -> {scheduler}
{scheduler_info}
    . logfile -> {logfile}@{loglevel}
    . maxinterval -> {hmax_interval} ({max_interval}s)
""".strip()

logger = get_logger("celery.beat")


class Beat(object):
    Service = beat.Service
    app = None

    def __init__(
        self,
        max_interval=None,
        app=None,
        socket_timeout=30,
        pidfile=None,
        no_color=None,
        loglevel="WARN",
        logfile=None,
Example #36
    TaskPool interface.

"""
from __future__ import absolute_import

import logging
import os
import time

from kombu.utils.encoding import safe_repr

from celery.utils import timer2
from celery.utils.log import get_logger

logger = get_logger('celery.concurrency')


def apply_target(target, args=(), kwargs={}, callback=None,
        accept_callback=None, pid=None, **_):
    if accept_callback:
        accept_callback(pid or os.getpid(), time.time())
    callback(target(*args, **kwargs))


class BasePool(object):
    RUN = 0x1
    CLOSE = 0x2
    TERMINATE = 0x3

    Timer = timer2.Timer
Example #37
def download(
    url, ie_key, formats, subtitles, outputdir,
    statuscallback=None, errorcallback=None
):
    """Download a video from url to outputdir."""

    if url.startswith('uploads:'):
        # FIXME: this should be a configuration variable
        url = url.replace('uploads:', 'https://tools.wmflabs.org/'
                                      'video2commons/static/uploads/', 1)
        ie_key = None

    url_blacklisted(url)

    outputdir = os.path.abspath(outputdir)
    statuscallback = statuscallback or (lambda text, percent: None)
    errorcallback = errorcallback or (lambda text: None)
    outtmpl = outputdir + u'/dl.%(ext)s'

    params = {
        'format': formats,
        'outtmpl': outtmpl,
        'writedescription': True,
        'writeinfojson': True,
        'writesubtitles': subtitles,
        'writeautomaticsub': False,
        'allsubtitles': True,
        'subtitlesformat': 'srt/ass/vtt/best',
        'cachedir': '/tmp/',
        'noplaylist': True,  # not implemented in video2commons
        'postprocessors': [{
            'key': 'FFmpegSubtitlesConvertor',
            'format': 'srt',
        }],
        'max_filesize': 5 * (1 << 30),
        'retries': 10,
        'fragment_retries': 10,
        'prefer_ffmpeg': True,  # avconv does not have an srt encoder
        'prefer_free_formats': True,
        'logger': get_logger('celery.task.v2c.main.youtube-dl')
    }

    last_percentage = [Ellipsis]

    def progresshook(d):
        if d['status'] == 'downloading':
            total = d.get('total_bytes') or d.get('total_bytes_estimate')
            percentage = int(100.0 * d['downloaded_bytes'] / total)\
                if total else None
            if percentage != last_percentage[0]:
                last_percentage[0] = percentage
                statuscallback(
                    'Downloading to ' + (d['tmpfilename'] or d['filename']),
                    percentage
                )
        elif d['status'] == 'finished':
            statuscallback('Postprocessing...', -1)
        elif d['status'] == 'error':
            errorcallback('Error raised by YoutubeDL')

    statuscallback('Creating YoutubeDL instance', -1)
    dl = youtube_dl.YoutubeDL(params)
    dl.add_progress_hook(progresshook)

    statuscallback('Preprocessing...', -1)
    info = dl.extract_info(url, download=True, ie_key=ie_key)

    if info.get('webpage_url'):
        url_blacklisted(info['webpage_url'])

    filename = outtmpl % {'ext': info['ext']}
    if not os.path.isfile(filename):
        # https://github.com/rg3/youtube-dl/issues/8349
        filename = outtmpl % {'ext': 'mkv'}
        assert os.path.isfile(filename), \
            'Failed to determine the path of the downloaded video. ' + \
            'Is the video too large?'

    ret = {
        'extractor': ie_key,
        'subtitles': {},
        'target': filename,
    }

    for key in info.get('subtitles', {}):
        # Postprocesed: converted to srt
        filename = outtmpl % {'ext': key + '.srt'}
        if os.path.isfile(filename):
            ret['subtitles'][key] = filename

    return ret
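
The 'logger' key works because YoutubeDL only calls the standard debug/warning/error methods on it, so any logging.Logger fits. A minimal standalone sketch (the URL is a placeholder):

import youtube_dl
from celery.utils.log import get_logger

dl = youtube_dl.YoutubeDL({
    'logger': get_logger('celery.task.v2c.main.youtube-dl'),
    'outtmpl': '/tmp/dl.%(ext)s',
})
dl.download(['https://example.com/video'])  # placeholder URL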
Example #38
from .session import session_cleanup
from .session import SessionManager
from .models import (
    PeriodicTask,
    PeriodicTaskChanged,
    CrontabSchedule,
    IntervalSchedule,
    SolarSchedule,
)
from .literals import DEFAULT_MAX_INTERVAL, DEFAULT_BEAT_DBURI, ADD_ENTRY_ERROR

session_manager = SessionManager()
# session = session_manager()

logger = get_logger('celery_sqlalchemy_scheduler.schedulers')


class ModelEntry(ScheduleEntry):
    """Scheduler entry taken from database row."""

    model_schedules = (
        # (schedule_type, model_type, model_field)
        (schedules.crontab, CrontabSchedule, 'crontab'),
        (schedules.schedule, IntervalSchedule, 'interval'),
        (schedules.solar, SolarSchedule, 'solar'),
    )
    save_fields = ['last_run_at', 'total_run_count', 'no_changes']

    def __init__(self, model, Session, app=None, **kw):
        """Initialize the model entry."""
Example #39
    import pydocumentdb
    from pydocumentdb.document_client import DocumentClient
    from pydocumentdb.documents import (ConnectionPolicy, ConsistencyLevel,
                                        PartitionKind)
    from pydocumentdb.errors import HTTPFailure
    from pydocumentdb.retry_options import RetryOptions
except ImportError:  # pragma: no cover
    pydocumentdb = DocumentClient = ConsistencyLevel = PartitionKind = \
        HTTPFailure = ConnectionPolicy = RetryOptions = None

__all__ = ("CosmosDBSQLBackend", )

ERROR_NOT_FOUND = 404
ERROR_EXISTS = 409

LOGGER = get_logger(__name__)


class CosmosDBSQLBackend(KeyValueStoreBackend):
    """CosmosDB/SQL backend for Celery."""
    def __init__(self,
                 url=None,
                 database_name=None,
                 collection_name=None,
                 consistency_level=None,
                 max_retry_attempts=None,
                 max_retry_wait_time=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
Example #40
 def setup(self):
     self.setup_logger = self.app.log.setup_logger
     self.get_logger = lambda n=None: get_logger(n) if n else logging.root
     signals.setup_logging.receivers[:] = []
     self.app.log.already_setup = False
Example #41
#
#  Otherwise you can read it here: http://www.gnu.org/licenses/gpl-2.0.txt
#
from celery import current_task
from apimethods.sensor.nmap import apimethod_run_nmap_scan, apimethod_monitor_nmap_scan, \
    apimethods_nmap_purge_scan_files, apimethod_nmapdb_add_task, apimethod_nmapdb_get_task, \
    apimethod_nmapdb_update_task, apimethod_nmapdb_delete_task, apimethod_delete_nmap_scan

from apiexceptions.nmap import APINMAPScanException

from celery.utils.log import get_logger
from celerymethods.tasks import celery_instance
from celery.task.control import inspect
import time

logger = get_logger("celery")
# from celery_once.tasks import QueueOnce
# from retrying import retry


@celery_instance.task
def run_nmap_scan(sensor_id, target, targets_number, scan_type, rdns, scan_timing, autodetect, scan_ports, idm, user):
    """Launches an NMAP scan
    Args:
        sensor_id: The system ID where you want to get the [sensor]/interfaces from ossim_setup.conf
        target: IP address of the component where the NMAP will be executed
        targets_number: Number of hosts to scan
        scan_type: Sets the NMAP scan type
        rdns: Tells Nmap to do reverse DNS resolution on the active IP addresses it finds
        scan_timing: Set the timing template
        autodetect: Aggressive scan options (enable OS detection)
        scan_ports: Only scan specified ports
Example #42
 def get_default_logger(self, name='celery', **kwargs):
     return get_logger(name)
Example #43
 def test_get_logger_root(self):
     logger = get_logger(base_logger.name)
     self.assertIs(logger.parent, logging.root)
Example #44
# -*- coding: utf-8 -*-

# @File   : core.py
# @Author : Yuvv
# @Date   : 2018/5/4

from importlib import import_module
from celery import shared_task
from celery.task import Task
from celery.utils.log import get_logger

LOGGER = get_logger('celery.MLPMAsyncTask')


class MLPMAsyncTask(Task):
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        LOGGER.error('execute task failed...', exc_info=True)


@shared_task(base=MLPMAsyncTask)
def do_async_task(func_module,
                  func_name,
                  args: (list, tuple) = None,
                  kwargs: dict = None):
    """
    异步执行未注册为 celery task 的任务(函数)
    :param func_module: 函数所在模块
    :param func_name: 函数名
    :param args: 参数,必须可被序列化为 json
    :param kwargs: 参数,必须可被序列化为 json
    :return: 返回函数执行结果
Example #45
from celery.utils.log import get_logger

from .base import KeyValueStoreBackend

try:
    import azure.storage as azurestorage
    from azure.common import AzureMissingResourceHttpError
    from azure.storage.blob import BlockBlobService
    from azure.storage.common.retry import ExponentialRetry
except ImportError:  # pragma: no cover
    azurestorage = BlockBlobService = ExponentialRetry = \
        AzureMissingResourceHttpError = None  # noqa

__all__ = ("AzureBlockBlobBackend",)

LOGGER = get_logger(__name__)


class AzureBlockBlobBackend(KeyValueStoreBackend):
    """Azure Storage Block Blob backend for Celery."""

    def __init__(self,
                 url=None,
                 container_name=None,
                 retry_initial_backoff_sec=None,
                 retry_increment_base=None,
                 retry_max_attempts=None,
                 *args,
                 **kwargs):
        super(AzureBlockBlobBackend, self).__init__(*args, **kwargs)
Example #46
from celery.utils.timeutils import humanize_seconds

__all__ = ['Beat']

STARTUP_INFO_FMT = """
LocalTime -> {timestamp}
Configuration ->
    . broker -> {conninfo}
    . loader -> {loader}
    . scheduler -> {scheduler}
{scheduler_info}
    . logfile -> {logfile}@{loglevel}
    . maxinterval -> {hmax_interval} ({max_interval}s)
""".strip()

logger = get_logger('celery.beat')


class Beat(object):

    Service = beat.Service
    app = None

    def __init__(self,
                 max_interval=None,
                 app=None,
                 socket_timeout=30,
                 pidfile=None,
                 no_color=None,
                 loglevel='WARN',
                 logfile=None,
Example #47
 def setup(self):
     self.setup_logger = log.setup_logger
     self.get_logger = lambda n=None: get_logger(n) if n else logging.root
     signals.setup_logging.receivers[:] = []
     Logging._setup = False
Example #48
 def redirect_stdouts(self, loglevel=None, name='celery.redirected'):
     self.redirect_stdouts_to_logger(get_logger(name), loglevel=loglevel)
     os.environ.update(
         CELERY_LOG_REDIRECT='1',
         CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
     )
Example #49
    def setup_logging_subsystem(self,
                                loglevel=None,
                                logfile=None,
                                format=None,
                                colorize=None,
                                hostname=None,
                                **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        self.already_setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None,
            loglevel=loglevel,
            logfile=logfile,
            format=format,
            colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(root, logfile, loglevel, format, colorize,
                                   **kwargs)

            # Configure the multiprocessing logger
            self._configure_logger(get_multiprocessing_logger(), logfile,
                                   loglevel if MP_LOG else logging.ERROR,
                                   format, colorize, **kwargs)

            signals.after_setup_logger.send(
                sender=None,
                logger=root,
                loglevel=loglevel,
                logfile=logfile,
                format=format,
                colorize=colorize,
            )

            # then setup the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
Example #50
from celery.app.abstract import configurated, from_config
from celery.utils.imports import qualname
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.timeutils import humanize_seconds

STARTUP_INFO_FMT = """
Configuration ->
    . broker -> %(conninfo)s
    . loader -> %(loader)s
    . scheduler -> %(scheduler)s
%(scheduler_info)s
    . logfile -> %(logfile)s@%(loglevel)s
    . maxinterval -> %(hmax_interval)s (%(max_interval)ss)
""".strip()

logger = get_logger('celery.beat')


class Beat(configurated):
    Service = beat.Service

    app = None
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    schedule = from_config('schedule_filename')
    scheduler_cls = from_config('scheduler')
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self, max_interval=None, app=None,
            socket_timeout=30, pidfile=None, no_color=None, **kwargs):
Example #51
# -*- coding: utf-8 -*-

from architect.manager.client import BaseClient
from celery.utils.log import get_logger

logger = get_logger(__name__)


class SpinnakerClient(BaseClient):

    def __init__(self, **kwargs):
        super(SpinnakerClient, self).__init__(**kwargs)

    def update_resources(self, resources=None):
        self.process_relation_metadata()

    def get_resource_status(self, kind, metadata):
        return 'unknown'

    def process_resource_metadata(self):
        pass

    def process_relation_metadata(self):
        pass
Example #52
from celery import bootsteps
from celery.app import app_or_default
from celery.canvas import subtask
from celery.five import items, values
from celery.task.trace import build_tracer
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.utils.text import truncate
from celery.utils.timer2 import default_timer, to_timestamp
from celery.utils.timeutils import humanize_seconds, timezone

from . import heartbeat, loops, pidbox
from .state import task_reserved, maybe_shutdown, revoked

CLOSE = bootsteps.CLOSE
logger = get_logger(__name__)
debug, info, warn, error, crit = (logger.debug, logger.info, logger.warn,
                                  logger.error, logger.critical)

CONNECTION_RETRY = """\
consumer: Connection to broker lost. \
Trying to re-establish the connection...\
"""

CONNECTION_RETRY_STEP = """\
Trying again {when}...\
"""

CONNECTION_ERROR = """\
consumer: Cannot connect to %s: %s.
%s
Example #53
def purge_mail_log(days):
    logger = get_logger(purge_mail_log.__name__)
    logger.info("Purging mails logs")
    management.call_command("purge_mail_log", days, verbosity=0)
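
A sketch of wiring this task into the beat schedule (task path and timing are hypothetical):

from celery.schedules import crontab

CELERYBEAT_SCHEDULE = {
    'purge-mail-log': {
        'task': 'tasks.purge_mail_log',  # hypothetical task path
        'schedule': crontab(hour=0, minute=30),
        'args': (30,),  # keep 30 days of mail logs
    },
}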