def register_signals(APPENLIGHT_CLIENT):
    """Wire appenlight reporting into celery's task and logging signals.

    Connects prerun/postrun/failure handlers that time each task and report
    the timing (and, for failures, the exception) to *APPENLIGHT_CLIENT*,
    plus a hook that registers appenlight's log handler.  Returns True.
    """

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        # Stamp the task instance so postrun/failure can compute duration.
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        # Bug fix: default to None so a missing prerun stamp (handler
        # connected after the task started) no longer raises AttributeError.
        start_time = getattr(task, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        # For task_failure the sender is the task instance itself.
        start_time = getattr(sender, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    # NOTE(review): celery's task_success signal only sends (sender, result);
    # postrun_signal's required positional parameters would raise TypeError
    # when dispatched from it -- confirm this connection is intentional.
    task_success.connect(postrun_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
Beispiel #2
0
def register_signals(APPENLIGHT_CLIENT):
    """Connect appenlight task-timing and logging handlers to celery signals.

    Returns True once all handlers are connected.
    """

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        # Remember when the task started so the other handlers can time it.
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        # Bug fix: a missing prerun stamp used to raise AttributeError; fall
        # back to None instead.
        start_time = getattr(task, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        # task_failure's sender is the task instance that prerun stamped.
        start_time = getattr(sender, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    # Bug fix: task_failure was connected twice in a row; the duplicate
    # connect call has been removed.
    task_failure.connect(failure_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
Beispiel #3
0
def register_signal(client):
    """Report task failures and worker ERROR logs to Sentry via *client*."""

    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        # Forward the captured traceback together with the task context.
        task_context = {
            'task_id': task_id,
            'task': sender,
            'args': args,
            'kwargs': kwargs,
        }
        client.captureException(exc_info=einfo.exc_info, extra=task_context)

    task_failure.connect(process_failure_signal, weak=False)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        import logging
        # Always attach to the root logger, not the one celery hands us.
        logger = logging.getLogger()
        handler = SentryHandler(client)
        already_installed = handler.__class__ in list(map(type, logger.handlers))
        if already_installed:
            return False
        handler.setLevel(logging.ERROR)
        handler.addFilter(CeleryFilter())
        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
Beispiel #4
0
def register_logger(client, loglevel=logging.ERROR):
    """Install a Sentry handler for *client* whenever celery sets up logging.

    The handler level is taken from this function's ``loglevel`` argument;
    the level celery reports with the signal is deliberately ignored.
    """
    def sentry_logger(sender, logger, logfile, format, colorize, **kw):
        # Bug fix: the inner signature used to declare ``loglevel`` as a
        # signal parameter, shadowing the outer argument so the caller's
        # requested level was silently ignored.  The signal's loglevel now
        # lands in **kw and the closure uses the outer value, matching the
        # register_logger_signal variant elsewhere in this file.
        filter_ = CeleryFilter()
        handler = SentryHandler(client)
        handler.setLevel(loglevel)
        handler.addFilter(filter_)
        logger.addHandler(handler)

    after_setup_logger.connect(sentry_logger, weak=False)
Beispiel #5
0
def json_logging():
    """Hook celery's logger-setup signals to our custom json formatters."""
    from celery.signals import after_setup_logger, after_setup_task_logger

    after_setup_logger.connect(json_formatter)
    after_setup_task_logger.connect(json_task_formatter)
Beispiel #6
0
def json_logging():
    """
    Wire the celery logging signals so that both the main logger and the
    task logger are formatted with our custom json formatters.
    """
    from celery.signals import after_setup_logger
    from celery.signals import after_setup_task_logger

    # Main worker logger first, then the per-task logger.
    after_setup_logger.connect(json_formatter)
    after_setup_task_logger.connect(json_task_formatter)
Beispiel #7
0
def register_logger(client, loglevel=logging.ERROR):
    """Attach a Sentry logging handler at *loglevel* on celery logger setup.

    The level applied to the handler comes from this function's argument,
    not from the loglevel value carried by the signal.
    """
    def sentry_logger(sender, logger, logfile, format,
                      colorize, **kw):
        # Bug fix: declaring ``loglevel`` as a parameter of this inner
        # receiver shadowed the outer argument, so the requested level was
        # never applied.  The signal's loglevel keyword now falls into **kw.
        filter_ = CeleryFilter()
        handler = SentryHandler(client)
        handler.setLevel(loglevel)
        handler.addFilter(filter_)
        logger.addHandler(handler)

    after_setup_logger.connect(sentry_logger, weak=False)
Beispiel #8
0
def register_logging_filter(client):
    """Ensure an already-installed SentryHandler carries a CeleryFilter.

    Does not install a handler itself; it only augments an existing one
    found on the logger celery configured.
    """
    def sentry_logging_filter(sender, logger, loglevel, logfile, format,
                              colorize, **kw):
        # Attempt to find an existing SentryHandler, and if it exists ensure
        # that the CeleryFilter is installed.
        # If one is found, we do not attempt to install another one.
        for h in logger.handlers:
            # Bug fix: use isinstance instead of an exact type comparison so
            # subclasses of SentryHandler are recognised as well.
            if isinstance(h, SentryHandler):
                filter_ = CeleryFilter()
                h.addFilter(filter_)
                return False

    after_setup_logger.connect(sentry_logging_filter, weak=False)
Beispiel #9
0
def register_logging_filter(client):
    """Add a CeleryFilter to any SentryHandler already on celery's logger."""
    def sentry_logging_filter(sender, logger, loglevel, logfile, format,
                              colorize, **kw):
        # Look for an already-installed SentryHandler; if present, make sure
        # it carries the CeleryFilter and stop -- never add a second handler.
        for existing in logger.handlers:
            if type(existing) == SentryHandler:
                existing.addFilter(CeleryFilter())
                return False

    after_setup_logger.connect(sentry_logging_filter, weak=False)
Beispiel #10
0
def register_logger_signal(client, logger=None, loglevel=logging.ERROR):
    filter_ = CeleryFilter()
    handler = SentryHandler(client)
    handler.setLevel(loglevel)
    handler.addFilter(filter_)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        for h in logger.handlers:
            if type(h) == SentryHandler:
                h.addFilter(filter_)
                return False

        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
Beispiel #11
0
def register_logger_signal(client, logger=None, loglevel=logging.ERROR):
    filter_ = CeleryFilter()

    handler = SentryHandler(client)
    handler.setLevel(loglevel)
    handler.addFilter(filter_)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        # Attempt to find an existing SentryHandler, and if it exists ensure
        # that the CeleryFilter is installed.
        # If one is found, we do not attempt to install another one.
        for h in logger.handlers:
            if type(h) == SentryHandler:
                h.addFilter(filter_)
                return False

        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
Beispiel #12
0
def register_signal(client):
    """Send task failures and worker ERROR-level logs to Sentry."""

    def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):
        task_context = {"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs}
        client.captureException(exc_info=einfo.exc_info, extra=task_context)

    task_failure.connect(process_failure_signal, weak=False)

    def process_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw):
        import logging

        # Operate on the root logger rather than the one celery hands us.
        logger = logging.getLogger()
        handler = SentryHandler(client)
        existing_types = [type(h) for h in logger.handlers]
        if handler.__class__ in existing_types:
            return False
        handler.setLevel(logging.ERROR)
        handler.addFilter(CeleryFilter())
        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
Beispiel #13
0
def setup_app(app):
    """Setup Sentry extension.

    Fills in SENTRY_* / LOGGING_SENTRY_* config defaults on *app* and, when
    a DSN is configured, installs the raven Flask extension, optional
    warnings capture, Celery log forwarding, and a werkzeug console handler
    in debug mode.  When SENTRY_DSN is unset, only the defaults are applied.
    """
    app.config.setdefault('SENTRY_DSN', None)
    # Sanitize data more
    app.config.setdefault('SENTRY_PROCESSORS', (
        'raven.processors.SanitizePasswordsProcessor',
        'invenio.ext.logging.backends.sentry.InvenioSanitizeProcessor',
    ))
    # When a user is logged in, also include the user info in the log message.
    app.config.setdefault('SENTRY_USER_ATTRS', [
        'info',
    ])
    # Defaults to only reporting errors and warnings.
    app.config.setdefault('LOGGING_SENTRY_LEVEL', 'WARNING')
    # Send warnings to Sentry?
    app.config.setdefault('LOGGING_SENTRY_INCLUDE_WARNINGS', True)
    # Send Celery log messages to Sentry?
    app.config.setdefault('LOGGING_SENTRY_CELERY', True)
    # Transport mechanism for Celery. Defaults to synchronous transport.
    # See http://raven.readthedocs.org/en/latest/transports/index.html
    app.config.setdefault('LOGGING_SENTRY_CELERY_TRANSPORT', 'sync')

    if app.config['SENTRY_DSN']:
        # Detect Invenio requirements and add to Sentry include paths so
        # version information about them is added to the log message.
        app.config.setdefault('SENTRY_INCLUDE_PATHS', sentry_include_paths())

        # Fix-up known version problems getting version information
        # Patch submitted to raven-python, if accepted the following lines
        # can be removed:
        # https://github.com/getsentry/raven-python/pull/452
        from raven.utils import _VERSION_CACHE
        import numpy
        import webassets
        import setuptools
        _VERSION_CACHE['invenio'] = invenio.__version__
        _VERSION_CACHE['numpy'] = numpy.__version__
        _VERSION_CACHE['webassets'] = webassets.__version__
        _VERSION_CACHE['setuptools'] = setuptools.__version__

        # Modify Sentry transport for Celery - must be called prior to client
        # creation.
        celery_dsn_fix(app)

        # Installs sentry in app.extensions['sentry']
        s = Sentry(app,
                   logging=True,
                   level=getattr(logging, app.config['LOGGING_SENTRY_LEVEL']))

        # Replace method with more robust version
        s.add_sentry_id_header = add_sentry_id_header

        # Add extra tags information to sentry.
        s.client.extra_context({'version': invenio.__version__})

        # Capture warnings from warnings module
        if app.config['LOGGING_SENTRY_INCLUDE_WARNINGS']:
            setup_warnings(s)

        # Setup Celery logging to Sentry
        if app.config['LOGGING_SENTRY_CELERY']:
            # Setup Celery loggers
            # NOTE(review): partial(...) receivers are connected with
            # weak=False so they are not garbage-collected after this
            # function returns.
            after_setup_task_logger.connect(partial(celery_logger_setup,
                                                    app=app),
                                            weak=False)
            after_setup_logger.connect(partial(celery_logger_setup, app=app),
                                       weak=False)

        # Werkzeug only adds a stream handler if there's no other handlers
        # defined, so when Sentry adds a log handler no output is
        # received from Werkzeug unless we install a console handler here on
        # the werkzeug logger.
        if app.debug:
            logger = logging.getLogger('werkzeug')
            logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            logger.addHandler(handler)
Beispiel #14
0
                'admin_tools.template_loaders.Loader',
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]
        },
    },
]


def configure_logging(logger, **kwargs):
    """Email the site admins on ERROR-level records from this logger."""
    mail_handler = AdminEmailHandler()
    mail_handler.setLevel(logging.ERROR)
    logger.addHandler(mail_handler)


after_setup_logger.connect(configure_logging)

GOOGLE_STEP2_URI = 'http://helpline.co.ke/gwelcome'
GOOGLE_OAUTH2_CLIENT_ID = 'REPLACE ME'
GOOGLE_OAUTH2_CLIENT_SECRET = 'REPLACE ME'

THUMB_CONF = {
    'large': {
        'size': 1280,
        'suffix': '-large'
    },
    'medium': {
        'size': 640,
        'suffix': '-medium'
    },
    'small': {
Beispiel #15
0
from celery import Celery

celery = Celery('Shekarchi',
                broker='redis://localhost:6379/0',
                backend='redis://localhost:6379/0',
                include=['app.api', 'controller'])

import logging

from celery.signals import after_setup_logger, after_setup_task_logger


def handle_logs(logger=None, loglevel=logging.DEBUG, **kwargs):
    """Attach the shared handler from ``common`` and hand the logger back."""
    from common import handler as shared_handler
    logger.addHandler(shared_handler)
    return logger


after_setup_task_logger.connect(handle_logs)
after_setup_logger.connect(handle_logs)
Beispiel #16
0
                    'propagate': False,
                },
                'console_logger': {
                    'handlers': ['console'],
                    'level': 'DEBUG',
                    'propagate': True
                },
                'sentry.errors': {
                    'level': 'DEBUG',
                    'handlers': ['console'],
                    'propagate': False,
                },
            },
        }
        CELERY_WORKER_HIJACK_ROOT_LOGGER = False
        after_setup_logger.connect(celery_logger_setup_handler)

POSTGIS_VERSION = (2, 5, 0)

CELERY_BEAT_SCHEDULE = {
    # Periodically mark exports stuck in the "pending" state as "failed"
    # See https://github.com/kobotoolbox/kobocat/issues/315
    'log-stuck-exports-and-mark-failed': {
        'task': 'onadata.apps.viewer.tasks.log_stuck_exports_and_mark_failed',
        'schedule': timedelta(hours=6),
        'options': {
            'queue': 'kobocat_queue'
        }
    },
}
Beispiel #17
0
def setup_app(app):
    """Setup Sentry extension.

    Applies SENTRY_* / LOGGING_SENTRY_* config defaults to *app*.  If a
    SENTRY_DSN is present it additionally installs the raven Flask
    extension, optionally captures warnings, forwards Celery logs to
    Sentry, and (in debug mode) puts a console handler on werkzeug's
    logger.
    """
    app.config.setdefault('SENTRY_DSN', None)
    # Sanitize data more
    app.config.setdefault('SENTRY_PROCESSORS', (
        'raven.processors.SanitizePasswordsProcessor',
        'invenio.ext.logging.backends.sentry.InvenioSanitizeProcessor',
    ))
    # When a user is logged in, also include the user info in the log message.
    app.config.setdefault('SENTRY_USER_ATTRS', ['info', ])
    # Defaults to only reporting errors and warnings.
    app.config.setdefault('LOGGING_SENTRY_LEVEL', 'WARNING')
    # Send warnings to Sentry?
    app.config.setdefault('LOGGING_SENTRY_INCLUDE_WARNINGS', True)
    # Send Celery log messages to Sentry?
    app.config.setdefault('LOGGING_SENTRY_CELERY', True)
    # Transport mechanism for Celery. Defaults to synchronous transport.
    # See http://raven.readthedocs.org/en/latest/transports/index.html
    app.config.setdefault('LOGGING_SENTRY_CELERY_TRANSPORT', 'sync')

    if app.config['SENTRY_DSN']:
        # Detect Invenio requirements and add to Sentry include paths so
        # version information about them is added to the log message.
        app.config.setdefault('SENTRY_INCLUDE_PATHS', sentry_include_paths())

        # Fix-up known version problems getting version information
        # Patch submitted to raven-python, if accepted the following lines
        # can be removed:
        # https://github.com/getsentry/raven-python/pull/452
        from raven.utils import _VERSION_CACHE
        import numpy
        import webassets
        import setuptools
        _VERSION_CACHE['invenio'] = invenio.__version__
        _VERSION_CACHE['numpy'] = numpy.__version__
        _VERSION_CACHE['webassets'] = webassets.__version__
        _VERSION_CACHE['setuptools'] = setuptools.__version__

        # Modify Sentry transport for Celery - must be called prior to client
        # creation.
        celery_dsn_fix(app)

        # Installs sentry in app.extensions['sentry']
        s = Sentry(
            app,
            logging=True,
            level=getattr(logging, app.config['LOGGING_SENTRY_LEVEL'])
        )

        # Replace method with more robust version
        s.add_sentry_id_header = add_sentry_id_header

        # Add extra tags information to sentry.
        s.client.extra_context({'version': invenio.__version__})

        # Capture warnings from warnings module
        if app.config['LOGGING_SENTRY_INCLUDE_WARNINGS']:
            setup_warnings(s)

        # Setup Celery logging to Sentry
        if app.config['LOGGING_SENTRY_CELERY']:
            # Setup Celery loggers
            # NOTE(review): weak=False keeps the partial(...) receivers
            # alive after this function returns.
            after_setup_task_logger.connect(
                partial(celery_logger_setup, app=app),
                weak=False
            )
            after_setup_logger.connect(
                partial(celery_logger_setup, app=app),
                weak=False
            )

        # Werkzeug only adds a stream handler if there's no other handlers
        # defined, so when Sentry adds a log handler no output is
        # received from Werkzeug unless we install a console handler here on
        # the werkzeug logger.
        if app.debug:
            logger = logging.getLogger('werkzeug')
            logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            logger.addHandler(handler)
Beispiel #18
0
    CELERY_ACCEPT_CONTENT=["json"],  # Ignore other content
    CELERY_RESULT_SERIALIZER="json",
    CELERY_ENABLE_UTC=True,
)


def setup_log(**args):
    """Install logbook handlers: syslog first, then stderr with bubbling."""
    logbook.SyslogHandler().push_application()
    logbook.StreamHandler(sys.stderr, bubble=True).push_application()


APP = None


def needs_app_context(f):
    """Decorate *f* so it always executes inside the global APP's context.

    The application is created lazily on first call and cached in ``APP``.
    """
    @functools.wraps(f)
    def _with_app_context(*call_args, **call_kwargs):
        global APP

        # Create the application exactly once, on first use.
        if APP is None:
            APP = create_app()

        with APP.app_context():
            return f(*call_args, **call_kwargs)

    return _with_app_context


after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
Beispiel #19
0
    global n
    n += 1
    logger.info('request type: %s', type(self.request))
    logger.error('{0} Request: {1!r}'.format(n, self.request))


def update_loglevel(*args, **kwargs):
    """Redirect the process stdout/stderr into the celery app log at INFO."""
    app.log.redirect_stdouts(loglevel='INFO')


# it's not at all clear to me why these
# two signals work, or the correct timing at
# which to call the function to redirect the
# stdouts, but this worked, so I felt it
# was wise to just go with it . . .
after_setup_logger.connect(update_loglevel)
worker_process_init.connect(update_loglevel)

from djenga.celery.utils import auto_step


@auto_step(key=1)
def fly_to_the_moon(self):
    pass


@auto_step(key=2)
def shrink_the_moon(self):
    pass

Beispiel #20
0
def configure_syslog(app):
    """Route celery logging to syslog when the frontend config enables it."""
    if not frontend_config.log.syslog:
        return
    # Colour codes would corrupt syslog output.
    app.conf.update(CELERYD_LOG_COLOR=False)
    after_setup_logger.connect(setup_log)
    after_setup_task_logger.connect(setup_log)
Beispiel #21
0
    redirect_stdouts_to_logger(args['logger'])  # logs to local syslog
    if os.path.exists('/dev/log'):
        h = logging.handlers.SysLogHandler('/dev/log')
    else:
        h = logging.handlers.SysLogHandler()
    h.setLevel(args['loglevel'])
    formatter = logging.Formatter(logging.BASIC_FORMAT)
    h.setFormatter(formatter)
    args['logger'].addHandler(h)


APP = None


def needs_app_context(f):
    """Wrap *f* so it runs with an application context, building APP lazily."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        global APP

        # First call constructs the app; later calls reuse the cached one.
        APP = APP if APP is not None else create_app()

        with APP.app_context():
            return f(*args, **kwargs)

    return wrapper


after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
Beispiel #22
0
def configure_syslog(app):
    """Enable syslog-based celery logging if the probe config asks for it."""
    syslog_enabled = probe_config.log.syslog
    if syslog_enabled:
        app.conf.update(CELERYD_LOG_COLOR=False)
        for signal in (after_setup_logger, after_setup_task_logger):
            signal.connect(setup_log)
Beispiel #23
0
    global n
    n += 1
    logger.info('request type: %s', type(self.request))
    logger.error('{0} Request: {1!r}'.format(n, self.request))


def update_loglevel(*args, **kwargs):
    """Redirect stdout/stderr into the celery app's log at INFO level."""
    app.log.redirect_stdouts(loglevel='INFO')


# it's not at all clear to me why these
# two signals work, or the correct timing at
# which to call the function to redirect the
# stdouts, but this worked, so I felt it
# was wise to just go with it . . .
after_setup_logger.connect(update_loglevel)
worker_process_init.connect(update_loglevel)

from djenga.celery.utils import auto_step


@auto_step(key=1)
def fly_to_the_moon(self):
    pass


@auto_step(key=2)
def shrink_the_moon(self):
    pass

Beispiel #24
0
            webhook_url=app.config["SLACK_WEBHOOK_URL"],
            channel=app.config["SLACK_CHANNEL"],
            format='{0} %(levelname)s - %(asctime)s - %(name)s - %(message)s'.
            format(tag))
        slack_handler.setLevel(logging.ERROR)
        logger.addHandler(slack_handler)
    except urllib.error.HTTPError:
        LOGGER.info(
            "******************** Slack webhook is not working *********************"
        )


if app.config["LOKI_LOGGING"]:
    from celery.signals import after_setup_logger

    after_setup_logger.connect(initialize_loki)

if app.config["SLACK_LOGGING"]:
    from celery.signals import after_setup_logger

    after_setup_logger.connect(initialize_slack_logger)


@after_task_publish.connect
def update_sent_state(sender=None, headers=None, **kwargs):
    """Change task status to SENT when task is published """
    # By default task status is PENDING if you get a non existing task by id
    # its status will be PENDING changing to SENT will confirm task exists

    task = celery.tasks.get(sender)
    backend = task.backend if task else celery.backend
Beispiel #25
0
def after_setup_logger_handler(sender=None,
                               logger=None,
                               loglevel=logging.DEBUG,
                               logfile=None,
                               format=None,
                               colorize=None,
                               **kwds):
    """Mirror celery log records to syslog (facility local7) via /dev/log."""
    syslog_handler = logging.handlers.SysLogHandler(
        address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_LOCAL7)
    # Reuse the format string celery passes with the signal.
    syslog_handler.setFormatter(logging.Formatter(format))
    syslog_handler.setLevel(loglevel)
    logger.addHandler(syslog_handler)


after_setup_logger.connect(after_setup_logger_handler)
after_setup_task_logger.connect(after_setup_logger_handler)

app = Celery(config.get('APPLICATION_NAME'),
             broker=config.get('CELERY_BROKER'),
             backend=config.get('CELERY_BACKEND'),
             include=[
                 'tasks.slack',
             ])

app.conf.update(
    CELERY_DEFAULT_QUEUE=config.get('CELERY_DEFAULT_QUEUE'),
    CELERY_TASK_RESULT_EXPIRES=config.get('CELERY_TASK_RESULT_EXPIRES'),
    CELERY_TRACK_STARTED=True,
    CELERY_QUEUES=(Queue(config.get('CELERY_DEFAULT_QUEUE'),
                         Exchange(config.get('CELERY_DEFAULT_QUEUE')),
Beispiel #26
0
        },
        'audit_logger': {
            'handlers': ['audit'],
            'level': 'DEBUG',
            'propagate': True
        }
    }
}


def configure_logging(logger, **kwargs):
    """Send ERROR and above from this logger to the admins by email."""
    email_handler = AdminEmailHandler()
    email_handler.setLevel(logging.ERROR)
    logger.addHandler(email_handler)

after_setup_logger.connect(configure_logging)

GOOGLE_STEP2_URI = 'http://ona.io/gwelcome'
GOOGLE_CLIENT_ID = '617113120802.onadata.apps.googleusercontent.com'
GOOGLE_CLIENT_SECRET = '9reM29qpGFPyI8TBuB54Z4fk'

THUMB_CONF = {
    'large': {'size': 1280, 'suffix': '-large'},
    'medium': {'size': 640, 'suffix': '-medium'},
    'small': {'size': 240, 'suffix': '-small'},
}
# order of thumbnails from largest to smallest
THUMB_ORDER = ['large', 'medium', 'small']
IMG_FILE_TYPE = 'jpg'

# celery
Beispiel #27
0

def initialize_logstash(logger=None, loglevel=logging.INFO, **kwargs):
    """Ship this logger's records to logstash over TCP; return the logger."""
    logstash_handler = logstash.TCPLogstashHandler(LOGSTASH_HOST,
                                                   LOGSTASH_PORT,
                                                   tags=['celery'],
                                                   message_type='celery',
                                                   version=1)
    logstash_handler.setLevel(loglevel)
    logger.addHandler(logstash_handler)
    return logger


if LOGSTASH_ENABLED:
    after_setup_task_logger.connect(initialize_logstash)
    after_setup_logger.connect(initialize_logstash)

app = Celery('signals')

# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks(['eip', 'influencer', 'stance', 'ethereum_client'])


@app.task(bind=True)
def debug_task(self):
Beispiel #28
0
                    'propagate': False,
                },
                'console_logger': {
                    'handlers': ['console'],
                    'level': 'DEBUG',
                    'propagate': True
                },
                'sentry.errors': {
                    'level': 'DEBUG',
                    'handlers': ['console'],
                    'propagate': False,
                },
            },
        }
        CELERY_WORKER_HIJACK_ROOT_LOGGER = False
        after_setup_logger.connect(celery_logger_setup_handler)

POSTGIS_VERSION = (2, 5, 0)

CELERY_BEAT_SCHEDULE = {
    # Periodically mark exports stuck in the "pending" state as "failed"
    # See https://github.com/kobotoolbox/kobocat/issues/315
    'log-stuck-exports-and-mark-failed': {
        'task': 'onadata.apps.viewer.tasks.log_stuck_exports_and_mark_failed',
        'schedule': timedelta(minutes=1),
        'options': {'queue': 'kobocat_queue'}
    },
}

# ## ISSUE 242 TEMPORARY FIX ###
# See https://github.com/kobotoolbox/kobocat/issues/242
#SYSLOG_ADDRESS = ('syslogserver', 514)
SYSLOG_ADDRESS = '/dev/log'

import logging
from celery.signals import after_setup_logger, after_setup_task_logger
 
def after_setup_logger_handler(sender=None, logger=None, loglevel=None,
                               logfile=None, format=None,
                               colorize=None, **kwds):
    """Forward celery log output to syslog at SYSLOG_ADDRESS."""
    syslog_handler = logging.handlers.SysLogHandler(address=SYSLOG_ADDRESS)
    syslog_handler.setFormatter(logging.Formatter(format))
    # Fall back to INFO when celery passes no explicit level.
    syslog_handler.setLevel(loglevel if loglevel else logging.INFO)
    logger.addHandler(syslog_handler)
 
after_setup_logger.connect(after_setup_logger_handler)
after_setup_task_logger.connect(after_setup_logger_handler)