Example #1
    def __init__(self,
                 host=None,
                 port=None,
                 db=None,
                 password=None,
                 expires=None,
                 max_connections=None,
                 url=None,
                 **kwargs):
        super(RedisBackend, self).__init__(**kwargs)
        conf = self.app.conf
        if self.redis is None:
            raise ImproperlyConfigured(REDIS_MISSING)

        # For compatibility with the old REDIS_* configuration keys.
        def _get(key):
            for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
                try:
                    return conf[prefix.format(key)]
                except KeyError:
                    pass

        if host and '://' in host:
            url, host = host, None
        self.url = url
        uhost = uport = upass = udb = None
        if url:
            _, uhost, uport, _, upass, udb, _ = _parse_url(url)
            udb = udb.strip('/') if udb else 0
        self.host = uhost or host or _get('HOST') or self.host
        self.port = int(uport or port or _get('PORT') or self.port)
        self.db = udb or db or _get('DB') or self.db
        self.password = upass or password or _get('PASSWORD') or self.password
        self.expires = self.prepare_expires(expires, type=int)
        self.max_connections = (max_connections or _get('MAX_CONNECTIONS')
                                or self.max_connections)
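
A hedged usage sketch, not part of the snippet above: this RedisBackend is normally constructed for you when the app's result backend is a redis:// URL, so the host/port/db/password arguments come from that one setting. The URL below is a made-up placeholder.

    from celery import Celery

    # The backend URL supplies host, port, db and password in one place.
    app = Celery('proj', backend='redis://:secret@localhost:6379/1')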
Example #2
    def __init__(self,
                 app,
                 expires=None,
                 backend=None,
                 options=None,
                 url=None,
                 **kwargs):
        options = {} if not options else options
        super(CacheBackend, self).__init__(app, **kwargs)
        self.url = url

        self.options = dict(self.app.conf.cache_backend_options, **options)

        self.backend = url or backend or self.app.conf.cache_backend
        if self.backend:
            self.backend, _, servers = self.backend.partition('://')
            self.servers = servers.rstrip('/').split(';')
        self.expires = self.prepare_expires(expires, type=int)
        try:
            self.Client, self.key_t = backends[self.backend]()
        except KeyError:
            raise ImproperlyConfigured(
                UNKNOWN_BACKEND.format(self.backend, ', '.join(backends)))
        self._encode_prefixes()  # re-encode the key prefixes
Example #3
File: base.py Project: sintezcs/celery
    def wait_for_pending(self,
                         result,
                         timeout=None,
                         interval=0.5,
                         no_ack=True,
                         on_message=None,
                         on_interval=None,
                         callback=None,
                         propagate=True):
        self._ensure_not_eager()
        if on_message is not None:
            raise ImproperlyConfigured(
                'Backend does not support on_message callback')

        meta = self.wait_for(
            result.id,
            timeout=timeout,
            interval=interval,
            on_interval=on_interval,
            no_ack=no_ack,
        )
        if meta:
            result._maybe_set_cache(meta)
            return result.maybe_throw(propagate=propagate, callback=callback)
Example #4
    def __init__(self, dburi=None, expires=None,
                 engine_options=None, url=None, **kwargs):
        # The `url` argument was added later and is used by
        # the app to set backend by url (celery.backends.get_backend_by_url)
        super(DatabaseBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self.expires = maybe_timedelta(self.prepare_expires(expires))
        self.dburi = url or dburi or conf.CELERY_RESULT_DBURI
        self.engine_options = dict(
            engine_options or {},
            **conf.CELERY_RESULT_ENGINE_OPTIONS or {})
        self.short_lived_sessions = kwargs.get(
            'short_lived_sessions',
            conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS,
        )

        tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {}
        Task.__table__.name = tablenames.get('task', 'celery_taskmeta')
        TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta')

        if not self.dburi:
            raise ImproperlyConfigured(
                'Missing connection string! Do you have '
                'CELERY_RESULT_DBURI set to a real value?')
Example #5
    def __init__(
        self,
        url=None,
        container_name=None,
        retry_initial_backoff_sec=None,
        retry_increment_base=None,
        retry_max_attempts=None,
        *args,
        **kwargs
    ):
        super(AzureBlockBlobBackend, self).__init__(*args, **kwargs)

        if azurestorage is None:
            raise ImproperlyConfigured(
                "You need to install the azure-storage library to use the "
                "AzureBlockBlob backend"
            )

        conf = self.app.conf

        self._connection_string = self._parse_url(url)

        self._container_name = container_name or conf["azureblockblob_container_name"]

        self._retry_initial_backoff_sec = (
            retry_initial_backoff_sec
            or conf["azureblockblob_retry_initial_backoff_sec"]
        )

        self._retry_increment_base = (
            retry_increment_base or conf["azureblockblob_retry_increment_base"]
        )

        self._retry_max_attempts = (
            retry_max_attempts or conf["azureblockblob_retry_max_attempts"]
        )
Example #6
    def __init__(self, dburi=None, engine_options=None, url=None, **kwargs):
        # The `url` argument was added later and is used by
        # the app to set backend by url (celery.backends.get_backend_by_url)
        super(DatabaseBackend, self).__init__(
            expires_type=maybe_timedelta, **kwargs
        )
        conf = self.app.conf
        self.dburi = url or dburi or conf.sqlalchemy_dburi
        self.engine_options = dict(
            engine_options or {},
            **conf.sqlalchemy_engine_options or {})
        self.short_lived_sessions = kwargs.get(
            'short_lived_sessions',
            conf.sqlalchemy_short_lived_sessions,
        )

        tablenames = conf.sqlalchemy_table_names or {}
        Task.__table__.name = tablenames.get('task', 'celery_taskmeta')
        TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta')

        if not self.dburi:
            raise ImproperlyConfigured(
                'Missing connection string! Do you have the'
                ' sqlalchemy_dburi setting set to a real value?')
Example #7
    def __init__(self, app=None, **kwargs):
        self.options = {}

        super(MongoBackend, self).__init__(app, **kwargs)

        if not pymongo:
            raise ImproperlyConfigured(
                'You need to install the pymongo library to use the '
                'MongoDB backend.')

        # Set option defaults
        for key, value in items(self._prepare_client_options()):
            self.options.setdefault(key, value)

        # update conf with mongo uri data, only if uri was given
        if self.url:
            if self.url == 'mongodb://':
                self.url += 'localhost'

            uri_data = pymongo.uri_parser.parse_uri(self.url)
            # build the hosts list to create a mongo connection
            hostslist = [
                '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
            ]
            self.user = uri_data['username']
            self.password = uri_data['password']
            self.mongo_host = hostslist
            if uri_data['database']:
                # use the database name from the uri when one is provided
                self.database_name = uri_data['database']

            self.options.update(uri_data['options'])

        # update conf with specific settings
        config = self.app.conf.get('mongodb_backend_settings')
        if config is not None:
            if not isinstance(config, dict):
                raise ImproperlyConfigured(
                    'MongoDB backend settings should be grouped in a dict')
            config = dict(config)  # do not modify original

            if 'host' in config or 'port' in config:
                # these should take over uri conf
                self.mongo_host = None

            self.host = config.pop('host', self.host)
            self.port = config.pop('port', self.port)
            self.mongo_host = config.pop('mongo_host', self.mongo_host)
            self.user = config.pop('user', self.user)
            self.password = config.pop('password', self.password)
            self.database_name = config.pop('database', self.database_name)
            self.taskmeta_collection = config.pop(
                'taskmeta_collection',
                self.taskmeta_collection,
            )
            self.groupmeta_collection = config.pop(
                'groupmeta_collection',
                self.groupmeta_collection,
            )

            self.options.update(config.pop('options', {}))
            self.options.update(config)
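
A hedged illustration, not taken from the snippet above: a mongodb:// result backend URL whose parsed pieces (user, password, node list, database) populate the attributes set in __init__. Host and database names are placeholders.

    app.conf.result_backend = 'mongodb://user:password@mongo1:27017,mongo2:27017/celery_results'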
Example #8
def detect_settings(conf,
                    preconf=None,
                    ignore_keys=None,
                    prefix=None,
                    all_keys=None,
                    old_keys=None):
    preconf = {} if not preconf else preconf
    ignore_keys = set() if not ignore_keys else ignore_keys
    all_keys = SETTING_KEYS if not all_keys else all_keys
    old_keys = _OLD_SETTING_KEYS if not old_keys else old_keys

    source = conf
    if conf is None:
        source, conf = preconf, {}
    have = set(source.keys()) - ignore_keys
    is_in_new = have.intersection(all_keys)
    is_in_old = have.intersection(old_keys)

    info = None
    if is_in_new:
        # have new setting names
        info, left = _settings_info, is_in_old
        if is_in_old and len(is_in_old) > len(is_in_new):
            # Majority of the settings are old.
            info, left = _old_settings_info, is_in_new
    if is_in_old:
        # have old setting names, or a majority of the names are old.
        if not info:
            info, left = _old_settings_info, is_in_new
        if is_in_new and len(is_in_new) > len(is_in_old):
            # Majority of the settings are new
            info, left = _settings_info, is_in_old
    else:
        # no settings, just use new format.
        info, left = _settings_info, is_in_old

    if prefix:
        # always use new format if prefix is used.
        info, left = _settings_info, set()

    # only raise error for keys that the user didn't provide two keys
    # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
    really_left = {key for key in left if info.convert[key] not in have}
    if really_left:
        # user is mixing old/new, or new/old settings, give renaming
        # suggestions.
        raise ImproperlyConfigured(
            info.mix_error.format(renames='\n'.join(
                FMT_REPLACE_SETTING.format(replace=key,
                                           with_=info.convert[key])
                for key in sorted(really_left))))

    preconf = {info.convert.get(k, k): v for k, v in preconf.items()}
    defaults = dict(deepcopy(info.defaults), **preconf)
    return Settings(
        preconf,
        [conf, defaults],
        (_old_key_to_new, _new_key_to_old),
        deprecated_settings=is_in_old,
        prefix=prefix,
    )
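
A hedged illustration of the mixing error this function guards against, assuming a Celery 4+ app whose configuration passes through detect_settings(); the setting values are placeholders.

    from celery import Celery

    class Config:
        result_expires = 3600            # new-style setting name
        CELERY_TASK_SERIALIZER = 'json'  # old-style name for a different setting

    app = Celery('proj')
    # Mixing styles leads to ImproperlyConfigured with rename suggestions
    # (raised once the configuration is finalized).
    app.config_from_object(Config)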
Example #9
from celery import states
from celery.backends.base import BaseBackend
from celery.exceptions import ImproperlyConfigured
from celery.five import range
from celery.utils.timeutils import maybe_timedelta

from .models import Task
from .models import TaskSet
from .session import SessionManager

try:
    from sqlalchemy.exc import DatabaseError, InvalidRequestError
    from sqlalchemy.orm.exc import StaleDataError
except ImportError:  # pragma: no cover
    raise ImproperlyConfigured(
        'The database result backend requires SQLAlchemy to be installed. '
        'See http://pypi.python.org/pypi/SQLAlchemy')

logger = logging.getLogger(__name__)

__all__ = ['DatabaseBackend']


@contextmanager
def session_cleanup(session):
    try:
        yield
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
Example #10
    def __init__(self,
                 host=None,
                 port=None,
                 db=None,
                 password=None,
                 max_connections=None,
                 url=None,
                 connection_pool=None,
                 **kwargs):
        super().__init__(expires_type=int, **kwargs)
        _get = self.app.conf.get
        if self.redis is None:
            raise ImproperlyConfigured(E_REDIS_MISSING.strip())

        if host and '://' in host:
            url, host = host, None

        self.max_connections = (max_connections
                                or _get('redis_max_connections')
                                or self.max_connections)
        self._ConnectionPool = connection_pool

        socket_timeout = _get('redis_socket_timeout')
        socket_connect_timeout = _get('redis_socket_connect_timeout')
        retry_on_timeout = _get('redis_retry_on_timeout')
        socket_keepalive = _get('redis_socket_keepalive')
        health_check_interval = _get('redis_backend_health_check_interval')

        self.connparams = {
            'host':
            _get('redis_host') or 'localhost',
            'port':
            _get('redis_port') or 6379,
            'db':
            _get('redis_db') or 0,
            'password':
            _get('redis_password'),
            'max_connections':
            self.max_connections,
            'socket_timeout':
            socket_timeout and float(socket_timeout),
            'retry_on_timeout':
            retry_on_timeout or False,
            'socket_connect_timeout':
            socket_connect_timeout and float(socket_connect_timeout),
        }

        username = _get('redis_username')
        if username:
            # We're extra careful to avoid including this configuration value
            # if it wasn't specified since older versions of py-redis
            # don't support specifying a username.
            # Only Redis>6.0 supports username/password authentication.

            # TODO: Include this in connparams' definition once we drop
            #       support for py-redis<3.4.0.
            self.connparams['username'] = username

        if health_check_interval:
            self.connparams["health_check_interval"] = health_check_interval

        # absent in redis.connection.UnixDomainSocketConnection
        if socket_keepalive:
            self.connparams['socket_keepalive'] = socket_keepalive

        # "redis_backend_use_ssl" must be a dict with the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        # (the same as "broker_use_ssl")
        ssl = _get('redis_backend_use_ssl')
        if ssl:
            self.connparams.update(ssl)
            self.connparams['connection_class'] = self.connection_class_ssl

        if url:
            self.connparams = self._params_from_url(url, self.connparams)

        # If we've received SSL parameters via query string or the
        # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
        # via query string ssl_cert_reqs will be a string so convert it here
        if ('connection_class' in self.connparams and issubclass(
                self.connparams['connection_class'], redis.SSLConnection)):
            ssl_cert_reqs_missing = 'MISSING'
            ssl_string_to_constant = {
                'CERT_REQUIRED': CERT_REQUIRED,
                'CERT_OPTIONAL': CERT_OPTIONAL,
                'CERT_NONE': CERT_NONE,
                'required': CERT_REQUIRED,
                'optional': CERT_OPTIONAL,
                'none': CERT_NONE
            }
            ssl_cert_reqs = self.connparams.get('ssl_cert_reqs',
                                                ssl_cert_reqs_missing)
            ssl_cert_reqs = ssl_string_to_constant.get(ssl_cert_reqs,
                                                       ssl_cert_reqs)
            if ssl_cert_reqs not in ssl_string_to_constant.values():
                raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING_INVALID)

            if ssl_cert_reqs == CERT_OPTIONAL:
                logger.warning(W_REDIS_SSL_CERT_OPTIONAL)
            elif ssl_cert_reqs == CERT_NONE:
                logger.warning(W_REDIS_SSL_CERT_NONE)
            self.connparams['ssl_cert_reqs'] = ssl_cert_reqs

        self.url = url

        self.connection_errors, self.channel_errors = (
            get_redis_error_classes() if get_redis_error_classes else ((), ()))
        self.result_consumer = self.ResultConsumer(
            self,
            self.app,
            self.accept,
            self._pending_results,
            self._pending_messages,
        )
Example #11
    def init_queues(self):
        try:
            self.app.select_queues(self.use_queues)
        except KeyError as exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
Example #12
    def __init__(self,
                 host=None,
                 port=None,
                 db=None,
                 password=None,
                 max_connections=None,
                 url=None,
                 connection_pool=None,
                 **kwargs):
        super(RedisBackend, self).__init__(expires_type=int, **kwargs)
        _get = self.app.conf.get
        if self.redis is None:
            raise ImproperlyConfigured(E_REDIS_MISSING.strip())

        if host and '://' in host:
            url, host = host, None

        self.max_connections = (max_connections
                                or _get('redis_max_connections')
                                or self.max_connections)
        self._ConnectionPool = connection_pool

        socket_timeout = _get('redis_socket_timeout')
        socket_connect_timeout = _get('redis_socket_connect_timeout')
        retry_on_timeout = _get('redis_retry_on_timeout')
        socket_keepalive = _get('redis_socket_keepalive')

        self.connparams = {
            'host':
            _get('redis_host') or 'localhost',
            'port':
            _get('redis_port') or 6379,
            'db':
            _get('redis_db') or 0,
            'password':
            _get('redis_password'),
            'max_connections':
            self.max_connections,
            'socket_timeout':
            socket_timeout and float(socket_timeout),
            'retry_on_timeout':
            retry_on_timeout or False,
            'socket_connect_timeout':
            socket_connect_timeout and float(socket_connect_timeout),
        }

        # absent in redis.connection.UnixDomainSocketConnection
        if socket_keepalive:
            self.connparams['socket_keepalive'] = socket_keepalive

        # "redis_backend_use_ssl" must be a dict with the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        # (the same as "broker_use_ssl")
        ssl = _get('redis_backend_use_ssl')
        if ssl:
            self.connparams.update(ssl)
            self.connparams['connection_class'] = redis.SSLConnection

        if url:
            self.connparams = self._params_from_url(url, self.connparams)

        # If we've received SSL parameters via query string or the
        # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
        # via query string ssl_cert_reqs will be a string so convert it here
        if ('connection_class' in self.connparams and
                self.connparams['connection_class'] is redis.SSLConnection):
            ssl_cert_reqs_missing = 'MISSING'
            ssl_string_to_constant = {
                'CERT_REQUIRED': CERT_REQUIRED,
                'CERT_OPTIONAL': CERT_OPTIONAL,
                'CERT_NONE': CERT_NONE,
                'required': CERT_REQUIRED,
                'optional': CERT_OPTIONAL,
                'none': CERT_NONE
            }
            ssl_cert_reqs = self.connparams.get('ssl_cert_reqs',
                                                ssl_cert_reqs_missing)
            ssl_cert_reqs = ssl_string_to_constant.get(ssl_cert_reqs,
                                                       ssl_cert_reqs)
            if ssl_cert_reqs not in ssl_string_to_constant.values():
                raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING_INVALID)

            if ssl_cert_reqs == CERT_OPTIONAL:
                logger.warning(W_REDIS_SSL_CERT_OPTIONAL)
            elif ssl_cert_reqs == CERT_NONE:
                logger.warning(W_REDIS_SSL_CERT_NONE)
            self.connparams['ssl_cert_reqs'] = ssl_cert_reqs

        self.url = url

        self.connection_errors, self.channel_errors = (
            get_redis_error_classes() if get_redis_error_classes else ((), ()))
        self.result_consumer = self.ResultConsumer(
            self,
            self.app,
            self.accept,
            self._pending_results,
            self._pending_messages,
        )
Example #13
    def __init__(self, url=None, table_name=None, *args, **kwargs):
        super(DynamoDBBackend, self).__init__(*args, **kwargs)

        self.url = url
        self.table_name = table_name or self.table_name

        if not boto3:
            raise ImproperlyConfigured(
                'You need to install the boto3 library to use the '
                'DynamoDB backend.')

        aws_credentials_given = False
        aws_access_key_id = None
        aws_secret_access_key = None

        if url is not None:
            scheme, region, port, username, password, table, query = \
                parse_url(url)

            aws_access_key_id = username
            aws_secret_access_key = password

            access_key_given = aws_access_key_id is not None
            secret_key_given = aws_secret_access_key is not None

            if access_key_given != secret_key_given:
                raise ImproperlyConfigured(
                    'You need to specify both the Access Key ID '
                    'and Secret.')

            aws_credentials_given = access_key_given

            if region == 'localhost':
                # We are using the downloadable, local version of DynamoDB
                self.endpoint_url = 'http://localhost:{}'.format(port)
                self.aws_region = 'us-east-1'
                logger.warning(
                    'Using local-only DynamoDB endpoint URL: {}'.format(
                        self.endpoint_url))
            else:
                self.aws_region = region

            # If endpoint_url is explicitly set use it instead
            _get = self.app.conf.get
            config_endpoint_url = _get('dynamodb_endpoint_url')
            if config_endpoint_url:
                self.endpoint_url = config_endpoint_url

            self.read_capacity_units = int(
                query.get('read', self.read_capacity_units))
            self.write_capacity_units = int(
                query.get('write', self.write_capacity_units))
            self.table_name = table or self.table_name

        self._available_fields = (self._key_field, self._value_field,
                                  self._timestamp_field)

        self._client = None
        if aws_credentials_given:
            self._get_client(access_key_id=aws_access_key_id,
                             secret_access_key=aws_secret_access_key)
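
A hedged illustration of the URL shape the parsing above expects; the access key, secret, and table name are placeholders, and a 'localhost' region would select the local downloadable DynamoDB instead of AWS.

    app.conf.result_backend = (
        'dynamodb://aws_key_id:aws_secret@us-east-1/celery_results?read=5&write=5'
    )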
Example #14
SETTING_MISSING = """\
You have to configure a special task serializer
for signing and verifying tasks:
    * task_serializer = 'auth'

You have to accept only tasks which are serialized with 'auth'.
There is no point in signing messages if they are not verified.
    * accept_content = ['auth']
"""

__all__ = ("setup_security", )

try:
    import cryptography  # noqa
except ImportError:
    raise ImproperlyConfigured(CRYPTOGRAPHY_NOT_INSTALLED)


def setup_security(
    allowed_serializers=None,
    key=None,
    cert=None,
    store=None,
    digest=None,
    serializer="json",
    app=None,
):
    """See :meth:`@Celery.setup_security`."""
    if app is None:
        from celery import current_app
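
A hedged sketch of the configuration the SETTING_MISSING message above asks for; a real deployment would also need the security_key, security_certificate, and security_cert_store settings before setup_security() succeeds.

    app.conf.update(
        task_serializer='auth',
        accept_content=['auth'],
    )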
Example #15
File: task.py Project: mahak/celery
    def replace(self, sig):
        """Replace this task, with a new task inheriting the task id.

        Execution of the host task ends immediately and no subsequent statements
        will be run.

        .. versionadded:: 4.0

        Arguments:
            sig (Signature): signature to replace with.

        Raises:
            ~@Ignore: This is always raised when called in asynchronous context.
            It is best to always use ``return self.replace(...)`` to convey
            to the reader that the task won't continue after being replaced.
        """
        chord = self.request.chord
        if 'chord' in sig.options:
            raise ImproperlyConfigured(
                "A signature replacing a task must not be part of a chord")
        if isinstance(sig, _chain) and not getattr(sig, "tasks", True):
            raise ImproperlyConfigured("Cannot replace with an empty chain")

        # Ensure callbacks or errbacks from the replaced signature are retained
        if isinstance(sig, group):
            # Groups get uplifted to a chord so that we can link onto the body
            sig |= self.app.tasks['celery.accumulate'].s(index=0)
        for callback in maybe_list(self.request.callbacks) or []:
            sig.link(callback)
        for errback in maybe_list(self.request.errbacks) or []:
            sig.link_error(errback)
        # If the replacement signature is a chain, we need to push callbacks
        # down to the final task so they run at the right time even if we
        # proceed to link further tasks from the original request below
        if isinstance(sig, _chain) and "link" in sig.options:
            final_task_links = sig.tasks[-1].options.setdefault("link", [])
            final_task_links.extend(maybe_list(sig.options["link"]))
        # We need to freeze the replacement signature with the current task's
        # ID to ensure that we don't disassociate it from the existing task IDs
        # which would break previously constructed results objects.
        sig.freeze(self.request.id)
        # Ensure the important options from the original signature are retained
        replaced_task_nesting = self.request.get('replaced_task_nesting',
                                                 0) + 1
        sig.set(chord=chord,
                group_id=self.request.group,
                group_index=self.request.group_index,
                root_id=self.request.root_id,
                replaced_task_nesting=replaced_task_nesting)
        # If the task being replaced is part of a chain, we need to re-create
        # it with the replacement signature - these subsequent tasks will
        # retain their original task IDs as well
        for t in reversed(self.request.chain or []):
            sig |= signature(t, app=self.app)
        # Stamping sig with parents groups
        stamped_headers = self.request.stamped_headers
        if self.request.stamps:
            groups = self.request.stamps.get("groups")
            sig.stamp(visitor=GroupStampingVisitor(
                groups=groups, stamped_headers=stamped_headers))

        # Finally, either apply or delay the new signature!
        if self.request.is_eager:
            return sig.apply().get()
        else:
            sig.delay()
            raise Ignore('Replaced by new task')
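
A hedged usage sketch of the ``return self.replace(...)`` pattern the docstring recommends; ``download`` and ``process`` are hypothetical task signatures.

    @app.task(bind=True)
    def fetch_and_process(self, url):
        # Hand the rest of the work over to a chain built at runtime.
        return self.replace(download.s(url) | process.s())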
Example #16
    def get_connection(self):
        if Redis is None:
            raise ImproperlyConfigured('`redis` library is not installed')

        return Redis(**self.options)
Example #17
    def join(self,
             timeout=None,
             propagate=True,
             interval=0.5,
             callback=None,
             no_ack=True,
             on_message=None,
             disable_sync_subtasks=True,
             on_interval=None):
        """Gather the results of all tasks as a list in order.

        Note:
            This can be an expensive operation for result store
            backends that must resort to polling (e.g., database).

            You should consider using :meth:`join_native` if your backend
            supports it.

        Warning:
            Waiting for tasks within a task may lead to deadlocks.
            Please see :ref:`task-synchronous-subtasks`.

        Arguments:
            timeout (float): The number of seconds to wait for results
                before the operation times out.
            propagate (bool): If any of the tasks raises an exception,
                the exception will be re-raised when this flag is set.
            interval (float): Time to wait (in seconds) before retrying to
                retrieve a result from the set.  Note that this does not have
                any effect when using the amqp result store backend,
                as it does not use polling.
            callback (Callable): Optional callback to be called for every
                result received.  Must have signature ``(task_id, value)``
                No results will be returned by this function if a callback
                is specified.  The order of results is also arbitrary when a
                callback is used.  To get access to the result object for
                a particular id you'll have to generate an index first:
                ``index = {r.id: r for r in gres.results.values()}``
                Or you can create new result objects on the fly:
                ``result = app.AsyncResult(task_id)`` (both will
                take advantage of the backend cache anyway).
            no_ack (bool): Automatic message acknowledgment (Note that if this
                is set to :const:`False` then the messages
                *will not be acknowledged*).
            disable_sync_subtasks (bool): When enabled (the default), raise an
                error if ``join`` is called from within a task, to guard
                against deadlocks from waiting on subtasks.  CAUTION: do not
                disable this unless you really must.

        Raises:
            celery.exceptions.TimeoutError: if ``timeout`` isn't
                :const:`None` and the operation takes longer than ``timeout``
                seconds.
        """
        if disable_sync_subtasks:
            assert_will_not_block()
        time_start = monotonic()
        remaining = None

        if on_message is not None:
            raise ImproperlyConfigured(
                'Backend does not support on_message callback')

        results = []
        for result in self.results:
            remaining = None
            if timeout:
                remaining = timeout - (monotonic() - time_start)
                if remaining <= 0.0:
                    raise TimeoutError('join operation timed out')
            value = result.get(
                timeout=remaining,
                propagate=propagate,
                interval=interval,
                no_ack=no_ack,
                on_interval=on_interval,
            )
            if callback:
                callback(result.id, value)
            else:
                results.append(value)
        return results
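
A hedged usage sketch for join() with a callback, following the docstring above; ``gres`` is assumed to be an existing GroupResult.

    received = {}
    gres.join(
        timeout=30.0,
        callback=lambda task_id, value: received.update({task_id: value}),
    )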
Example #18
    def _parse_url(cls, url, prefix=AZURE_BLOCK_BLOB_CONNECTION_PREFIX):
        connection_string = url[len(prefix):]
        if not connection_string:
            raise ImproperlyConfigured("Invalid URL")

        return connection_string
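
A hypothetical illustration of the URL format _parse_url() accepts: everything after the 'azureblockblob://' prefix is kept verbatim as the Azure connection string (the value below is a placeholder).

    url = ('azureblockblob://'
           'DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=...')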
Example #19
    def replace(self, sig):
        """Replace this task, with a new task inheriting the task id.

        Execution of the host task ends immediately and no subsequent statements
        will be run.

        .. versionadded:: 4.0

        Arguments:
            sig (~@Signature): signature to replace with.

        Raises:
            ~@Ignore: This is always raised when called in asynchronous context.
            It is best to always use ``return self.replace(...)`` to convey
            to the reader that the task won't continue after being replaced.
        """
        chord = self.request.chord
        if 'chord' in sig.options:
            raise ImproperlyConfigured(
                "A signature replacing a task must not be part of a chord"
            )

        if isinstance(sig, group):
            sig |= self.app.tasks['celery.accumulate'].s(index=0).set(
                link=self.request.callbacks,
                link_error=self.request.errbacks,
            )
        elif isinstance(sig, _chain):
            if not sig.tasks:
                raise ImproperlyConfigured(
                    "Cannot replace with an empty chain"
                )

        if self.request.chain:
            # We need to freeze the new signature with the current task's ID to
            # ensure that we don't disassociate the new chain from the existing
            # task IDs which would break previously constructed results
            # objects.
            sig.freeze(self.request.id)
            if "link" in sig.options:
                final_task_links = sig.tasks[-1].options.setdefault("link", [])
                final_task_links.extend(maybe_list(sig.options["link"]))
            # Construct the new remainder of the task by chaining the signature
            # we're being replaced by with signatures constructed from the
            # chain elements in the current request.
            for t in reversed(self.request.chain):
                sig |= signature(t, app=self.app)

        sig.set(
            chord=chord,
            group_id=self.request.group,
            group_index=self.request.group_index,
            root_id=self.request.root_id,
        )
        sig.freeze(self.request.id)

        if self.request.is_eager:
            task_result = sig.apply()
            with allow_join_result():
                return task_result.get()
        else:
            sig.delay()
            raise Ignore('Replaced by new task')
Example #20
File: redis.py Project: wyatt88/celery
    def __init__(self,
                 host=None,
                 port=None,
                 db=None,
                 password=None,
                 max_connections=None,
                 url=None,
                 connection_pool=None,
                 **kwargs):
        super(RedisBackend, self).__init__(expires_type=int, **kwargs)
        _get = self.app.conf.get
        if self.redis is None:
            raise ImproperlyConfigured(E_REDIS_MISSING.strip())

        if host and '://' in host:
            url, host = host, None

        self.max_connections = (max_connections
                                or _get('redis_max_connections')
                                or self.max_connections)
        self._ConnectionPool = connection_pool

        socket_timeout = _get('redis_socket_timeout')
        socket_connect_timeout = _get('redis_socket_connect_timeout')

        self.connparams = {
            'host':
            _get('redis_host') or 'localhost',
            'port':
            _get('redis_port') or 6379,
            'db':
            _get('redis_db') or 0,
            'password':
            _get('redis_password'),
            'max_connections':
            self.max_connections,
            'socket_timeout':
            socket_timeout and float(socket_timeout),
            'socket_connect_timeout':
            socket_connect_timeout and float(socket_connect_timeout),
        }

        # "redis_backend_use_ssl" must be a dict with the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        # (the same as "broker_use_ssl")
        ssl = _get('redis_backend_use_ssl')
        if ssl:
            self.connparams.update(ssl)
            self.connparams['connection_class'] = redis.SSLConnection

        if url:
            self.connparams = self._params_from_url(url, self.connparams)
        self.url = url

        self.connection_errors, self.channel_errors = (
            get_redis_error_classes() if get_redis_error_classes else ((), ()))
        self.result_consumer = self.ResultConsumer(
            self,
            self.app,
            self.accept,
            self._pending_results,
            self._pending_messages,
        )
Example #21
    def __init__(self, app=None, **kwargs):
        self.options = {}

        super(MongoBackend, self).__init__(app, **kwargs)

        if not pymongo:
            raise ImproperlyConfigured(
                "You need to install the pymongo library to use the "
                "MongoDB backend.")

        # Set option defaults
        for key, value in items(self._prepare_client_options()):
            self.options.setdefault(key, value)

        # update conf with mongo uri data, only if uri was given
        if self.url:
            self.url = self._ensure_mongodb_uri_compliance(self.url)

            uri_data = pymongo.uri_parser.parse_uri(self.url)
            # build the hosts list to create a mongo connection
            hostslist = [
                "{0}:{1}".format(x[0], x[1]) for x in uri_data["nodelist"]
            ]
            self.user = uri_data["username"]
            self.password = uri_data["password"]
            self.mongo_host = hostslist
            if uri_data["database"]:
                # use the database name from the uri when one is provided
                self.database_name = uri_data["database"]

            self.options.update(uri_data["options"])

        # update conf with specific settings
        config = self.app.conf.get("mongodb_backend_settings")
        if config is not None:
            if not isinstance(config, dict):
                raise ImproperlyConfigured(
                    "MongoDB backend settings should be grouped in a dict")
            config = dict(config)  # don't modify original

            if "host" in config or "port" in config:
                # these should take over uri conf
                self.mongo_host = None

            self.host = config.pop("host", self.host)
            self.port = config.pop("port", self.port)
            self.mongo_host = config.pop("mongo_host", self.mongo_host)
            self.user = config.pop("user", self.user)
            self.password = config.pop("password", self.password)
            self.database_name = config.pop("database", self.database_name)
            self.taskmeta_collection = config.pop(
                "taskmeta_collection",
                self.taskmeta_collection,
            )
            self.groupmeta_collection = config.pop(
                "groupmeta_collection",
                self.groupmeta_collection,
            )

            self.options.update(config.pop("options", {}))
            self.options.update(config)
Example #22
    def _parse_url(cls, url, prefix="azureblockblob://"):
        connection_string = url[len(prefix):]
        if not connection_string:
            raise ImproperlyConfigured("Invalid URL")

        return connection_string
Example #23
    def __init__(self, app=None, url=None, **kwargs):
        """Initialize MongoDB backend instance.

        :raises celery.exceptions.ImproperlyConfigured: if
            module :mod:`pymongo` is not available.

        """
        self.options = {}

        super(MongoBackend, self).__init__(app, **kwargs)

        if not pymongo:
            raise ImproperlyConfigured(
                'You need to install the pymongo library to use the '
                'MongoDB backend.')

        self.url = url

        # Set option defaults
        for key, value in items(self._prepare_client_options()):
            self.options.setdefault(key, value)

        # update conf with mongo uri data, only if uri was given
        if self.url:
            uri_data = pymongo.uri_parser.parse_uri(self.url)
            # build the hosts list to create a mongo connection
            hostslist = [
                '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
            ]
            self.user = uri_data['username']
            self.password = uri_data['password']
            self.mongo_host = hostslist
            if uri_data['database']:
                # use the database name from the uri when one is provided
                self.database_name = uri_data['database']

            self.options.update(uri_data['options'])

        # update conf with specific settings
        config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS')
        if config is not None:
            if not isinstance(config, dict):
                raise ImproperlyConfigured(
                    'MongoDB backend settings should be grouped in a dict')
            config = dict(config)  # do not modify original

            if 'host' in config or 'port' in config:
                # these should take over uri conf
                self.mongo_host = None

            self.host = config.pop('host', self.host)
            self.port = config.pop('port', self.port)
            self.mongo_host = config.pop('mongo_host', self.mongo_host)
            self.user = config.pop('user', self.user)
            self.password = config.pop('password', self.password)
            self.database_name = config.pop('database', self.database_name)
            self.taskmeta_collection = config.pop(
                'taskmeta_collection',
                self.taskmeta_collection,
            )
            self.groupmeta_collection = config.pop(
                'groupmeta_collection',
                self.groupmeta_collection,
            )

            self.options.update(config.pop('options', {}))
            self.options.update(config)
Example #24
    def _find_path(self, url):
        if not url:
            raise ImproperlyConfigured(
                'You need to configure a path for the File-system backend')
        if url is not None and url.startswith('file:///'):
            return url[7:]
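
A hedged illustration of the file-system backend URL _find_path() handles: the leading 'file://' is stripped, leaving the absolute directory path (the path below is a made-up example).

    app.conf.result_backend = 'file:///var/run/celery/results'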
Example #25
from celery import states
from celery.backends.base import BaseBackend
from celery.exceptions import ImproperlyConfigured
from celery.five import range
from celery.utils.time import maybe_timedelta

from celery.backends.database.models import Task
from celery.backends.database.models import TaskSet
from celery.backends.database.session import SessionManager

try:
    from sqlalchemy.exc import DatabaseError, InvalidRequestError
    from sqlalchemy.orm.exc import StaleDataError
except ImportError:  # pragma: no cover
    raise ImproperlyConfigured(
        'The database result backend requires SQLAlchemy to be installed. '
        'See https://pypi.org/project/SQLAlchemy/')

logger = logging.getLogger(__name__)

__all__ = ('DatabaseBackend', )


@contextmanager
def session_cleanup(session):
    try:
        yield
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
Example #26
    def __init__(self, *args, **kwargs):
        if self.sentinel is None:
            raise ImproperlyConfigured(E_REDIS_SENTINEL_MISSING.strip())

        super().__init__(*args, **kwargs)
Example #27
def _verify_django_version(django: "ModuleType") -> None:
    if django.VERSION < (1, 11):
        raise ImproperlyConfigured('Celery 5.x requires Django 1.11 or later.')
Example #28
def _verify_django_version(django):
    if django.VERSION < (1, 11):
        raise ImproperlyConfigured('Celery 4.x requires Django 1.11 or later.')
Example #29
    def __init__(self, url=None, table_name=None, *args, **kwargs):
        super(DynamoDBBackend, self).__init__(*args, **kwargs)

        self.url = url
        self.table_name = table_name or self.table_name

        if not boto3:
            raise ImproperlyConfigured(
                "You need to install the boto3 library to use the " "DynamoDB backend."
            )

        aws_credentials_given = False
        aws_access_key_id = None
        aws_secret_access_key = None

        if url is not None:
            scheme, region, port, username, password, table, query = parse_url(url)

            aws_access_key_id = username
            aws_secret_access_key = password

            access_key_given = aws_access_key_id is not None
            secret_key_given = aws_secret_access_key is not None

            if access_key_given != secret_key_given:
                raise ImproperlyConfigured(
                    "You need to specify both the Access Key ID " "and Secret."
                )

            aws_credentials_given = access_key_given

            if region == "localhost":
                # We are using the downloadable, local version of DynamoDB
                self.endpoint_url = "http://localhost:{}".format(port)
                self.aws_region = "us-east-1"
                logger.warning(
                    "Using local-only DynamoDB endpoint URL: {}".format(
                        self.endpoint_url
                    )
                )
            else:
                self.aws_region = region

            # If endpoint_url is explicitly set use it instead
            _get = self.app.conf.get
            config_endpoint_url = _get("dynamodb_endpoint_url")
            if config_endpoint_url:
                self.endpoint_url = config_endpoint_url

            self.read_capacity_units = int(query.get("read", self.read_capacity_units))
            self.write_capacity_units = int(
                query.get("write", self.write_capacity_units)
            )

            ttl = query.get("ttl_seconds", self.time_to_live_seconds)
            if ttl:
                try:
                    self.time_to_live_seconds = int(ttl)
                except ValueError as e:
                    logger.error('TTL must be a number; got "%s"', ttl, exc_info=e)
                    raise e

            self.table_name = table or self.table_name

        self._available_fields = (
            self._key_field,
            self._value_field,
            self._timestamp_field,
        )

        self._client = None
        if aws_credentials_given:
            self._get_client(
                access_key_id=aws_access_key_id, secret_access_key=aws_secret_access_key
            )