def __call__(self, f):
    @wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except self.NON_RETRYABLE_EXCEPTIONS:
            raise
        except Exception as e:
            number_of_prior_retries = task_func.request.retries
            # Whilst the New Relic agent does report the exception that caused a retry,
            # it does so in a form like:
            #   `celery.exceptions:Retry: Retry in 640s: error('Error -3 while decompressing: incorrect header check',)`
            # ...which causes all retry exceptions to be lumped together in the same
            # `celery.exceptions:Retry` group. The original exception is then only
            # reported to New Relic once the max number of retries has been reached.
            # As such we manually report the retried exceptions to New Relic here, so
            # that the original exception is shown verbatim immediately, and then filter
            # out the automatic `celery.exceptions:Retry` exceptions via the web UI. See:
            # https://docs.newrelic.com/docs/agents/python-agent/back-end-services/python-agent-celery#ignoring-task-retry-errors
            params = {
                "number_of_prior_retries": number_of_prior_retries,
            }
            newrelic.agent.record_exception(params=params)
            # Implement exponential backoff with some randomness to prevent
            # thundering herd type problems. Constant factor chosen so we get
            # reasonable pause between the fastest retries.
            timeout = 10 * int(random.uniform(1.9, 2.1) ** number_of_prior_retries)
            raise task_func.retry(exc=e, countdown=timeout)

    task_func = task(*self.task_args, **self.task_kwargs)(inner)
    return task_func
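# Illustration (added; not part of the snippet above): the countdown formula
# 10 * int(random.uniform(1.9, 2.1) ** n) grows roughly as 10 * 2**n seconds
# with a few percent of jitter, so many workers retrying the same failure
# spread out rather than stampeding the backend at the same instant.
import random


def backoff_schedule(retries=5):
    """Sample countdowns for the first `retries` retries (jitter varies)."""
    return [10 * int(random.uniform(1.9, 2.1) ** n) for n in range(retries)]

# backoff_schedule() -> e.g. [10, 20, 40, 70, 170]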
def decorator(proc):
    def decorated_proc(*args, **kwargs):
        cache.set(cache_id, current_task.request.id, 60 * 60)
        return proc(*args, **kwargs)

    decorated_task = task(base=SingleInstanceTask, *args, **kwargs)(decorated_proc)
    decorated_task.django_cache_id = cache_id
    decorated_task.logger = logger
    return decorated_task
def update_cache(task, *args, **kwargs):
    task_id = kwargs['task_id']
    try:
        result = task(*args, **kwargs)
        time = str(datetime.now())
        cache.set(task.__name__ + task_id + '_short', result, settings.SHORT_CACHE)
        cache.set(task.__name__ + task_id + '_long', result, settings.LONG_CACHE)
        cache.set(task.__name__ + task_id + '_status',
                  'broker ' + task.__name__ + task_id + ' ' + time,
                  settings.LONG_CACHE)
        return task.__name__ + task_id + ' updated'
    except Exception as e:
        return task.__name__ + ' failed'
def __call__(self, f):
    @wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except self.raise_exceptions:
            raise
        except Exception as e:
            # Implement exponential backoff with some randomness to prevent
            # thundering herd type problems. Constant factor chosen so we get
            # reasonable pause between the fastest retries.
            # Note: retry() must be called on the task object, not on the
            # plain wrapped function, which has no `retry` or `request`.
            task_func.retry(exc=e,
                            countdown=10 * int(random.uniform(2, 3) ** task_func.request.retries))

    task_func = task(*self.task_args, **self.task_kwargs)(inner)
    return task_func
def runTask(task, *args, **kwargs):
    LOCK_EXPIRE = 60 * 5  # Lock expires in 5 minutes
    if 'task_id' in kwargs:
        task_id = kwargs['task_id']
    else:
        task_id = ""
        kwargs['task_id'] = ""
    long_cache_key, short_cache_key = getCacheKeys(task, *args, **kwargs)

    # if short cache available use it
    result = cache.get(short_cache_key)
    cache_status = "Short " + str(datetime.now())
    lock_id = long_cache_key + "_lock"
    if not result:
        # else use long cache then update both caches in background
        result = cache.get(long_cache_key)
        if 'cacheonly' in kwargs:
            if kwargs['cacheonly'] == "true":
                cache_status = ''
                return cache_status, result

        # cache.add fails if the key already exists
        acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)
        # memcache delete is very slow, but we have to use it to take
        # advantage of using add() for atomic locking
        release_lock = lambda: cache.delete(lock_id)
        if acquire_lock():
            if 'nocache' in kwargs:
                if kwargs['nocache'] == "true":
                    try:
                        result = task(*args, **kwargs)
                        update_cache(task, *args, **kwargs)
                    except Exception as e:
                        message = e.message
                    finally:
                        release_lock()
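# Sketch (added): the add()-based lock from runTask above in isolation,
# against Django's cache API; names here are illustrative, not from the
# snippet. cache.add() is atomic and returns False when the key already
# exists, so only one caller at a time runs the guarded work.
from django.core.cache import cache

LOCK_EXPIRE = 60 * 5  # lock expires in 5 minutes


def run_once(lock_id, work):
    if cache.add(lock_id, 'true', LOCK_EXPIRE):
        try:
            return work()
        finally:
            # memcache delete is slow, but it is the price of using add()
            # for atomic locking
            cache.delete(lock_id)
    return None  # another worker holds the lock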
def optional_celery(**kparms):
    name = kparms.pop('name', None)

    def wrapped(func):
        def inner(*args, **kw):
            return func(*args, **kw)
        return inner

    if settings.USE_CELERY_DECORATOR:
        from celery import task
        wrapper = task(**kparms)
    elif settings.CELERY:
        wrapper = settings.CELERY.task(**kparms)
    else:
        wrapper = wrapped
    return wrapper
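# Usage sketch (added): with optional_celery above, the same function becomes
# a real Celery task when the settings enable one, and stays a plain callable
# otherwise. `send_report` is a hypothetical example, not from the snippet.
@optional_celery(name='send_report')
def send_report(report_id):
    print('sending report %s' % report_id)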
def test_task_alias(self):
    from celery import task
    assert task.__file__
    assert task(add)
def assert_compat_decorator(self, decorator, type, **opts):
    task = decorator(**opts)(add)
    assert task(8, 8) == 16
    assert isinstance(task, type)
def assertIsTask(self, t, expected):
    self.assertEqual(t, task(expected))
def process(self):
    def _process():
        print('doing...')

    return [celery.task(name='delete_vpc_doing')(_process)]
    }
    tags = [resource_tag(RESOURCE_REPOSITORY_TYPE, repo_id),
            action_tag('sync')]
    result = sync.apply_async_with_reservation(RESOURCE_REPOSITORY_TYPE, repo_id,
                                               tags=tags, kwargs=kwargs)
    return result


sync = task(RepoSyncManager.sync, base=Task)


def _now_timestamp():
    """
    @return: iso 8601 UTC timestamp suitable for indicating when a sync completed
    @rtype: str
    """
    now = dateutils.now_utc_datetime_with_tzinfo()
    now_in_iso_format = dateutils.format_iso8601_datetime(now)
    return now_in_iso_format


def _repo_storage_dir():
    storage_dir = pulp_config.config.get('server', 'storage_dir')
    dir = os.path.join(storage_dir, 'repos')
""" Checks that Pylint does not complain about certain aspects of the Celery library """ # pylint: disable=C0111,R0903,W0232 from celery import task @task(queue='celery') def test_task(an_arg, another_arg): return an_arg + another_arg TEST_TASK_NON_DECORATOR = task(ignore_results=True)(lambda: 0)
    @param repo_id: identifies the repo
    @type repo_id: str

    @return: full path to the directory in which an importer can store the
             given repository as it is synchronized
    @rtype: str
    """
    dir = os.path.join(REPO_STORAGE_DIR, repo_id)
    if not os.path.exists(dir):
        os.makedirs(dir)
    return dir


sync = task(RepoSyncManager.sync, base=Task)


def _now_timestamp():
    """
    @return: timestamp suitable for indicating when a sync completed
    @rtype: str
    """
    now = datetime.datetime.now(dateutils.local_tz())
    now_in_iso_format = dateutils.format_iso8601_datetime(now)
    return now_in_iso_format


def _repo_storage_dir():
    storage_dir = pulp_config.config.get('server', 'storage_dir')
    dir = os.path.join(storage_dir, 'repos')
def test_task_alias(self):
    from celery import task
    self.assertTrue(task.__file__)
    self.assertTrue(task(add))
def assertCompatDecorator(self, decorator, type, **opts):
    task = decorator(**opts)(add)
    self.assertEqual(task(8, 8), 16)
    self.assertIsInstance(task, type)
        @type distributor_id: str
        @param action_id: The ID of the request to begin tracking.
        @type action_id: str
        """
        collection = Bind.get_collection()
        query = self.bind_id(consumer_id, repo_id, distributor_id)
        query['consumer_actions.id'] = action_id
        update = {'$set': {'consumer_actions.$.status': Bind.Status.FAILED}}
        collection.update(query, update, safe=True)

    def find_action(self, action_id):
        """
        Find a consumer action by ID.
        @param action_id: An action ID.
        @type action_id: str
        @return: The action if found, else None
        """
        collection = Bind.get_collection()
        query = {'consumer_actions.id': action_id}
        binding = collection.find_one(query)
        if binding is None:
            return
        for action in binding['consumer_actions']:
            if action['id'] == action_id:
                return action


bind = task(BindManager.bind, base=Task)
delete = task(BindManager.delete, base=Task)
unbind = task(BindManager.unbind, base=Task)
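# Pattern note (added): throughout these modules, task() is called as a plain
# function instead of with @-decorator syntax, turning existing manager
# methods into Celery tasks without touching their definitions. Dispatch then
# looks like this (sketch; the argument list is illustrative):
#
#     bind.apply_async(args=[consumer_id, repo_id, distributor_id])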
                # Log a message so that we can debug but don't throw
                _logger.warn(e)
                errors.append(e)
            except Exception as e:
                _logger.exception(e)
                errors.append(e)
                # Don't do anything else since we still want to process all the other consumers

        error = None
        if len(errors) > 0:
            error = PulpCodedException(error_code, **error_kwargs)
            error.child_exceptions = errors
        return TaskResult({}, error, spawned_tasks)


associate = task(ConsumerGroupManager.associate, base=Task, ignore_result=True)
create_consumer_group = task(ConsumerGroupManager.create_consumer_group, base=Task)
delete_consumer_group = task(ConsumerGroupManager.delete_consumer_group, base=Task,
                             ignore_result=True)
update_consumer_group = task(ConsumerGroupManager.update_consumer_group, base=Task)
unassociate = task(ConsumerGroupManager.unassociate, base=Task, ignore_result=True)
bind = task(ConsumerGroupManager.bind, base=Task)
unbind = task(ConsumerGroupManager.unbind, base=Task)


def validate_existing_consumer_group(group_id):
    @staticmethod
    def get_role(role):
        """
        Get a Role by id.

        :param role: A role id to search for
        :type role: str

        :return: a Role object that has the given role id.
        :rtype: Role or None
        """
        return Role.get_collection().find_one({'id': role})


add_permissions_to_role = task(RoleManager.add_permissions_to_role, base=Task,
                               ignore_result=True)
add_user_to_role = task(RoleManager.add_user_to_role, base=Task, ignore_result=True)
create_role = task(RoleManager.create_role, base=Task)
delete_role = task(RoleManager.delete_role, base=Task, ignore_result=True)
remove_permissions_from_role = task(RoleManager.remove_permissions_from_role, base=Task,
                                    ignore_result=True)
remove_user_from_role = task(RoleManager.remove_user_from_role, base=Task,
                             ignore_result=True)
update_role = task(RoleManager.update_role, base=Task)
        if limit is not None:
            cursor.limit(limit)
        return list(cursor)

    def auto_distributors(self, repo_id):
        """
        Returns all distributors for the given repo that are configured for
        automatic publishing.
        """
        dist_coll = RepoDistributor.get_collection()
        auto_distributors = list(dist_coll.find({'repo_id': repo_id,
                                                 'auto_publish': True}))
        return auto_distributors


publish = task(RepoPublishManager.publish, base=Task)


def _now_timestamp():
    """
    @return: UTC timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    now = dateutils.now_utc_datetime_with_tzinfo()
    now_in_iso_format = dateutils.format_iso8601_datetime(now)
    return now_in_iso_format
def assertCompatDecorator(self, decorator, type, **opts):
    task = decorator(**opts)(add)
    self.assertEqual(task(8, 8), 16)
    self.assertTrue(task.accept_magic_kwargs)
    self.assertIsInstance(task, type)
        @type group_id: str
        @param distributor_id: identifies the group's distributor
        @type distributor_id: str
        @return: timestamp of the last publish or None
        @rtype: datetime.datetime
        """
        distributor = manager_factory.repo_group_distributor_manager().get_distributor(
            group_id, distributor_id)
        date = distributor['last_publish']
        if date is not None:
            date = dateutils.parse_iso8601_datetime(date)
        return date


publish = task(RepoGroupPublishManager.publish, base=Task, ignore_result=True)


def _now_timestamp():
    """
    @return: UTC timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    now = dateutils.now_utc_datetime_with_tzinfo()
    now_in_iso_format = dateutils.format_iso8601_datetime(now)
    return now_in_iso_format
        state = self.get_state(file, check_if_unknown=False)
        if state not in (CacheFileState.GENERATING, CacheFileState.EXISTS):
            self.schedule_generation(file, force=force)

    def schedule_generation(self, file, force=False):
        # overwrite this to have the file generated in the background,
        # e. g. in a worker queue.
        raise NotImplementedError


try:
    from celery import task
except ImportError:
    pass
else:
    _celery_task = task(ignore_result=True, serializer='pickle')(_generate_file)


class Celery(BaseAsync):
    """
    A backend that uses Celery to generate the images.
    """

    def __init__(self, *args, **kwargs):
        try:
            import celery  # noqa
        except ImportError:
            raise ImproperlyConfigured('You must install celery to use'
                                       ' imagekit.cachefiles.backends.Celery.')
        super(Celery, self).__init__(*args, **kwargs)

    def schedule_generation(self, file, force=False):
""" List the install schedules currently defined for the repo. @param repo_id: @return: """ cls._validate_scheduled_operation(operation) consumer = cls.get_consumer(consumer_id, ['schedules']) return consumer.get('schedules', {}).get(operation, []) @staticmethod def _validate_scheduled_operation(operation): if operation not in ['install', 'update', 'uninstall']: raise ValueError('"%s" is not a valid operation' % operation) register = task(ConsumerManager.register, base=Task) unregister = task(ConsumerManager.unregister, base=Task, ignore_result=True) update = task(ConsumerManager.update, base=Task) def update_notes(notes, delta_notes): """ Accepts original notes and delta and returns updated notes @return: updated notes @rtype: dict """ for key, value in delta_notes.items(): if value is None: # try deleting a note if it exists try: del notes[key]
    @return: True if unique else False
    @rtype: bool
    """
    spec = {
        'repo_id': repo_id,
        'unit_id': unit_id,
        'unit_type_id': unit_type_id,
    }
    unit_coll = RepoContentUnit.get_collection()
    existing_count = unit_coll.find(spec).count()
    return bool(existing_count)


associate_from_repo = task(RepoUnitAssociationManager.associate_from_repo, base=Task)
unassociate_by_criteria = task(RepoUnitAssociationManager.unassociate_by_criteria,
                               base=Task)


def load_associated_units(source_repo_id, criteria):
    criteria.association_fields = None
    # Retrieve the units to be associated
    association_query_manager = manager_factory.repo_unit_association_query_manager()
    associate_us = association_query_manager.get_units(source_repo_id, criteria=criteria)
    return associate_us
        :param names: names of operations to convert to values
        :type names: list or tuple of str's
        :rtype: list of int's
        :return: list of operation values
        :raises InvalidValue: when any of the given operation names is invalid
        """
        if names is None:
            raise InvalidValue('operation_names')
        operations = [self.operation_name_to_value(n) for n in names]
        return operations

    def operation_value_to_name(self, operation):
        """
        Convert an operation value to an operation name.
        Returns None if given operation value is invalid.

        :param operation: operation value
        :type operation: int

        :rtype: str
        :return: operation name
        """
        if operation < authorization.CREATE or operation > authorization.EXECUTE:
            return None
        return authorization.OPERATION_NAMES[operation]


grant = task(PermissionManager.grant, base=Task, ignore_result=True)
revoke = task(PermissionManager.revoke, base=Task, ignore_result=True)
            j.delete()
        elif (now() - job.updated).seconds > 60 * 60:
            j.delete()

    def finished(stream):
        stream.seek(0)
        job.data = File(stream)
        job.save()
        job.updates.success(u'Finished export')

    handle, filename = mkstemp(helpers.get_extension_for_writer(job.writer))
    os.close(handle)
    stream = open(filename, 'r+w')
    module, dot, klass = job.handler.rpartition('.')
    module = import_module(module)
    handler = getattr(module, klass)(job)
    module, dot, klass = job.writer.rpartition('.')
    module = import_module(module)
    writer = getattr(module, klass)(stream, handler.export_wrapper, handler.export_item)
    handler.start_export(writer, finished)


if 'djcelery' in settings.INSTALLED_APPS:
    from celery import task
    import_task = task(import_task)
    export_task = task(export_task)
        collection.update({'_id': distributor['_id']},
                          {'$pull': {'scheduled_publishes': schedule_id}},
                          safe=True)

    def list_publish_schedules(self, repo_id, distributor_id):
        """
        List the publish schedules for the repo's distributor.
        @param repo_id:
        @param distributor_id:
        """
        collection = RepoDistributor.get_collection()
        distributor = collection.find_one({'repo_id': repo_id, 'id': distributor_id})
        if distributor is None:
            raise MissingResource(repo=repo_id, distributor=distributor_id)
        return distributor['scheduled_publishes']


add_distributor = task(RepoDistributorManager.add_distributor, base=Task)
remove_distributor = task(RepoDistributorManager.remove_distributor, base=Task,
                          ignore_result=True)
update_distributor_config = task(RepoDistributorManager.update_distributor_config,
                                 base=Task, ignore_result=True)


def is_distributor_id_valid(distributor_id):
    """
    @return: true if the distributor ID is valid; false otherwise
    @rtype: bool
    """
    result = _DISTRIBUTOR_ID_REGEX.match(distributor_id) is not None
    return result
        result = OptionBag()
        result.delay = function
        return result

    serial_execution = False
elif ACTIVITYLOG_MODE == 'rq':
    from django_rq import job
    maybe_async = job(
        'activitylog',
        timeout=ACTIVITYLOG_TASK_EXPIRATION,
        result_ttl=ACTIVITYLOG_TASK_EXPIRATION,
    )
    serial_execution = True
elif ACTIVITYLOG_MODE == 'celery':
    import celery
    maybe_async = celery.task(expires=ACTIVITYLOG_TASK_EXPIRATION)
    serial_execution = True


@maybe_async
@transaction.commit_on_success
def update_activity(user_id, address, agent, _now_dt):
    ip, _ = IP.concurrent_get_or_create(
        address=address,
        fast_mode=serial_execution,
    )
    if agent:
        agent, _ = UserAgent.concurrent_get_or_create(
            name=agent,
            fast_mode=serial_execution,
        )
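# Usage sketch (added): every branch above leaves maybe_async with a
# `.delay`-style interface (OptionBag supplies one for the synchronous case),
# so call sites stay backend-agnostic:
#
#     update_activity.delay(user.id, request.META['REMOTE_ADDR'],
#                           request.META.get('HTTP_USER_AGENT'), now())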
        role = self.get_role(SUPER_USER_ROLE)
        if role is None:
            role = self.create_role(SUPER_USER_ROLE, 'Super Users',
                                    'Role indicates users with admin privileges')
            role['permissions'] = [{'resource': '/',
                                    'permission': [CREATE, READ, UPDATE, DELETE, EXECUTE]}]
            Role.get_collection().save(role, safe=True)

    @staticmethod
    def get_role(role):
        """
        Get a Role by id.

        :param role: A role id to search for
        :type role: str

        :return: a Role object that has the given role id.
        :rtype: Role or None
        """
        return Role.get_collection().find_one({'id': role})


add_permissions_to_role = task(RoleManager.add_permissions_to_role, base=Task,
                               ignore_result=True)
add_user_to_role = task(RoleManager.add_user_to_role, base=Task, ignore_result=True)
create_role = task(RoleManager.create_role, base=Task)
delete_role = task(RoleManager.delete_role, base=Task, ignore_result=True)
remove_permissions_from_role = task(RoleManager.remove_permissions_from_role, base=Task,
                                    ignore_result=True)
remove_user_from_role = task(RoleManager.remove_user_from_role, base=Task,
                             ignore_result=True)
update_role = task(RoleManager.update_role, base=Task)
    @staticmethod
    def get_admins():
        """
        Get a list of users with the super-user role.

        :return: list of users who are admins.
        :rtype: list of User
        """
        user_query_manager = factory.user_query_manager()
        try:
            super_users = user_query_manager.find_users_belonging_to_role(SUPER_USER_ROLE)
            return super_users
        except MissingResource:
            return None


create_user = task(UserManager.create_user, base=Task)
delete_user = task(UserManager.delete_user, base=Task, ignore_result=True)
update_user = task(UserManager.update_user, base=Task)


def invalid_type(input_value, valid_type):
    """
    @return: true if input_value is not of valid_type
    @rtype: bool
    """
    if input_value is not None and not isinstance(input_value, valid_type):
        return True
    return False
        :param type_id: The content type ID.
        :type type_id: str

        :return: (profiler, cfg)
        :rtype: tuple
        """
        try:
            plugin, cfg = plugin_api.get_profiler_by_type(type_id)
        except plugin_exceptions.PluginNotFound:
            plugin = Profiler()
            cfg = {}
        return plugin, cfg


regenerate_applicability_for_consumers = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_consumers,
    base=Task, ignore_result=True)
regenerate_applicability_for_repos = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_repos,
    base=Task, ignore_result=True)


class DoesNotExist(Exception):
    """
    An Exception to be raised when a get() is called on a manager with query
    parameters that do not match an object in the database.
    """
    pass
        :type upload_id: str

        :return: full path on the server's filesystem
        :rtype: str
        """
        upload_storage_dir = ContentUploadManager._upload_storage_dir()
        path = os.path.join(upload_storage_dir, upload_id)
        return path

    @staticmethod
    def _upload_storage_dir():
        """
        Calculates the location of the directory into which to store uploaded
        files. This is necessary as a dynamic call so unit tests have the
        opportunity to change the constants entry for local storage.

        This call will create the directory if it doesn't exist.

        :return: full path to the upload directory
        """
        storage_dir = pulp_config.config.get('server', 'storage_dir')
        upload_storage_dir = os.path.join(storage_dir, 'uploads')
        if not os.path.exists(upload_storage_dir):
            os.makedirs(upload_storage_dir)
        return upload_storage_dir


import_uploaded_unit = task(ContentUploadManager.import_uploaded_unit, base=Task)
            OrphanManager.delete(ref_path)

    @staticmethod
    def delete(path):
        """
        Delete the specified path.
        File and links are unlinked. Directories are recursively deleted.
        Exceptions are logged and discarded.

        :param path: An absolute path.
        :type path: str
        """
        try:
            if os.path.isfile(path) or os.path.islink(path):
                os.unlink(path)
            else:
                shutil.rmtree(path)
        except OSError as e:
            _logger.error(_('Delete path: %(p)s failed: %(m)s'),
                          {'p': path, 'm': str(e)})


delete_all_orphans = task(OrphanManager.delete_all_orphans, base=Task)
delete_orphans_by_id = task(OrphanManager.delete_orphans_by_id, base=Task,
                            ignore_result=True)
delete_orphans_by_type = task(OrphanManager.delete_orphans_by_type, base=Task,
                              ignore_result=True)
        self.schedule_generation(file, force=force)

    def schedule_generation(self, file, force=False):
        # overwrite this to have the file generated in the background,
        # e. g. in a worker queue.
        raise NotImplementedError


QUEUE_NAME = getattr(settings, 'IMAGEKIT_CACHEBACKEND_CELERY_QUEUE', 'celery')

try:
    from celery import task
except ImportError:
    pass
else:
    _celery_task = task(ignore_result=True, serializer='pickle',
                        queue=QUEUE_NAME)(_generate_file)


class Celery(BaseAsync):
    """
    A backend that uses Celery to generate the images.
    """

    def __init__(self, *args, **kwargs):
        try:
            import celery  # noqa
        except ImportError:
            raise ImproperlyConfigured('You must install celery to use'
                                       ' imagekit.cachefiles.backends.Celery.')
        super(Celery, self).__init__(*args, **kwargs)

    def schedule_generation(self, file, force=False):
def B(id):
    return chord(make_request.s(id, '{0} {1!r}'.format(id, i))
                 for i in range(10))(B_callback.s(id))


@task()
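# Note (added): chord(header)(callback) runs the ten make_request subtasks in
# parallel and invokes B_callback once with the list of all their results.
# A self-contained sketch with hypothetical tasks `add` and `tsum`:
#
#     from celery import chord
#     chord(add.s(i, i) for i in range(10))(tsum.s())
#     # -> tsum([0, 2, 4, ..., 18]) once every add() has finished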
from export.utils import mail_export

try:
    from celery import task
    mail_export = task(mail_export)
except ImportError:
    pass
    ordered_fields = (sorted(values_tree) +
                      sorted(values_plot) +
                      sorted(values_sp))

    if ordered_fields:
        limited_qs = (initial_qs.extra(select=extra_select)
                                .values(*ordered_fields))
    else:
        limited_qs = initial_qs.none()

    if not initial_qs.exists():
        job.status = ExportJob.EMPTY_QUERYSET_ERROR
    # if the initial queryset was not empty but the limited queryset
    # is empty, it means that there were no fields which the user
    # was allowed to export.
    elif not limited_qs.exists():
        job.status = ExportJob.MODEL_PERMISSION_ERROR
    else:
        csv_file = TemporaryFile()
        write_csv(limited_qs, csv_file, field_order=ordered_fields)
        csv_name = generate_filename(limited_qs)
        job.outfile.save(csv_name, File(csv_file))
        job.status = ExportJob.COMPLETE

    job.save()


async_csv_export = task(csv_export)
        # Retrieve the entries
        cursor = RepoPublishResult.get_collection().find(search_params)

        # Sort the results on the 'started' field. By default, descending order is used
        cursor.sort('started', direction=constants.SORT_DIRECTION[sort])

        if limit is not None:
            cursor.limit(limit)

        return list(cursor)

    def auto_distributors(self, repo_id):
        """
        Returns all distributors for the given repo that are configured for
        automatic publishing.
        """
        dist_coll = RepoDistributor.get_collection()
        auto_distributors = list(dist_coll.find({'repo_id': repo_id,
                                                 'auto_publish': True}))
        return auto_distributors


publish = task(RepoPublishManager.publish, base=Task)


def _now_timestamp():
    """
    @return: timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    now = datetime.datetime.now(dateutils.local_tz())
    now_in_iso_format = dateutils.format_iso8601_datetime(now)
    return now_in_iso_format
        result = OptionBag()
        result.delay = function
        return result

    serial_execution = False
elif ACTIVITYLOG_MODE == 'rq':
    from django_rq import job
    maybe_async = job(
        'activitylog',
        timeout=ACTIVITYLOG_TASK_EXPIRATION,
        result_ttl=ACTIVITYLOG_TASK_EXPIRATION,
    )
    serial_execution = True
elif ACTIVITYLOG_MODE == 'celery':
    import celery
    maybe_async = celery.task(
        expires=ACTIVITYLOG_TASK_EXPIRATION,
    )
    serial_execution = True


@maybe_async
@transaction.commit_on_success
def update_activity(user_id, address, agent, _now_dt):
    ip, _ = IP.concurrent_get_or_create(
        address=address,
        fast_mode=serial_execution,
    )
    if agent:
        agent, _ = UserAgent.concurrent_get_or_create(
            name=agent,
            fast_mode=serial_execution,
        )
    else:
    def set_importer_scratchpad(self, repo_id, contents):
        """
        Sets the value of the scratchpad for the given repo and saves it to the
        database. If there is a previously saved value it will be replaced.

        If the repo has no importer associated with it, this call does nothing.

        @param repo_id: identifies the repo
        @type repo_id: str

        @param contents: value to write to the scratchpad field
        @type contents: anything that can be saved in the database
        """
        importer_coll = RepoImporter.get_collection()

        # Validation
        repo_importer = importer_coll.find_one({'repo_id': repo_id})
        if repo_importer is None:
            return

        # Update
        repo_importer['scratchpad'] = contents
        importer_coll.save(repo_importer, safe=True)


remove_importer = task(RepoImporterManager.remove_importer, base=Task,
                       ignore_result=True)
set_importer = task(RepoImporterManager.set_importer, base=Task)
update_importer_config = task(RepoImporterManager.update_importer_config, base=Task)
        Get all profiles associated with a consumer.

        @param consumer_id: uniquely identifies the consumer.
        @type consumer_id: str

        @return: A list of profiles:
            {consumer_id:<str>, content_type:<str>, profile:<dict>}
        @rtype: list
        """
        collection = UnitProfile.get_collection()
        query = dict(consumer_id=consumer_id)
        cursor = collection.find(query)
        return list(cursor)

    @staticmethod
    def find_by_criteria(criteria):
        """
        Return a list of unit profiles that match the provided criteria.

        @param criteria: A Criteria object representing a search you want to perform
        @type criteria: pulp.server.db.model.criteria.Criteria

        @return: list of UnitProfile instances
        @rtype: list
        """
        return UnitProfile.get_collection().query(criteria)


create = task(ProfileManager.create, base=Task)
delete = task(ProfileManager.delete, base=Task, ignore_result=True)
update = task(ProfileManager.update, base=Task)
                # Log a message so that we can debug but don't throw
                _logger.warn(e)
                errors.append(e)
            except Exception as e:
                _logger.exception(e)
                errors.append(e)
                # Don't do anything else since we still want to process all the other consumers

        error = None
        if len(errors) > 0:
            error = PulpCodedException(error_code, **error_kwargs)
            error.child_exceptions = errors
        return TaskResult({}, error, spawned_tasks)


associate = task(ConsumerGroupManager.associate, base=Task, ignore_result=True)
create_consumer_group = task(ConsumerGroupManager.create_consumer_group, base=Task)
delete_consumer_group = task(ConsumerGroupManager.delete_consumer_group, base=Task,
                             ignore_result=True)
update_consumer_group = task(ConsumerGroupManager.update_consumer_group, base=Task)
unassociate = task(ConsumerGroupManager.unassociate, base=Task, ignore_result=True)
bind = task(ConsumerGroupManager.bind, base=Task)
unbind = task(ConsumerGroupManager.unbind, base=Task)


def validate_existing_consumer_group(group_id):
    """
    Validate the existence of a consumer group, given its id.
    Returns the consumer group db collection upon successful validation,
    raises an exception upon failure
    @param group_id: unique id of the consumer group to validate
    @return: True if unique else False
    @rtype: bool
    """
    spec = {
        'repo_id': repo_id,
        'unit_id': unit_id,
        'unit_type_id': unit_type_id,
    }
    unit_coll = RepoContentUnit.get_collection()
    existing_count = unit_coll.find(spec).count()
    return bool(existing_count)


associate_from_repo = task(RepoUnitAssociationManager.associate_from_repo, base=Task)
unassociate_by_criteria = task(RepoUnitAssociationManager.unassociate_by_criteria)


def load_associated_units(source_repo_id, criteria):
    criteria.association_fields = None
    # Retrieve the units to be associated
    association_query_manager = manager_factory.repo_unit_association_query_manager()
    associate_us = association_query_manager.get_units(source_repo_id, criteria=criteria)
    return associate_us


def calculate_associated_type_ids(source_repo_id, associated_units):
    if associated_units is not None:
""" Add a scheduled publish for the repo to the given distributor. @param repo_id: @param distributor_id: """ collection = RepoDistributor.get_collection() distributor = collection.find_one({ 'repo_id': repo_id, 'id': distributor_id }) if distributor is None: raise MissingResource(repo=repo_id, distributor=distributor_id) return distributor['scheduled_publishes'] add_distributor = task(RepoDistributorManager.add_distributor, base=Task) remove_distributor = task(RepoDistributorManager.remove_distributor, base=Task, ignore_result=True) update_distributor_config = task( RepoDistributorManager.update_distributor_config, base=Task, ignore_result=True) def is_distributor_id_valid(distributor_id): """ @return: true if the distributor ID is valid; false otherwise @rtype: bool """ result = _DISTRIBUTOR_ID_REGEX.match(distributor_id) is not None
def process(self):
    # Split into subtasks
    def _process(params):
        print('doing...')
        print(params)

    return [celery.task(name='create_vpc_doing')(_process)]
    # default to all repos if none were specified
    if not repo_ids:
        repo_ids = [repo['id'] for repo in repo_collection.find(fields=['id'])]

    logger.info('regenerating content unit counts for %d repositories' % len(repo_ids))

    for repo_id in repo_ids:
        logger.debug('regenerating content unit count for repository "%s"' % repo_id)
        counts = {}
        cursor = association_collection.find({'repo_id': repo_id})
        type_ids = cursor.distinct('unit_type_id')
        cursor.close()
        for type_id in type_ids:
            spec = {'repo_id': repo_id, 'unit_type_id': type_id}
            counts[type_id] = association_collection.find(spec).count()
        repo_collection.update({'id': repo_id},
                               {'$set': {'content_unit_counts': counts}},
                               safe=True)


create_and_configure_repo = task(RepoManager.create_and_configure_repo, base=Task)
delete_repo = task(RepoManager.delete_repo, base=Task, ignore_result=True)
update_repo_and_plugins = task(RepoManager.update_repo_and_plugins, base=Task)


def is_repo_id_valid(repo_id):
    """
    :return: true if the repo ID is valid; false otherwise
    :rtype: bool
    """
    result = _REPO_ID_REGEX.match(repo_id) is not None
    return result
class Wkhtmltopdf(object):
    '''Wkhtmltopdf class container to use the robust wkhtmltopdf library which
    is capable of generating a PDF from HTML, CSS, and JavaScript using a
    modified WebKit engine. This extension allows you to easily incorporate
    this functionality into your Flask app.

    In addition to the dependencies automatically installed, you must manually
    download the appropriate wkhtmltopdf command line tool from
    http://wkhtmltopdf.org/downloads.html

    The main function render_template_to_pdf() works similar to Flask's
    built-in render_template() function and in fact utilizes some of the same
    underlying functions. However, as the name suggests, it will return a pdf
    instead of a rendered webpage.

    To initialize, pass your flask app's object to Flask-WkHTMLtoPDF::

        from flask_wkhtmltopdf import Wkhtmltopdf

        app = Flask(__name__)
        wkhtmltopdf = Wkhtmltopdf(app)

    Then pass the template to the render_template_to_pdf() function. You can
    pass Jinja2 params just like with render_template()::

        render_template_to_pdf('test.html', download=True, save=False, param='hello')

    Celery, an asynchronous task queue, is highly suggested when using
    Flask-WkHTMLtoPDF as rendering the PDF can be resource heavy and take an
    unacceptable amount of time to generate. To enable Celery, set
    'WKHTMLTOPDF_USE_CELERY = True' in your Flask app's config.

    You must add two more variables to your Flask app's config::

        WKHTMLTOPDF_BIN_PATH = r'C:\Program Files\wkhtmltopdf\bin'
        PDF_DIR_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    'static', 'pdf')
    '''

    use_celery = False

    def __init__(self, app=None):
        if app is not None:
            self._init_app(app)

    def _init_app(self, app):
        '''Initializes the app with Flask-WkHTMLtoPDF.

        :param app: The Flask application object.
        '''
        self.use_celery = app.config.get('WKHTMLTOPDF_USE_CELERY', False)
        self.add_path = app.config.get('WKHTMLTOPDF_BIN_PATH', None)
        self.pdf_dir_path = app.config.get('PDF_DIR_PATH', None)

    # checks to see if condition is true before applying decorator.
    def _maybe_decorate(condition, decorator):
        return decorator if condition else lambda x: x

    @_maybe_decorate(use_celery, celery.task())
    def render_template_to_pdf(self, template_name_or_list, save=False,
                               download=False, **context):
        '''Renders a template from the template folder with the given context
        and produces a pdf. As this can be resource intensive, the function
        can easily be decorated with celery.task() by setting
        WKHTMLTOPDF_USE_CELERY to True.

        :param template_name_or_list: The name of the template to be rendered,
            or an iterable with template names. The first one existing will be
            rendered.
        :param save: Specifies whether to save the temporary pdf generated.
            Defaults to False.
        :param download: Specifies if the pdf should be displayed in the
            browser or downloaded as an attachment. Defaults to False
            (in browser).
        :param context: The variables that should be available in the context
            of the Jinja2 template.
        '''
        # Get the system's PATH and add wkhtmltopdf to it if necessary
        path = os.getenv("PATH")
        if "wkhtmltopdf" not in path:
            if self.add_path is None:
                raise ValueError('WKHTMLTOPDF_BIN_PATH config variable must be set '
                                 'in the Flask app or added to the OS PATH')
            os.environ["PATH"] += os.pathsep + self.add_path

        # render appropriate template and write to a temp file
        rendered = render_template(template_name_or_list, **context)
        with tempfile.NamedTemporaryFile(suffix='.html', dir=os.path.dirname(__file__),
                                         delete=False, mode='w') as temp_html:
            temp_html.write(rendered)

        # Checks to see if the pdf directory exists and generates a random pdf name
        if self.pdf_dir_path is None:
            raise ValueError('PDF_DIR_PATH config variable must be set in the Flask app')
        if not os.path.isdir(self.pdf_dir_path):
            os.makedirs(self.pdf_dir_path)
        with tempfile.NamedTemporaryFile(suffix='.pdf', dir=self.pdf_dir_path,
                                         delete=False) as temp_pdf:
            pass

        # Run wkhtmltopdf via the appropriate subprocess call
        wkhtmltopdfargs = "wkhtmltopdf" + " " + temp_html.name + " " + temp_pdf.name

        # A work around for python 2.6, which lacks subprocess.check_output
        try:
            subprocess.check_output(wkhtmltopdfargs, shell=True)
        except AttributeError:
            def check_output(*popenargs, **kwargs):
                process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
                output, unused_err = process.communicate()
                retcode = process.poll()
                if retcode:
                    cmd = kwargs.get("args")
                    if cmd is None:
                        cmd = popenargs[0]
                    error = subprocess.CalledProcessError(retcode, cmd)
                    error.output = output
                    raise error
                return output

            subprocess.check_output = check_output
            subprocess.check_output(wkhtmltopdfargs, shell=True)

        # Remove the temporary files created
        os.remove(temp_html.name)

        with open(temp_pdf.name, 'rb') as f:
            binary_pdf = f.read()

        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        if download is True:
            response.headers['Content-Disposition'] = \
                'attachment; filename=%s.pdf' % temp_pdf.name
        else:
            response.headers['Content-Disposition'] = \
                'inline; filename=%s.pdf' % temp_pdf.name

        if save is False:
            os.remove(temp_pdf.name)

        return response
        :param upload_id: identifies the upload in question
        :type upload_id: str

        :return: full path on the server's filesystem
        :rtype: str
        """
        upload_storage_dir = ContentUploadManager._upload_storage_dir()
        path = os.path.join(upload_storage_dir, upload_id)
        return path

    @staticmethod
    def _upload_storage_dir():
        """
        Calculates the location of the directory into which to store uploaded
        files. This is necessary as a dynamic call so unit tests have the
        opportunity to change the constants entry for local storage.

        This call will create the directory if it doesn't exist.

        :return: full path to the upload directory
        """
        storage_dir = pulp_config.config.get('server', 'storage_dir')
        upload_storage_dir = os.path.join(storage_dir, 'uploads')
        if not os.path.exists(upload_storage_dir):
            os.makedirs(upload_storage_dir)
        return upload_storage_dir


import_uploaded_unit = task(ContentUploadManager.import_uploaded_unit, base=Task)
        :param type_id: The content type ID.
        :type type_id: str

        :return: (profiler, cfg)
        :rtype: tuple
        """
        try:
            plugin, cfg = plugin_api.get_profiler_by_type(type_id)
        except plugin_exceptions.PluginNotFound:
            plugin = Profiler()
            cfg = {}
        return plugin, cfg


regenerate_applicability_for_consumers = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_consumers,
    base=Task, ignore_result=True)
regenerate_applicability_for_repos = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_repos,
    base=Task, ignore_result=True)
batch_regenerate_applicability_task = task(
    ApplicabilityRegenerationManager.batch_regenerate_applicability,
    base=Task, ignore_result=True)


class DoesNotExist(Exception):
    """
    An Exception to be raised when a get() is called on a manager with query
    parameters that do not match an object in the database.
    """
    pass
        @param value: note value
        """
        self.add_notes(group_id, {key: value})

    def unset_note(self, group_id, key):
        """
        Unset a single key and value pair in a repo group's notes.

        @param group_id: unique id of the repo group to unset a note on
        @type group_id: str
        @param key: note key
        @type key: immutable
        """
        self.remove_notes(group_id, [key])


associate = task(RepoGroupManager.associate, base=Task, ignore_result=True)
create_and_configure_repo_group = task(RepoGroupManager.create_and_configure_repo_group,
                                       base=Task)
delete_repo_group = task(RepoGroupManager.delete_repo_group, base=Task,
                         ignore_result=True)
unassociate = task(RepoGroupManager.unassociate, base=Task, ignore_result=True)
update_repo_group = task(RepoGroupManager.update_repo_group, base=Task)


def validate_existing_repo_group(group_id):
    """
    Validate the existence of a repo group, given its id.
    Returns the repo group db collection upon successful validation,
    raises an exception upon failure
    @param group_id: unique id of the repo group to validate
    @type group_id: str
    @return: repo group db collection
    @rtype: L{pulp.server.db.connection.PulpCollection}
            return
        if not os.access(path, os.W_OK):
            logger.warn(_('Cannot delete orphaned file: %(p)s, Insufficient permissions')
                        % {'p': path})
            return

        if os.path.isfile(path) or os.path.islink(path):
            os.unlink(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)

        # delete parent directories on the path as long as they fall empty
        storage_dir = pulp_config.config.get('server', 'storage_dir')
        root_content_regex = re.compile(os.path.join(storage_dir, 'content', '[^/]+/?'))
        while True:
            path = os.path.dirname(path)
            if root_content_regex.match(path):
                break
            contents = os.listdir(path)
            if contents:
                break
            if not os.access(path, os.W_OK):
                break
            os.rmdir(path)


delete_all_orphans = task(OrphanManager.delete_all_orphans, base=Task,
                          ignore_result=True)
delete_orphans_by_id = task(OrphanManager.delete_orphans_by_id, base=Task,
                            ignore_result=True)
delete_orphans_by_type = task(OrphanManager.delete_orphans_by_type, base=Task,
                              ignore_result=True)
        logger.debug('regenerating content unit count for repository "%s"' % repo_id)
        counts = {}
        cursor = association_collection.find({'repo_id': repo_id})
        type_ids = cursor.distinct('unit_type_id')
        cursor.close()
        for type_id in type_ids:
            spec = {'repo_id': repo_id, 'unit_type_id': type_id}
            counts[type_id] = association_collection.find(spec).count()
        repo_collection.update({'id': repo_id},
                               {'$set': {'content_unit_counts': counts}},
                               safe=True)


create_and_configure_repo = task(RepoManager.create_and_configure_repo, base=Task)
delete_repo = task(RepoManager.delete_repo, base=Task, ignore_result=True)
update_repo_and_plugins = task(RepoManager.update_repo_and_plugins, base=Task)


def is_repo_id_valid(repo_id):
    """
    :return: true if the repo ID is valid; false otherwise
    :rtype: bool
    """
    result = _REPO_ID_REGEX.match(repo_id) is not None
    return result
    def _exists(self, file):
        return bool(getattr(file, '_file', None) or
                    file.storage.exists(file.name))


def _generate_file(backend, file, force=False):
    backend.generate_now(file, force=force)


try:
    import celery
except ImportError:
    pass
else:
    _generate_file = celery.task(ignore_result=True)(_generate_file)


class Async(Simple):
    """
    A backend that uses Celery to generate the images.
    """

    def __init__(self, *args, **kwargs):
        try:
            import celery
        except ImportError:
            raise ImproperlyConfigured('You must install celery to use'
                                       ' imagekit.cachefiles.backend.Async.')
        super(Async, self).__init__(*args, **kwargs)
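# Sketch (added): the Async backend would typically finish the picture by
# enqueuing the module-level task; the exact body below is an assumption,
# not shown in the snippet:
#
#     def schedule_generation(self, file, force=False):
#         _generate_file.delay(self, file, force=force)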
        Sets the value of the scratchpad for the given repo and saves it to the
        database. If there is a previously saved value it will be replaced.

        If the repo has no importer associated with it, this call does nothing.

        @param repo_id: identifies the repo
        @type repo_id: str

        @param contents: value to write to the scratchpad field
        @type contents: anything that can be saved in the database
        """
        importer_coll = RepoImporter.get_collection()

        # Validation
        repo_importer = importer_coll.find_one({'repo_id': repo_id})
        if repo_importer is None:
            return

        # Update
        repo_importer['scratchpad'] = contents
        importer_coll.save(repo_importer, safe=True)


remove_importer = task(RepoImporterManager.remove_importer, base=Task,
                       ignore_result=True)
set_importer = task(RepoImporterManager.set_importer, base=Task)
update_importer_config = task(RepoImporterManager.update_importer_config, base=Task)