def add_result(repo_id, offset):
    """
    Insert a successful RepoSyncResult for repo_id whose completion time is
    `offset` days after a start time of now (local timezone).

    :param repo_id: identifier of the repo the result belongs to
    :param offset:  number of days between start and completion
    """
    begin = datetime.datetime.now(dateutils.local_tz())
    end = begin + datetime.timedelta(days=offset)
    result = RepoSyncResult.expected_result(
        repo_id, 'foo', 'bar',
        dateutils.format_iso8601_datetime(begin),
        dateutils.format_iso8601_datetime(end),
        1, 1, 1, '', '',
        RepoSyncResult.RESULT_SUCCESS)
    RepoSyncResult.get_collection().save(result, safe=True)
def create_date_range_filter(self, start_date=None, end_date=None):
    """
    Create a date filter based on start and end issue dates specified in the
    repo config.

    :param start_date: start time for the filter
    :type  start_date: datetime.datetime
    :param end_date:   end time for the filter
    :type  end_date:   datetime.datetime
    :return: Q object with start and/or end dates, or None if neither a start
             nor an end date is provided
    :rtype:  mongoengine.Q or types.NoneType
    """
    # Build the Q keyword arguments only for the bounds that were supplied;
    # with no bounds at all, fall through and implicitly return None.
    bounds = {}
    if start_date:
        bounds['created__gte'] = dateutils.format_iso8601_datetime(start_date)
    if end_date:
        bounds['created__lte'] = dateutils.format_iso8601_datetime(end_date)
    if bounds:
        return mongoengine.Q(**bounds)
def test_save_update_defaults(self):
    """
    Test the save method with default arguments when the object is already in the
    database.
    """
    # Build a fully-populated TaskStatus.
    task_id = str(uuid4())
    worker_name = 'worker_name'
    tags = ['tag_1', 'tag_2']
    state = constants.CALL_ACCEPTED_STATE
    spawned_tasks = ['foo']
    error = {'error': 'some_error'}
    progress_report = {'what do we want?': 'progress!', 'when do we want it?': 'now!'}
    task_type = 'some.task'
    start_time = datetime.now()
    finish_time = start_time + timedelta(minutes=5)
    start_time = dateutils.format_iso8601_datetime(start_time)
    finish_time = dateutils.format_iso8601_datetime(finish_time)
    result = None
    ts = TaskStatus(
        task_id, worker_name, tags, state, spawned_tasks=spawned_tasks,
        error=error, progress_report=progress_report, task_type=task_type,
        start_time=start_time, finish_time=finish_time, result=result)
    # Let's go ahead and insert the object
    ts.save()
    # Now let's alter it a bit, and make sure the alteration makes it to the DB correctly.
    new_state = constants.CALL_RUNNING_STATE
    ts.state = new_state
    # This should update ts in the database
    ts.save()
    ts = TaskStatus.objects()
    # There should only be one TaskStatus in the db
    self.assertEqual(len(ts), 1)
    ts = ts[0]
    # Make sure all the attributes are correct
    self.assertEqual(ts['task_id'], task_id)
    self.assertEqual(ts['worker_name'], worker_name)
    self.assertEqual(ts['tags'], tags)
    # The state should have been updated
    self.assertEqual(ts['state'], new_state)
    self.assertEqual(ts['error'], error)
    self.assertEqual(ts['spawned_tasks'], spawned_tasks)
    self.assertEqual(ts['progress_report'], progress_report)
    self.assertEqual(ts['task_type'], task_type)
    self.assertEqual(ts['start_time'], start_time)
    self.assertEqual(ts['finish_time'], finish_time)
    self.assertEqual(ts['result'], result)
    # These are always None
    self.assertEqual(ts['traceback'], None)
    self.assertEqual(ts['exception'], None)
def add_result(repo_id, dist_id, offset):
    """
    Insert a successful RepoPublishResult for repo_id/dist_id whose completion
    time is `offset` days after a UTC start time of now.

    :param repo_id: identifier of the repo the result belongs to
    :param dist_id: identifier of the distributor that published
    :param offset:  number of days between start and completion
    """
    begin = dateutils.now_utc_datetime_with_tzinfo()
    end = begin + datetime.timedelta(days=offset)
    entry = RepoPublishResult.expected_result(
        repo_id, dist_id, 'bar',
        dateutils.format_iso8601_datetime(begin),
        dateutils.format_iso8601_datetime(end),
        'test-summary', 'test-details',
        RepoPublishResult.RESULT_SUCCESS)
    RepoPublishResult.get_collection().insert(entry, safe=True)
def add_result(repo_id, offset):
    """
    Save a successful RepoSyncResult for repo_id spanning `offset` days,
    starting from the current local-timezone time.

    :param repo_id: identifier of the repo the result belongs to
    :param offset:  number of days between start and completion
    """
    sync_start = datetime.datetime.now(dateutils.local_tz())
    sync_end = sync_start + datetime.timedelta(days=offset)
    doc = RepoSyncResult.expected_result(
        repo_id,
        'foo',
        'bar',
        dateutils.format_iso8601_datetime(sync_start),
        dateutils.format_iso8601_datetime(sync_end),
        1, 1, 1, '', '',
        RepoSyncResult.RESULT_SUCCESS)
    RepoSyncResult.get_collection().save(doc, safe=True)
def add_result(repo_id, dist_id, offset):
    """
    Save a successful RepoPublishResult for repo_id/dist_id spanning `offset`
    days, starting from the current UTC time.

    :param repo_id: identifier of the repo the result belongs to
    :param dist_id: identifier of the distributor that published
    :param offset:  number of days between start and completion
    """
    publish_start = dateutils.now_utc_datetime_with_tzinfo()
    publish_end = publish_start + datetime.timedelta(days=offset)
    doc = RepoPublishResult.expected_result(
        repo_id,
        dist_id,
        'bar',
        dateutils.format_iso8601_datetime(publish_start),
        dateutils.format_iso8601_datetime(publish_end),
        'test-summary',
        'test-details',
        RepoPublishResult.RESULT_SUCCESS)
    RepoPublishResult.get_collection().insert(doc, safe=True)
def convert_schedule(save_func, call):
    """
    Converts one scheduled call from the old schema to the new

    :param save_func:   a function that takes one parameter, a dictionary that
                        represents the scheduled call in its new schema. This
                        function should save the call to the database.
    :type  save_func:   function
    :param call:        dictionary representing the scheduled call in its old
                        schema
    :type  call:        dict
    """
    # Fields dropped or renamed by the new schema.
    call.pop('call_exit_states', None)
    call['total_run_count'] = call.pop('call_count')

    # Preserve the original ISO-8601 schedule string, then replace 'schedule'
    # with the celery representation of its interval.
    call['iso_schedule'] = call['schedule']
    interval, start_time, occurrences = dateutils.parse_iso8601_interval(call['schedule'])
    # this should be a pickled instance of celery.schedules.schedule
    call['schedule'] = pickle.dumps(schedule(interval))

    call_request = call.pop('serialized_call_request')
    # we are no longer storing these pickled.
    # these are cast to a string because python 2.6 sometimes fails to
    # deserialize json from unicode.
    call['args'] = pickle.loads(str(call_request['args']))
    call['kwargs'] = pickle.loads(str(call_request['kwargs']))
    # keeping this pickled because we don't really know how to use it yet
    call['principal'] = call_request['principal']
    # this always get calculated on-the-fly now
    call.pop('next_run', None)
    # Old datetimes were stored naive; stamp them as UTC before formatting.
    first_run = call['first_run'].replace(tzinfo=dateutils.utc_tz())
    call['first_run'] = dateutils.format_iso8601_datetime(first_run)
    last_run = call.pop('last_run')
    if last_run:
        last_run_at = last_run.replace(tzinfo=dateutils.utc_tz())
        call['last_run_at'] = dateutils.format_iso8601_datetime(last_run_at)
    else:
        call['last_run_at'] = None
    call['task'] = NAMES_TO_TASKS[call_request['callable_name']]
    # this is a new field that is used to determine when the scheduler needs to
    # re-read the collection of schedules.
    call['last_updated'] = time.time()
    # determine if this is a consumer-related schedule, which we can only identify
    # by the consumer resource tag. If it is, save that tag value in the new
    # "resource" field, which is the new way that we will identify the
    # relationship between a schedule and some other object. This is not
    # necessary for repos, because we have a better method above for identifying
    # them (move_scheduled_syncs).
    tags = call_request.get('tags', [])
    for tag in tags:
        if tag.startswith('pulp:consumer:'):
            call['resource'] = tag
            break

    save_func(call)
def test_save_update_with_set_on_insert(self):
    """
    Test the save method with set on insert arguments when the object is already
    in the database.
    """
    # Build a fully-populated TaskStatus.
    task_id = str(uuid4())
    worker_name = 'worker_name'
    tags = ['tag_1', 'tag_2']
    state = constants.CALL_ACCEPTED_STATE
    spawned_tasks = ['foo']
    error = {'error': 'some_error'}
    progress_report = {'what do we want?': 'progress!', 'when do we want it?': 'now!'}
    task_type = 'some.task'
    old_start_time = start_time = datetime.now()
    finish_time = start_time + timedelta(minutes=5)
    start_time = dateutils.format_iso8601_datetime(start_time)
    finish_time = dateutils.format_iso8601_datetime(finish_time)
    result = None
    ts = TaskStatus(
        task_id, worker_name, tags, state, spawned_tasks=spawned_tasks,
        error=error, progress_report=progress_report, task_type=task_type,
        start_time=start_time, finish_time=finish_time, result=result)
    # Put the object in the database, and then change some of it settings.
    ts.save()
    new_worker_name = 'a different_worker'
    new_state = constants.CALL_SUSPENDED_STATE
    new_start_time = old_start_time + timedelta(minutes=10)
    new_start_time = dateutils.format_iso8601_datetime(new_start_time)
    ts.worker_name = new_worker_name
    ts.state = new_state
    ts.start_time = new_start_time
    # This should update the worker_name on ts in the database, but should not update the state
    # or start_time
    ts.save_with_set_on_insert(fields_to_set_on_insert=['state', 'start_time'])
    ts = TaskStatus.objects()
    # There should only be one TaskStatus in the db
    self.assertEqual(len(ts), 1)
    ts = ts[0]
    # Make sure all the attributes are correct
    self.assertEqual(ts['task_id'], task_id)
    # Queue should have been updated
    self.assertEqual(ts['worker_name'], new_worker_name)
    self.assertEqual(ts['tags'], tags)
    # state should not have been updated
    self.assertEqual(ts['state'], state)
    self.assertEqual(ts['error'], error)
    self.assertEqual(ts['spawned_tasks'], spawned_tasks)
    self.assertEqual(ts['progress_report'], progress_report)
    self.assertEqual(ts['task_type'], task_type)
    # start_time should not have been updated
    self.assertEqual(ts['start_time'], start_time)
    self.assertEqual(ts['finish_time'], finish_time)
    self.assertEqual(ts['result'], result)
    # These are always None
    self.assertEqual(ts['traceback'], None)
    self.assertEqual(ts['exception'], None)
def convert_schedule(save_func, call):
    """
    Converts one scheduled call from the old schema to the new

    :param save_func:   a function that takes one parameter, a dictionary that
                        represents the scheduled call in its new schema. This
                        function should save the call to the database.
    :type  save_func:   function
    :param call:        dictionary representing the scheduled call in its old
                        schema
    :type  call:        dict
    """
    # Fields dropped or renamed by the new schema.
    call.pop('call_exit_states', None)
    call['total_run_count'] = call.pop('call_count')

    # Preserve the original ISO-8601 schedule string, then replace 'schedule'
    # with the celery representation of its interval.
    call['iso_schedule'] = call['schedule']
    interval, start_time, occurrences = dateutils.parse_iso8601_interval(call['schedule'])
    # this should be a pickled instance of celery.schedules.schedule
    call['schedule'] = pickle.dumps(schedule(interval))

    call_request = call.pop('serialized_call_request')
    # we are no longer storing these pickled.
    # these are cast to a string because python 2.6 sometimes fails to
    # deserialize json from unicode.
    call['args'] = pickle.loads(str(call_request['args']))
    call['kwargs'] = pickle.loads(str(call_request['kwargs']))
    # keeping this pickled because we don't really know how to use it yet
    call['principal'] = call_request['principal']
    # this always get calculated on-the-fly now
    call.pop('next_run', None)
    # Old datetimes were stored naive; stamp them as UTC before formatting.
    first_run = call['first_run'].replace(tzinfo=dateutils.utc_tz())
    call['first_run'] = dateutils.format_iso8601_datetime(first_run)
    last_run = call.pop('last_run')
    if last_run:
        last_run_at = last_run.replace(tzinfo=dateutils.utc_tz())
        call['last_run_at'] = dateutils.format_iso8601_datetime(last_run_at)
    else:
        call['last_run_at'] = None
    call['task'] = NAMES_TO_TASKS[call_request['callable_name']]
    # this is a new field that is used to determine when the scheduler needs to
    # re-read the collection of schedules.
    call['last_updated'] = time.time()
    # determine if this is a consumer-related schedule, which we can only identify
    # by the consumer resource tag. If it is, save that tag value in the new
    # "resource" field, which is the new way that we will identify the
    # relationship between a schedule and some other object. This is not
    # necessary for repos, because we have a better method above for identifying
    # them (move_scheduled_syncs).
    tags = call_request.get('tags', [])
    for tag in tags:
        if tag.startswith('pulp:consumer:'):
            call['resource'] = tag
            break

    save_func(call)
def test_save_insert_with_set_on_insert(self):
    """
    Test the save method with set on insert arguments when the object is not
    already in the database.
    """
    # Build a fully-populated TaskStatus.
    task_id = str(uuid4())
    worker_name = 'some_worker'
    tags = ['tag_1', 'tag_2']
    state = constants.CALL_RUNNING_STATE
    spawned_tasks = ['foo']
    error = {'error': 'some_error'}
    progress_report = {'what do we want?': 'progress!', 'when do we want it?': 'now!'}
    task_type = 'some.task'
    start_time = datetime.now()
    finish_time = start_time + timedelta(minutes=5)
    start_time = dateutils.format_iso8601_datetime(start_time)
    finish_time = dateutils.format_iso8601_datetime(finish_time)
    result = None
    ts = TaskStatus(
        task_id, worker_name, tags, state, spawned_tasks=spawned_tasks,
        error=error, progress_report=progress_report, task_type=task_type,
        start_time=start_time, finish_time=finish_time, result=result)
    # This should cause ts to be in the database
    ts.save_with_set_on_insert(fields_to_set_on_insert=['state', 'start_time'])
    ts = TaskStatus.objects()
    # There should only be one TaskStatus in the db
    self.assertEqual(len(ts), 1)
    ts = ts[0]
    # Make sure all the attributes are correct
    self.assertEqual(ts['task_id'], task_id)
    self.assertEqual(ts['worker_name'], worker_name)
    self.assertEqual(ts['tags'], tags)
    self.assertEqual(ts['state'], state)
    self.assertEqual(ts['error'], error)
    self.assertEqual(ts['spawned_tasks'], spawned_tasks)
    self.assertEqual(ts['progress_report'], progress_report)
    self.assertEqual(ts['task_type'], task_type)
    self.assertEqual(ts['start_time'], start_time)
    self.assertEqual(ts['finish_time'], finish_time)
    self.assertEqual(ts['result'], result)
    # These are always None
    self.assertEqual(ts['traceback'], None)
    self.assertEqual(ts['exception'], None)
def test_save_update_defaults(self):
    """
    Test the save method with default arguments when the object is already in the
    database.
    """
    # Build a fully-populated TaskStatus.
    task_id = str(uuid4())
    worker_name = 'worker_name'
    tags = ['tag_1', 'tag_2']
    state = constants.CALL_ACCEPTED_STATE
    spawned_tasks = ['foo']
    error = {'error': 'some_error'}
    progress_report = {'what do we want?': 'progress!', 'when do we want it?': 'now!'}
    task_type = 'some.task'
    start_time = datetime.now()
    finish_time = start_time + timedelta(minutes=5)
    start_time = dateutils.format_iso8601_datetime(start_time)
    finish_time = dateutils.format_iso8601_datetime(finish_time)
    result = None
    ts = TaskStatus(
        task_id, worker_name, tags, state, spawned_tasks=spawned_tasks,
        error=error, progress_report=progress_report, task_type=task_type,
        start_time=start_time, finish_time=finish_time, result=result)
    # Let's go ahead and insert the object
    ts.save()
    # Now let's alter it a bit, and make sure the alteration makes it to the DB correctly.
    new_state = constants.CALL_RUNNING_STATE
    ts.state = new_state
    # This should update ts in the database
    ts.save()
    ts = TaskStatus.objects()
    # There should only be one TaskStatus in the db
    self.assertEqual(len(ts), 1)
    ts = ts[0]
    # Make sure all the attributes are correct
    self.assertEqual(ts['task_id'], task_id)
    self.assertEqual(ts['worker_name'], worker_name)
    self.assertEqual(ts['tags'], tags)
    # The state should have been updated
    self.assertEqual(ts['state'], new_state)
    self.assertEqual(ts['error'], error)
    self.assertEqual(ts['spawned_tasks'], spawned_tasks)
    self.assertEqual(ts['progress_report'], progress_report)
    self.assertEqual(ts['task_type'], task_type)
    self.assertEqual(ts['start_time'], start_time)
    self.assertEqual(ts['finish_time'], finish_time)
    self.assertEqual(ts['result'], result)
    # These are always None
    self.assertEqual(ts['traceback'], None)
    self.assertEqual(ts['exception'], None)
def add_result(repo_id, dist_id, offset):
    """
    Insert a successful RepoPublishResult for repo_id/dist_id whose completion
    time is `offset` days after a start time of now (local timezone).

    :param repo_id: identifier of the repo the result belongs to
    :param dist_id: identifier of the distributor that published
    :param offset:  number of days between start and completion
    """
    begin = datetime.datetime.now(dateutils.local_tz())
    end = begin + datetime.timedelta(days=offset)
    entry = RepoPublishResult.expected_result(
        repo_id, dist_id, "bar",
        dateutils.format_iso8601_datetime(begin),
        dateutils.format_iso8601_datetime(end),
        "test-summary", "test-details",
        RepoPublishResult.RESULT_SUCCESS)
    RepoPublishResult.get_collection().insert(entry, safe=True)
def _load_repo_extras(repo, repos=None):
    """
    Decorate *repo* (mutated in place) with derived display fields: its CDS
    url, parent/children clone relationships, GPG key URLs, whether updates
    are available, the next scheduled sync time, and sorted group ids.

    :param repo:  repository document to decorate (mutated in place)
    :param repos: mapping of all known repos keyed by id; when None, a
                  thread-local fallback is consulted (see note below)
    """
    config = get_config()
    repoapi = RepositoryAPI()
    repo["url"] = os.path.join(config.cds.baseurl, repo["relative_path"])
    repo["parent"] = None
    repo["children"] = []
    if repos is None:
        # NOTE(review): threading.local() constructs a brand-new object on
        # every call, so this getattr can never find a "repos" attribute and
        # always falls back to an empty dict. Presumably a module-level
        # threading.local() instance was intended -- confirm before relying
        # on this fallback.
        repos = getattr(threading.local(), "repos", dict())
    # Walk every other repo and classify it as parent or child of this one.
    for repo2 in repos.values():
        if repo2 == repo:
            continue
        elif repo["id"] in repo2["clone_ids"]:
            # the clone_id attribute is broken, but we check it anyway
            # just in case it gets fixed some day
            repo["parent"] = repo2
        elif repo2["id"] in repo["clone_ids"]:
            repo["children"].append(repo2)
        elif (
            repo["source"]
            and repo["source"]["type"] == "local"
            and repo["source"]["url"].endswith("/%s" % repo2["id"])
        ):
            # the child syncs from a local repo that ends with
            # /<parent repo id>
            repo["parent"] = repo2
        elif (
            repo2["source"]
            and repo2["source"]["type"] == "local"
            and repo2["source"]["url"].endswith("/%s" % repo["id"])
        ):
            repo["children"].append(repo2)
    # Map GPG key basename -> full key URL.
    repo["keys"] = dict()
    for key in repoapi.listkeys(repo["id"]):
        repo["keys"][os.path.basename(key)] = "%s/%s" % (config.cds.keyurl, key)
    if repo["parent"]:
        repo["updates"] = has_updates(repo)
    if repo["last_sync"] and repo["sync_schedule"]:
        # next sync = last sync + the schedule's interval
        repo["next_sync"] = format_iso8601_datetime(
            parse_iso8601_datetime(repo["last_sync"])
            + parse_iso8601_interval(repo["sync_schedule"])[0]
        )
    elif repo["sync_schedule"]:
        # never synced: use the interval's declared start time
        repo["next_sync"] = format_iso8601_datetime(
            parse_iso8601_interval(repo["sync_schedule"])[1])
    else:
        repo["next_sync"] = None
    repo["groupid"].sort()
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        # celery will log the traceback
        _logger.info(_('Task failed : [%s]') % task_id)
    if not self.request.called_directly:
        finish_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = finish_time
        task_status['traceback'] = einfo.traceback
        error = exc if isinstance(exc, PulpException) else PulpException(str(exc))
        task_status['error'] = error.to_dict()
        task_status.save()
    common_utils.delete_working_directory()
def __call__(self, *args, **kwargs):
    """
    This overrides CeleryTask's __call__() method. We use this method for
    task state tracking of Pulp tasks.
    """
    # Check task status and skip running the task if task state is 'canceled'.
    try:
        task_status = TaskStatus.objects.get(task_id=self.request.id)
    except DoesNotExist:
        task_status = None
    if task_status and task_status['state'] == constants.CALL_CANCELED_STATE:
        _logger.debug("Task cancel received for task-id : [%s]" % self.request.id)
        return
    # Update start_time and set the task state to 'running' for asynchronous
    # tasks. Eagerly executed (synchronous) tasks are not tracked in the db.
    if not self.request.called_directly:
        start_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        # 'upsert' avoids a possible race condition described in the
        # apply_async method above.
        TaskStatus.objects(task_id=self.request.id).update_one(
            set__state=constants.CALL_RUNNING_STATE,
            set__start_time=start_time,
            upsert=True)
    # Run the actual task
    _logger.debug("Running task : [%s]" % self.request.id)
    return super(Task, self).__call__(*args, **kwargs)
def serialize(self):
    """
    Build a plain-dict representation of this object.

    Datetimes are rendered as ISO-8601 strings, the exception as a list of
    formatted message lines, and the traceback as formatted text; absent
    values become None.

    :return: serialized representation of this object
    :rtype:  dict
    """
    data = dict((name, getattr(self, name)) for name in
                ('response', 'reasons', 'state', 'task_id', 'task_group_id',
                 'schedule_id', 'progress', 'result', 'tags'))
    ex = getattr(self, 'exception')
    if ex is None:
        data['exception'] = None
    else:
        data['exception'] = traceback.format_exception_only(type(ex), ex)
    tb = getattr(self, 'traceback')
    if tb is None:
        data['traceback'] = None
    elif isinstance(tb, (str, list, tuple)):
        # already pre-formatted text (or a sequence of lines)
        data['traceback'] = str(tb)
    else:
        data['traceback'] = traceback.format_tb(tb)
    for name in ('start_time', 'finish_time'):
        value = getattr(self, name)
        data[name] = None if value is None else dateutils.format_iso8601_datetime(value)
    return data
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    This overrides the error handler run by the worker when the task fails.
    It updates state, finish_time and traceback of the relevant task status
    for asynchronous tasks. Skip updating status for synchronous tasks.

    :param exc:     The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args:    Original arguments for the executed task.
    :param kwargs:  Original keyword arguments for the executed task.
    :param einfo:   celery's ExceptionInfo instance, containing serialized traceback.
    """
    if isinstance(exc, PulpCodedException):
        _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                     {'task_id': task_id, 'msg': str(exc)})
        _logger.debug(traceback.format_exc())
    else:
        # celery will log the traceback
        _logger.info(_('Task failed : [%s]') % task_id)
    if kwargs.get('scheduled_call_id') is not None:
        utils.increment_failure_count(kwargs['scheduled_call_id'])
    if not self.request.called_directly:
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['state'] = constants.CALL_ERROR_STATE
        task_status['finish_time'] = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        task_status['traceback'] = einfo.traceback
        error = exc if isinstance(exc, PulpException) else PulpException(str(exc))
        task_status['error'] = error.to_dict()
        task_status.save()
    self._handle_cProfile(task_id)
    common_utils.delete_working_directory()
def __init__(self, repo, publish_conduit, config, distributor_type):
    """
    :param repo: Pulp managed Yum repository
    :type repo: pulp.plugins.model.Repository
    :param publish_conduit: Conduit providing access to relative Pulp functionality
    :type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
    :param config: Pulp configuration for the distributor
    :type config: pulp.plugins.config.PluginCallConfiguration
    :param distributor_type: The type of the distributor that is being published
    :type distributor_type: str

    :ivar last_published: last time this distributor published the repo
    :ivar last_delete: last time a unit was removed from this repository
    :ivar repo: repository being operated on
    :ivar predistributor: distributor object that is associated with this
                          distributor. It's publish history affects the type of
                          publish is performed
    :ivar symlink_list: list of symlinks to rsync
    :ivar content_unit_file_list: list of content units to rsync
    :ivar symlink_src: path to directory containing all symlinks
    """
    super(Publisher, self).__init__("Repository publish", repo,
                                    publish_conduit, config,
                                    distributor_type=distributor_type)

    distributor = Distributor.objects.get_or_404(
        repo_id=self.repo.id,
        distributor_id=publish_conduit.distributor_id)
    self.last_published = distributor["last_publish"]
    self.last_deleted = repo.last_unit_removed
    self.repo = repo
    self.predistributor = self._get_predistributor()

    if self.last_published:
        string_date = dateutils.format_iso8601_datetime(self.last_published)
    else:
        string_date = None
    # Collect the predistributor's publish history since our last publish; it
    # determines whether a fast-forward publish is possible.
    if self.predistributor:
        search_params = {'repo_id': repo.id,
                         'distributor_id': self.predistributor["id"],
                         'started': {"$gte": string_date}}
        self.predist_history = RepoPublishResult.get_collection().find(search_params)
    else:
        self.predist_history = []

    self.remote_path = self.get_remote_repo_path()

    if self.is_fastforward():
        # Fast-forward: only rsync units changed between our last publish and
        # the predistributor's last publish (open-ended if no predistributor).
        start_date = self.last_published
        end_date = None
        if self.predistributor:
            end_date = self.predistributor["last_publish"]
        date_filter = self.create_date_range_filter(start_date=start_date,
                                                    end_date=end_date)
    else:
        date_filter = None

    self.symlink_list = []
    self.content_unit_file_list = []
    self.symlink_src = os.path.join(self.get_working_dir(), '.relative/')
    self._add_necesary_steps(date_filter=date_filter, config=config)
def test_task_status_update(self):
    """
    Tests the successful operation of task status update.
    """
    task_id = self.get_random_uuid()
    worker_name = 'special_worker_name'
    TaskStatus(task_id, worker_name, ['test-tag1', 'test-tag2'], 'waiting').save()
    new_start_time = dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    new_report = {'report-id': 'my-progress'}
    TaskStatus.objects(task_id=task_id).update_one(
        set__start_time=new_start_time,
        set__state='running',
        set__progress_report=new_report)
    task_status = TaskStatus.objects(task_id=task_id).first()
    self.assertEqual(task_status['start_time'], new_start_time)
    # Make sure that parse_iso8601_datetime is able to parse the start_time
    # without errors
    dateutils.parse_iso8601_datetime(task_status['start_time'])
    self.assertEqual(task_status['state'], 'running')
    self.assertEqual(task_status['progress_report'], new_report)
    self.assertEqual(task_status['worker_name'], worker_name)
def test_task_status_update_fires_notification(self, mock_send):
    """
    Test that update_one() also fires a notification.
    """
    task_id = self.get_random_uuid()
    worker_name = 'special_worker_name'
    tags = ['test-tag1', 'test-tag2']
    state = 'waiting'
    ts = TaskStatus(task_id, worker_name, tags, state)
    ts.save()
    # ensure event was fired for save()
    mock_send.assert_called_once_with(ts, routing_key="tasks.%s" % task_id)
    now = datetime.now(dateutils.utc_tz())
    start_time = dateutils.format_iso8601_datetime(now)
    delta = {'start_time': start_time,
             'state': 'running',
             'progress_report': {'report-id': 'my-progress'}}
    # exactly one notification so far (the save() above)
    self.assertEquals(len(mock_send.call_args_list), 1)
    TaskStatus.objects(task_id=task_id).update_one(
        set__start_time=delta['start_time'], set__state=delta['state'],
        set__progress_report=delta['progress_report'])
    # ensure event was fired for update_one()
    self.assertEquals(len(mock_send.call_args_list), 2)
    mock_send.assert_called_with(ts, routing_key="tasks.%s" % task_id)
def set_task_started(task_id, timestamp=None):
    """
    Update a task's state to reflect that it has started running.

    :param task_id: The identity of the task to be updated.
    :type task_id: basestring
    :param timestamp: The (optional) ISO-8601 started timestamp (UTC);
        defaults to the current UTC time.
    :type timestamp: str
    """
    collection = TaskStatus.get_collection()
    started = timestamp or dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    # Record the start time unconditionally ...
    collection.update({'task_id': task_id},
                      {'$set': {'start_time': started}},
                      safe=True)
    # ... but only transition to RUNNING from a pre-execution state.
    select = {
        'task_id': task_id,
        'state': {'$in': [constants.CALL_WAITING_STATE,
                          constants.CALL_ACCEPTED_STATE]}
    }
    collection.update(select,
                      {'$set': {'state': constants.CALL_RUNNING_STATE}},
                      safe=True)
def set_task_succeeded(task_id, result=None, timestamp=None):
    """
    Update a task's state to reflect that it has succeeded.

    :param task_id: The identity of the task to be updated.
    :type task_id: basestring
    :param result: The optional value returned by the task execution.
    :type result: anything
    :param timestamp: The (optional) ISO-8601 finished timestamp (UTC);
        defaults to the current UTC time.
    :type timestamp: str
    """
    collection = TaskStatus.get_collection()
    finished = timestamp or dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    delta = {'finish_time': finished,
             'state': constants.CALL_FINISHED_STATE,
             'result': result}
    collection.update({'task_id': task_id}, {'$set': delta}, safe=True)
def set_task_failed(task_id, traceback=None, timestamp=None):
    """
    Update a task's state to reflect that it has failed.

    :param task_id: The identity of the task to be updated.
    :type task_id: basestring
    :param traceback: A string representation of the traceback resulting
        from the task execution.
    :type traceback: basestring
    :param timestamp: The (optional) ISO-8601 finished timestamp (UTC);
        defaults to the current UTC time.
    :type timestamp: str
    """
    collection = TaskStatus.get_collection()
    if not timestamp:
        now = datetime.now(dateutils.utc_tz())
        finished = dateutils.format_iso8601_datetime(now)
    else:
        finished = timestamp
    update = {
        '$set': {
            'finish_time': finished,
            'state': constants.CALL_ERROR_STATE,
            'traceback': traceback
        }
    }
    collection.update({'task_id': task_id}, update, safe=True)
def test_build_progress_report(self):
    """
    Test the build_progress_report() method.
    """
    attrs = {
        'state': progress.SyncProgressReport.STATE_ISOS_IN_PROGRESS,
        'state_times': {
            progress.SyncProgressReport.STATE_ISOS_IN_PROGRESS: datetime.utcnow()},
        'num_isos': 5,
        'num_isos_finished': 3,
        'iso_error_messages': {'an.iso': "No!"},
        'error_message': 'This is an error message.',
        'traceback': 'This is a traceback.',
        'total_bytes': 1024,
        'finished_bytes': 512,
    }
    report = progress.SyncProgressReport(self.conduit, **attrs)
    report = report.build_progress_report()
    # Make sure all the appropriate attributes were set
    expected_state_times = dict(
        (key, format_iso8601_datetime(value))
        for key, value in attrs['state_times'].items())
    self.assertTrue(report['state_times'], expected_state_times)
    for name in ('state', 'num_isos', 'num_isos_finished', 'iso_error_messages',
                 'error_message', 'traceback', 'total_bytes', 'finished_bytes'):
        self.assertEqual(report[name], attrs[name])
def succeeded(self, reply):
    """
    Notification (reply) indicating an RMI succeeded.
    This information is relayed to the task coordinator.

    :param reply: A successful reply object.
    :type reply: gofer.rmi.async.Succeeded
    """
    _logger.info(_('Task RMI (succeeded): %(r)s'), {'r': reply})
    call_context = dict(reply.data)
    action = call_context.get('action')
    task_id = call_context['task_id']
    result = dict(reply.retval)
    # Fall back to the current UTC time if the reply carried no timestamp.
    finished = reply.timestamp or dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    TaskStatus.objects(task_id=task_id).update_one(
        set__finish_time=finished,
        set__state=constants.CALL_FINISHED_STATE,
        set__result=result)
    if action == 'bind':
        if result['succeeded']:
            ReplyHandler._bind_succeeded(task_id, call_context)
        else:
            ReplyHandler._bind_failed(task_id, call_context)
    elif action == 'unbind':
        if result['succeeded']:
            ReplyHandler._unbind_succeeded(call_context)
        else:
            ReplyHandler._unbind_failed(task_id, call_context)
def succeeded(self, reply):
    """
    Notification (reply) indicating an RMI succeeded.
    This information is relayed to the task coordinator.

    :param reply: A successful reply object.
    :type reply: gofer.rmi.async.Succeeded
    """
    _logger.info(_('Task RMI (succeeded): %(r)s'), {'r': reply})
    call_context = dict(reply.data)
    action = call_context.get('action')
    task_id = call_context['task_id']
    result = dict(reply.retval)
    # Fall back to the current UTC time if the reply carried no timestamp.
    finished = reply.timestamp or dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    TaskStatus.objects(task_id=task_id).update_one(
        set__finish_time=finished,
        set__state=constants.CALL_FINISHED_STATE,
        set__result=result)
    if action == 'bind':
        if result['succeeded']:
            ReplyHandler._bind_succeeded(task_id, call_context)
        else:
            ReplyHandler._bind_failed(task_id, call_context)
    elif action == 'unbind':
        if result['succeeded']:
            ReplyHandler._unbind_succeeded(call_context)
        else:
            ReplyHandler._unbind_failed(task_id, call_context)
def test_datetime_with_tz(self):
    """Round-trip a tz-aware local datetime through ISO-8601 format/parse."""
    original = datetime.datetime.now(dateutils.local_tz())
    round_tripped = dateutils.parse_iso8601_datetime(
        dateutils.format_iso8601_datetime(original))
    for field in self.dt_fields:
        self.assertTrue(getattr(original, field) == getattr(round_tripped, field),
                        'Field mismatch: %s' % field)
def failed(self, reply):
    """
    Notification (reply) indicating an RMI failed.
    This information used to update the task status.

    :param reply: A failure reply object.
    :type reply: gofer.rmi.async.Failed
    """
    _logger.info(_('Task RMI (failed): %(r)s'), {'r': reply})
    call_context = dict(reply.data)
    action = call_context.get('action')
    task_id = call_context['task_id']
    traceback = reply.xstate['trace']
    # Fall back to the current UTC time if the reply carried no timestamp.
    finished = reply.timestamp or dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    TaskStatus.objects(task_id=task_id).update_one(
        set__finish_time=finished,
        set__state=constants.CALL_ERROR_STATE,
        set__traceback=traceback)
    if action == 'bind':
        ReplyHandler._bind_failed(task_id, call_context)
    elif action == 'unbind':
        ReplyHandler._unbind_failed(task_id, call_context)
def test_first_run_string(self):
    """A first_run supplied as an ISO8601 string is stored unchanged."""
    tomorrow = datetime.utcnow().replace(tzinfo=dateutils.utc_tz()) + timedelta(days=1)
    first_run = dateutils.format_iso8601_datetime(tomorrow)
    call = ScheduledCall('PT1M', 'pulp.tasks.dosomething', first_run=first_run)
    self.assertEqual(first_run, call.first_run)
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    Error handler run by the worker when the task fails.

    For asynchronous tasks, the relevant task status is updated with the
    error state, finish time, traceback, and a serialized PulpException.
    Synchronous (directly called) tasks are not tracked.

    :param exc: The exception raised by the task.
    :param task_id: Unique id of the failed task.
    :param args: Original arguments for the executed task.
    :param kwargs: Original keyword arguments for the executed task.
    :param einfo: celery's ExceptionInfo instance, containing serialized traceback.
    """
    _logger.debug("Task failed : [%s]" % task_id)
    if self.request.called_directly:
        # Eagerly executed tasks are intentionally not tracked in the database.
        return
    task_status = TaskStatus.objects.get(task_id=task_id)
    task_status['state'] = constants.CALL_ERROR_STATE
    task_status['finish_time'] = dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    task_status['traceback'] = einfo.traceback
    # Non-Pulp exceptions are wrapped so that to_dict() is always available.
    if not isinstance(exc, PulpException):
        exc = PulpException(str(exc))
    task_status['error'] = exc.to_dict()
    task_status.save()
def test_build_progress_report(self):
    """
    Test the build_progress_report() method.
    """
    state = progress.SyncProgressReport.STATE_ISOS_IN_PROGRESS
    state_times = {progress.SyncProgressReport.STATE_ISOS_IN_PROGRESS: datetime.utcnow()}
    num_isos = 5
    num_isos_finished = 3
    iso_error_messages = {'an.iso': "No!"}
    error_message = 'This is an error message.'
    traceback = 'This is a traceback.'
    total_bytes = 1024
    finished_bytes = 512
    report = progress.SyncProgressReport(
        self.conduit, state=state, state_times=state_times, num_isos=num_isos,
        num_isos_finished=num_isos_finished, iso_error_messages=iso_error_messages,
        error_message=error_message, traceback=traceback, total_bytes=total_bytes,
        finished_bytes=finished_bytes)
    report = report.build_progress_report()

    # Make sure all the appropriate attributes were set
    self.assertEqual(report['state'], state)
    expected_state_times = {}
    for key, value in state_times.items():
        expected_state_times[key] = format_iso8601_datetime(value)
    # BUG FIX: the original called assertTrue(a, b), which treats the second
    # argument as a failure *message* and passes for any truthy first argument.
    # assertEqual actually compares the serialized state_times.
    self.assertEqual(report['state_times'], expected_state_times)
    self.assertEqual(report['num_isos'], num_isos)
    self.assertEqual(report['num_isos_finished'], num_isos_finished)
    self.assertEqual(report['iso_error_messages'], iso_error_messages)
    self.assertEqual(report['error_message'], error_message)
    self.assertEqual(report['traceback'], traceback)
    self.assertEqual(report['total_bytes'], total_bytes)
    self.assertEqual(report['finished_bytes'], finished_bytes)
def __init__(self, repo, publish_conduit, config, distributor_type):
    """
    :param repo: Pulp managed Yum repository
    :type repo: pulp.plugins.model.Repository
    :param publish_conduit: Conduit providing access to relative Pulp functionality
    :type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
    :param config: Pulp configuration for the distributor
    :type config: pulp.plugins.config.PluginCallConfiguration
    :param distributor_type: The type of the distributor that is being published
    :type distributor_type: str
    :ivar last_published: last time this distributor published the repo
    :ivar last_delete: last time a unit was removed from this repository
    :ivar repo: repository being operated on
    :ivar predistributor: distributor object that is associated with this distributor.
                          It's publish history affects the type of publish is performed
    :ivar symlink_list: list of symlinks to rsync
    :ivar content_unit_file_list: list of content units to rsync
    :ivar symlink_src: path to directory containing all symlinks
    """
    super(Publisher, self).__init__("Repository publish", repo,
                                    publish_conduit, config,
                                    distributor_type=distributor_type)

    distributor = Distributor.objects.get_or_404(
        repo_id=self.repo.id, distributor_id=publish_conduit.distributor_id)
    self.last_published = distributor["last_publish"]
    self.last_deleted = repo.last_unit_removed
    self.repo = repo
    self.predistributor = self._get_predistributor()

    # ISO8601 string form of the last publish time, used as the lower bound
    # when querying the predistributor's publish history below.
    if self.last_published:
        string_date = dateutils.format_iso8601_datetime(self.last_published)
    else:
        string_date = None
    if self.predistributor:
        # Publish results of the predistributor since this distributor's
        # last publish; these determine how the publish is performed.
        search_params = {'repo_id': repo.id,
                         'distributor_id': self.predistributor["id"],
                         'started': {"$gte": string_date}}
        self.predist_history = RepoPublishResult.get_collection().find(search_params)
    else:
        self.predist_history = []

    self.remote_path = self.get_remote_repo_path()

    if self.is_fastforward():
        # Fast-forward: only sync units added/changed between the last
        # publish and the predistributor's last publish (if any).
        start_date = self.last_published
        end_date = None
        if self.predistributor:
            end_date = self.predistributor["last_publish"]
        date_filter = self.create_date_range_filter(start_date=start_date,
                                                    end_date=end_date)
    else:
        # Full publish: no date restriction on the units considered.
        date_filter = None

    self.symlink_list = []
    self.content_unit_file_list = []
    self.symlink_src = os.path.join(self.get_working_dir(), '.relative/')
    self._add_necesary_steps(date_filter=date_filter, config=config)
def __call__(self, *args, **kwargs):
    """
    Override of CeleryTask's __call__() used for Pulp task state tracking.

    A task whose status is already 'canceled' is skipped entirely. For
    asynchronous tasks the status record is marked running with a start
    time; eagerly executed (synchronous) tasks are not tracked.
    """
    try:
        task_status = TaskStatus.objects.get(task_id=self.request.id)
    except DoesNotExist:
        task_status = None
    # Honor a cancellation that arrived before the task started running.
    if task_status is not None and task_status['state'] == constants.CALL_CANCELED_STATE:
        _logger.debug("Task cancel received for task-id : [%s]" % self.request.id)
        return
    if not self.request.called_directly:
        start_time = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
        # 'upsert' avoids a possible race condition described in the
        # apply_async method above.
        TaskStatus.objects(task_id=self.request.id).update_one(
            set__state=constants.CALL_RUNNING_STATE,
            set__start_time=start_time,
            upsert=True)
    # Run the actual task
    _logger.debug("Running task : [%s]" % self.request.id)
    return super(Task, self).__call__(*args, **kwargs)
def failed(self, reply):
    """
    Handle a reply indicating an RMI failed.

    The task status is moved to the error state with the remote traceback,
    and a failed bind/unbind action (if any) is recorded.

    :param reply: A failure reply object.
    :type reply: gofer.rmi.async.Failed
    """
    _logger.info(_('Task RMI (failed): %(r)s'), {'r': reply})
    call_context = dict(reply.data)
    action = call_context.get('action')
    task_id = call_context['task_id']
    trace = reply.xstate['trace']
    finished = reply.timestamp
    if not finished:
        finished = dateutils.format_iso8601_datetime(
            datetime.now(dateutils.utc_tz()))
    updates = {
        'set__finish_time': finished,
        'set__state': constants.CALL_ERROR_STATE,
        'set__traceback': trace,
    }
    TaskStatus.objects(task_id=task_id).update_one(**updates)
    # Dispatch the failure to the matching agent-action handler, if any.
    action_handlers = {
        'bind': ReplyHandler._bind_failed,
        'unbind': ReplyHandler._unbind_failed,
    }
    handler = action_handlers.get(action)
    if handler is not None:
        handler(task_id, call_context)
def test_update_task_status(self):
    """
    Tests the successful operation of update_task_status().
    """
    task_id = self.get_random_uuid()
    queue = 'special_queue'
    TaskStatusManager.create_task_status(task_id, queue,
                                         ['test-tag1', 'test-tag2'], 'waiting')
    start_time = dateutils.format_iso8601_datetime(
        datetime.now(dateutils.utc_tz()))
    delta = {'start_time': start_time,
             'state': 'running',
             'disregard': 'ignored',
             'progress_report': {'report-id': 'my-progress'}}

    updated = TaskStatusManager.update_task_status(task_id, delta)
    task_status = TaskStatusManager.find_by_task_id(task_id)

    # Known fields from the delta must be applied to the stored document...
    self.assertEqual(task_status['start_time'], delta['start_time'])
    # ...and the stored start_time must still be parseable ISO8601.
    dateutils.parse_iso8601_datetime(task_status['start_time'])
    self.assertEqual(task_status['state'], delta['state'])
    self.assertEqual(task_status['progress_report'], delta['progress_report'])
    self.assertEqual(task_status['queue'], queue)
    # The returned document reflects the same changes.
    self.assertEqual(updated['start_time'], delta['start_time'])
    self.assertEqual(updated['state'], delta['state'])
    self.assertEqual(updated['progress_report'], delta['progress_report'])
    # Unknown delta keys are dropped, not stored.
    self.assertTrue('disregard' not in updated)
    self.assertTrue('disregard' not in task_status)
def test_update_task_status(self):
    """
    Tests the successful operation of update_task_status().
    """
    task_id = self.get_random_uuid()
    queue = 'special_queue'
    tags = ['test-tag1', 'test-tag2']
    TaskStatusManager.create_task_status(task_id, queue, tags, 'waiting')

    now = datetime.now(dateutils.utc_tz())
    delta = {
        'start_time': dateutils.format_iso8601_datetime(now),
        'state': 'running',
        'disregard': 'ignored',  # unknown key: must be filtered out
        'progress_report': {'report-id': 'my-progress'},
    }
    updated = TaskStatusManager.update_task_status(task_id, delta)
    task_status = TaskStatusManager.find_by_task_id(task_id)

    self.assertEqual(task_status['start_time'], delta['start_time'])
    # Make sure that parse_iso8601_datetime is able to parse the start_time without errors
    dateutils.parse_iso8601_datetime(task_status['start_time'])
    self.assertEqual(task_status['state'], delta['state'])
    self.assertEqual(task_status['progress_report'], delta['progress_report'])
    self.assertEqual(task_status['queue'], queue)
    for key in ('start_time', 'state', 'progress_report'):
        self.assertEqual(updated[key], delta[key])
    self.assertTrue('disregard' not in updated)
    self.assertTrue('disregard' not in task_status)
def serialize(self):
    """
    Serialize the call report for either the wire or storage in the db.

    @return: dictionary containing the serialized fields of the call report.
    @rtype: dict
    """
    simple_fields = ('call_request_id', 'call_request_group_id', 'call_request_tags',
                     'schedule_id', 'principal_login', 'response', 'reasons',
                     'state', 'progress', 'dependency_failures')
    data = dict((name, getattr(self, name)) for name in simple_fields)
    # legacy aliases kept for backward compatibility
    data['task_id'] = self.call_request_id
    data['task_group_id'] = self.call_request_group_id
    data['tags'] = self.call_request_tags
    # the result is only reported when configured to do so
    data['result'] = self.result if self.serialize_result else OBFUSCATED_VALUE
    # format the exception and traceback, if they exist
    ex = self.exception
    if ex is None:
        data['exception'] = None
    else:
        data['exception'] = traceback.format_exception_only(type(ex), ex)
    tb = self.traceback
    if tb is None:
        data['traceback'] = None
    elif isinstance(tb, (str, list, tuple)):
        data['traceback'] = str(tb)
    else:
        data['traceback'] = traceback.format_tb(tb)
    # datetimes are serialized as ISO8601 strings
    for name in ('start_time', 'finish_time'):
        value = getattr(self, name)
        data[name] = None if value is None else dateutils.format_iso8601_datetime(value)
    return data
def _now_timestamp():
    """
    @return: UTC timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    return dateutils.format_iso8601_datetime(
        dateutils.now_utc_datetime_with_tzinfo())
def _now_timestamp():
    """
    @return: timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    # NOTE(review): this variant stamps in the *local* timezone, while sibling
    # _now_timestamp helpers elsewhere use UTC -- confirm this is intentional.
    local_now = datetime.datetime.now(dateutils.local_tz())
    return dateutils.format_iso8601_datetime(local_now)
def test___init__(self):
    """
    Test the __init__() method.
    """
    start = datetime.now()
    expected = {
        'task_id': str(uuid4()),
        'worker_name': 'some_worker',
        'tags': ['tag_1', 'tag_2'],
        'state': constants.CALL_ACCEPTED_STATE,
        'spawned_tasks': ['foo'],
        'error': {'error': 'some_error'},
        'progress_report': {'what do we want?': 'progress!',
                            'when do we want it?': 'now!'},
        'task_type': 'some.task',
        'start_time': dateutils.format_iso8601_datetime(start),
        'finish_time': dateutils.format_iso8601_datetime(start + timedelta(minutes=5)),
        'result': None,
    }

    ts = TaskStatus(
        expected['task_id'], expected['worker_name'], expected['tags'],
        expected['state'], spawned_tasks=expected['spawned_tasks'],
        error=expected['error'], progress_report=expected['progress_report'],
        task_type=expected['task_type'], start_time=expected['start_time'],
        finish_time=expected['finish_time'], result=expected['result'])

    # Every constructor argument must land on the matching attribute.
    for name, value in expected.items():
        self.assertEqual(getattr(ts, name), value)
    # traceback and exception are never set by __init__
    self.assertEqual(ts.traceback, None)
    self.assertEqual(ts.exception, None)
def test_iso8601_string_field(self):
    """A valid ISO8601 string passes validation; other types/values raise."""
    field = fields.ISO8601StringField()
    field.validate(dateutils.format_iso8601_datetime(datetime.now()))
    for bad_value in ('date', {}, [], 1, datetime.now()):
        self.assertRaises(ValidationError, field.validate, bad_value)
def test_first_run_datetime(self):
    """A datetime first_run is stored as the equivalent ISO8601 string."""
    first_run = datetime.utcnow().replace(tzinfo=dateutils.utc_tz()) + timedelta(days=1)
    expected = dateutils.format_iso8601_datetime(first_run)

    call = ScheduledCall('PT1M', 'pulp.tasks.dosomething', first_run=first_run)

    # make sure it is an ISO8601 string with the correct value
    self.assertTrue(isinstance(call.first_run, basestring))
    self.assertEqual(expected, call.first_run)
def _now_timestamp():
    """
    @return: UTC timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    # Current tz-aware UTC time, rendered as an ISO8601 string.
    utc_now = dateutils.now_utc_datetime_with_tzinfo()
    return dateutils.format_iso8601_datetime(utc_now)
def _now_timestamp():
    """
    @return: timestamp suitable for indicating when a publish completed
    @rtype: str
    """
    # NOTE(review): uses the local timezone; other _now_timestamp helpers in
    # the codebase use UTC -- verify this difference is deliberate.
    return dateutils.format_iso8601_datetime(
        datetime.datetime.now(dateutils.local_tz()))
def _generate_distributor(self, repo_id, config=None, previously_published=True):
    """Insert a yum_distributor record for repo_id and return the stored doc."""
    distributor_id = str(uuid.uuid4())
    dist = RepoDistributor(repo_id, distributor_id, "yum_distributor",
                           config or {}, True)
    if previously_published:
        # Stamp a prior publish so fast-forward logic sees publish history.
        dist["last_published"] = dateutils.format_iso8601_datetime(
            datetime.datetime.now())
    self.distributors_collection.insert(dist)
    return self.distributors_collection.find_one({"id": distributor_id})
def serialize(self):
    """
    Serialize the call report for either the wire or storage in the db.

    @return: dictionary containing the serialized fields of the call report.
    @rtype: dict
    """
    data = {}
    # straight forward fields
    for field in ('call_request_id', 'call_request_group_id', 'call_request_tags',
                  'schedule_id', 'principal_login', 'response', 'reasons',
                  'state', 'progress', 'dependency_failures'):
        data[field] = getattr(self, field)

    # legacy fields
    data['task_id'] = self.call_request_id
    data['task_group_id'] = self.call_request_group_id
    data['tags'] = self.call_request_tags

    # report the result only when configured to; otherwise obfuscate it
    if self.serialize_result:
        data['result'] = self.result
    else:
        data['result'] = OBFUSCATED_VALUE

    # the exception, if present, is rendered to its summary lines
    exception_value = self.exception
    data['exception'] = (traceback.format_exception_only(type(exception_value),
                                                         exception_value)
                         if exception_value is not None else None)

    # the traceback may already be pre-rendered (str/list/tuple) or be a
    # live traceback object needing formatting
    traceback_value = self.traceback
    if traceback_value is None:
        data['traceback'] = None
    elif isinstance(traceback_value, (str, list, tuple)):
        data['traceback'] = str(traceback_value)
    else:
        data['traceback'] = traceback.format_tb(traceback_value)

    # format the date times in iso8601 format
    for field in ('start_time', 'finish_time'):
        timestamp = getattr(self, field)
        if timestamp is not None:
            data[field] = dateutils.format_iso8601_datetime(timestamp)
        else:
            data[field] = None
    return data
def test_save_insert_with_set_on_insert(self):
    """
    Test the save method with set on insert arguments when the object is not
    already in the database.
    """
    begin = datetime.now()
    attrs = {
        'task_id': str(uuid4()),
        'worker_name': 'some_worker',
        'tags': ['tag_1', 'tag_2'],
        'state': constants.CALL_RUNNING_STATE,
        'spawned_tasks': ['foo'],
        'error': {'error': 'some_error'},
        'progress_report': {'what do we want?': 'progress!',
                            'when do we want it?': 'now!'},
        'task_type': 'some.task',
        'start_time': dateutils.format_iso8601_datetime(begin),
        'finish_time': dateutils.format_iso8601_datetime(begin + timedelta(minutes=5)),
        'result': None,
    }
    ts = TaskStatus(
        attrs['task_id'], attrs['worker_name'], attrs['tags'], attrs['state'],
        spawned_tasks=attrs['spawned_tasks'], error=attrs['error'],
        progress_report=attrs['progress_report'], task_type=attrs['task_type'],
        start_time=attrs['start_time'], finish_time=attrs['finish_time'],
        result=attrs['result'])

    # This should cause ts to be in the database
    ts.save_with_set_on_insert(fields_to_set_on_insert=['state', 'start_time'])

    stored = TaskStatus.objects()
    # There should only be one TaskStatus in the db
    self.assertEqual(len(stored), 1)
    stored = stored[0]
    # Every attribute must have round-tripped to the database unchanged.
    for name, value in attrs.items():
        self.assertEqual(stored[name], value)
    # These are always None
    self.assertEqual(stored['traceback'], None)
    self.assertEqual(stored['exception'], None)
def on_success(self, retval, task_id, args, kwargs):
    """
    This overrides the success handler run by the worker when the task
    executes successfully. It updates state, finish_time and traceback
    of the relevant task status for asynchronous tasks. Skip updating
    status for synchronous tasks.

    :param retval: The return value of the task.
    :param task_id: Unique id of the executed task.
    :param args: Original arguments for the executed task.
    :param kwargs: Original keyword arguments for the executed task.
    """
    _logger.debug("Task successful : [%s]" % task_id)
    if kwargs.get('scheduled_call_id') is not None:
        # A successful run of a scheduled call resets its failure counter,
        # unless the real work was delegated to a spawned task (AsyncResult).
        if not isinstance(retval, AsyncResult):
            _logger.info(_('resetting consecutive failure count for schedule %(id)s'
                           ) % {'id': kwargs['scheduled_call_id']})
            utils.reset_failure_count(kwargs['scheduled_call_id'])
    if not self.request.called_directly:
        now = datetime.now(dateutils.utc_tz())
        finish_time = dateutils.format_iso8601_datetime(now)
        task_status = TaskStatus.objects.get(task_id=task_id)
        task_status['finish_time'] = finish_time
        task_status['result'] = retval

        # Only set the state to finished if it's not already in a complete state. This is
        # important for when the task has been canceled, so we don't move the task from canceled
        # to finished.
        if task_status['state'] not in constants.CALL_COMPLETE_STATES:
            task_status['state'] = constants.CALL_FINISHED_STATE

        if isinstance(retval, TaskResult):
            # Unwrap the TaskResult: store its value, error, and the ids of
            # any tasks it spawned (AsyncResult objects or plain dicts).
            task_status['result'] = retval.return_value
            if retval.error:
                task_status['error'] = retval.error.to_dict()
            if retval.spawned_tasks:
                task_list = []
                for spawned_task in retval.spawned_tasks:
                    if isinstance(spawned_task, AsyncResult):
                        task_list.append(spawned_task.task_id)
                    elif isinstance(spawned_task, dict):
                        task_list.append(spawned_task['task_id'])
                task_status['spawned_tasks'] = task_list
        if isinstance(retval, AsyncResult):
            # The task delegated its work: record the spawned task id and
            # clear the result (the spawned task will report its own).
            task_status['spawned_tasks'] = [retval.task_id, ]
            task_status['result'] = None

        task_status.save()

        if config.get('profiling', 'enabled') is True:
            # Dump the cProfile stats collected for this task run.
            profile_directory = config.get('profiling', 'directory')
            self.pr.disable()
            self.pr.dump_stats("%s/%s" % (profile_directory, task_id))

        common_utils.delete_working_directory()
def __init__(self, consumer_id, originator, event_type, details):
    """
    :param consumer_id: identifier of the consumer the event pertains to
    :param originator: who/what triggered the event
    :param event_type: type of the history event
    :param details: arbitrary event details
    """
    super(ConsumerHistoryEvent, self).__init__()
    self.consumer_id = consumer_id
    self.originator = originator
    self.type = event_type
    self.details = details
    # Stamp the event at creation time, in UTC, as an ISO8601 string.
    self.timestamp = dateutils.format_iso8601_datetime(
        datetime.datetime.now(dateutils.utc_tz()))
def __init__(self, consumer_id, originator, event_type, details):
    """
    :param consumer_id: identifier of the consumer the event pertains to
    :param originator: who/what triggered the event
    :param event_type: type of the history event
    :param details: arbitrary event details
    """
    super(ConsumerHistoryEvent, self).__init__()
    self.consumer_id = consumer_id
    self.originator = originator
    self.type = event_type
    self.details = details
    # Record when the event was created (UTC, ISO8601).
    created_at = datetime.datetime.now(dateutils.utc_tz())
    self.timestamp = dateutils.format_iso8601_datetime(created_at)
def _now_timestamp():
    """
    Return a current timestamp in iso8601 format.

    :return: iso8601 UTC timestamp with timezone specified.
    :rtype: str
    """
    return dateutils.format_iso8601_datetime(
        dateutils.now_utc_datetime_with_tzinfo())
def _generate_distributor(self, repo_id, config=None, previously_published=True):
    """Create and save a yum_distributor for repo_id; return the raw stored doc."""
    dist_id = str(uuid.uuid4())
    distributor = model.Distributor(repo_id, dist_id, 'yum_distributor',
                                    config or {}, True)
    if previously_published:
        # A prior publish timestamp makes the distributor look already-published.
        distributor.last_published = dateutils.format_iso8601_datetime(
            datetime.datetime.now())
    distributor.save()
    return self.distributors_collection.find_one({'distributor_id': dist_id})
def _generate_distributor(self, repo_id, config=None, previously_published=True):
    """Insert a yum_distributor document for repo_id and return it from the db."""
    dist_id = str(uuid.uuid4())
    distributor = RepoDistributor(repo_id, dist_id, 'yum_distributor',
                                  config or {}, True)
    if previously_published:
        # Mark it as having published before, so history-dependent code paths fire.
        distributor['last_published'] = dateutils.format_iso8601_datetime(
            datetime.datetime.now())
    self.distributors_collection.insert(distributor)
    return self.distributors_collection.find_one({'id': dist_id})
def _convert_repo_dates_to_strings(repo):
    """
    Convert the last_unit_added & last_unit_removed fields of a repository
    to ISO8601 strings with the timezone specified.

    This modifies the repository in place.

    :param repo: database representation of a repo
    :type repo: dict
    """
    # Both fields get identical treatment: convert the native datetime object
    # to a UTC string with timezone specified, skipping missing/empty values.
    for key in ('last_unit_added', 'last_unit_removed'):
        value = repo.get(key)
        if value:
            utc_value = dateutils.to_utc_datetime(value, no_tz_equals_local_tz=False)
            repo[key] = dateutils.format_iso8601_datetime(utc_value)