def create_prepare_repository(self, **kwargs):
    '''
    Create a new prepare job for this repository.
    '''
    eager_fields = kwargs.pop('_eager_fields', None)
    job_class = self._get_job_class()
    fields = ('extra_vars', 'job_type')

    # Drop any override that is not an allowed field instead of failing.
    unallowed_fields = set(kwargs.keys()) - set(fields)
    if unallowed_fields:
        logger.warning('Fields %s are not allowed as overrides.', unallowed_fields)
        for field in unallowed_fields:
            kwargs.pop(field)

    job = copy_model_by_class(self, job_class, fields, kwargs)

    if eager_fields:
        for fd, val in eager_fields.items():
            setattr(job, fd, val)

    # Link the prepare job back to this repository.
    job.repository_id = self.pk
    job.name = "Prepare Repository {}".format(self.name)
    job.description = "Repository {} Borg Preparation".format(self.name)
    job.save()

    from cyborgbackup.main.signals import disable_activity_stream

    # No m2m fields need to be copied for a prepare job, so pass an empty tuple.
    fields = ()
    with disable_activity_stream():
        copy_m2m_relationships(self, job, fields, kwargs=kwargs)
    return job
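# Usage sketch, not part of the original module: names like `repo` and the
# `signal_start()` call are assumptions about the surrounding API. A caller
# might trigger a preparation run roughly like this:
#
#     repo = Repository.objects.get(name='main-borg-repo')
#     prepare_job = repo.create_prepare_repository(extra_vars={})
#     prepare_job.signal_start()  # assumed start entry point for the created job
#
# Any keyword argument outside ('extra_vars', 'job_type') is logged and dropped.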
def create_job(self, **kwargs):
    '''
    Create a new job based on this job.
    '''
    eager_fields = kwargs.pop('_eager_fields', None)
    job_class = self.__class__
    fields = self._get_job_field_names()

    # Drop any override that is not an allowed field instead of failing.
    unallowed_fields = set(kwargs.keys()) - set(fields)
    if unallowed_fields:
        logger.warning('Fields %s are not allowed as overrides.', unallowed_fields)
        for field in unallowed_fields:
            kwargs.pop(field)

    job = copy_model_by_class(self, job_class, fields, kwargs)

    if eager_fields:
        for fd, val in eager_fields.items():
            setattr(job, fd, val)

    # Set the back-link from the new job to its parent.
    parent_field_name = job_class._get_parent_field_name()
    setattr(job, parent_field_name, self)
    job.save()

    from cyborgbackup.main.signals import disable_activity_stream
    with disable_activity_stream():
        copy_m2m_relationships(self, job, fields, kwargs=kwargs)

    job.create_config_from_prompts(kwargs)
    return job
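# Usage sketch, not part of the original module: assuming `job` is an existing
# Job whose field names include 'extra_vars', a relaunch-style copy could be
# created like this:
#
#     new_job = job.create_job(extra_vars={'forced': True})
#     new_job.signal_start()  # assumed start entry point, as above
#
# Overrides outside _get_job_field_names() are logged and discarded.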
def start_task(self, task, dependent_tasks=None):
    from cyborgbackup.main.tasks import handle_work_error, handle_work_success

    # Avoid a mutable default argument; treat "no dependencies" as an empty list.
    dependent_tasks = dependent_tasks or []

    task_actual = {
        'type': get_type_for_model(type(task)),
        'id': task.id,
    }
    dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

    error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
    success_handler = handle_work_success.s(task_actual=task_actual)

    task.status = 'waiting'

    (start_status, opts) = task.pre_start()
    if not start_status:
        task.status = 'failed'
        if task.job_explanation:
            task.job_explanation += ' '
        task.job_explanation += 'Task failed pre-start check.'
        task.save()
        # TODO: run error handler to fail sub-tasks and send notifications.
    else:
        logger.info('Submitting %s to instance group cyborgbackup.', task.log_format)
        with disable_activity_stream():
            task.celery_task_id = str(uuid.uuid4())
            task.save()
        self.consume_capacity(task, 'cyborgbackup')

    def post_commit():
        # Only emit status and dispatch the Celery task once the transaction commits.
        task.websocket_emit_status(task.status)
        if task.status != 'failed':
            task.start_celery_task(opts,
                                   error_callback=error_handler,
                                   success_callback=success_handler,
                                   queue='cyborgbackup')

    connection.on_commit(post_commit)
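# Usage sketch, not part of the original module: inside a task-manager loop the
# scheduler might submit a pending job together with its prepare-job dependencies
# (the `dependent_jobs` relation below is illustrative):
#
#     pending = Job.objects.filter(status='pending').first()
#     if pending is not None:
#         self.start_task(pending, dependent_tasks=list(pending.dependent_jobs.all()))
#
# Nothing reaches the Celery queue unless the enclosing database transaction
# commits, because dispatch happens in the on_commit() hook.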