def __init__(self, repository=None):
    """Create a new job, register it in the data root and index it by state.

    :param repository: optional Repository this job operates on.
    """
    self.created = timezone.now()
    self.timestamp_end = None
    self.timestamp_start = None
    self.repository = repository
    self.state = self.State.job_created
    # insert() stores the job and hands back its numeric id.
    self.id = data_root().jobs.insert(self)
    if repository:
        repository.jobs[self.id] = self
    # jobs_by_state is a two-level index: state -> {job id -> job}.
    data_root().jobs_by_state[self.state][self.id] = self
def add_view(self, request):
    """Render the add-repository form; on valid POST create the repository."""
    post_data = request.POST or None
    form = Repository.Form(post_data)
    if not (post_data and form.is_valid()):
        # GET request, or an invalid submission: show the form (with errors).
        context = {'repository_form': form}
        return self.render(request, 'core/repository/add.html', context)
    new_repository = Repository(**form.cleaned_data)
    data_root().repositories.append(new_repository)
    transaction.get().note('Added repository %s' % new_repository.name)
    transaction.commit()
    return self.redirect_to()
def force_state(self, state):
    """Unconditionally move this job to *state*, bypassing transition checks.

    Returns True if the state changed, False if the job already was in
    *state*. Runs in its own transaction and fires the post-force-state hook.
    """
    with transaction.manager as txn:
        if self.state == state:
            return False
        log.debug('%s: Forced state %s -> %s', self.id, self.state, state)
        # Start timestamp is derived from the state we are leaving.
        self._check_set_start_timestamp(self.state)
        # Re-index the job under its new state.
        del data_root().jobs_by_state[self.state][self.id]
        self.state = state
        data_root().jobs_by_state[self.state][self.id] = self
        self._check_set_end_timestamp()
        txn.note('Job %s forced to state %s' % (self.id, state))
        borgcube.utils.hook.borgcube_job_post_force_state(job=self, forced_state=state)
        return True
def __init__(self, trigger, enabled=True, access=(), comment=''):
    """Create a trigger registration, indexed by a freshly generated UUID.

    :param trigger: the object run when the trigger fires.
    :param enabled: whether the trigger may run.
    :param access: iterable of allowed access contexts (stored as a tuple).
    :param comment: free-form operator note.
    """
    self.trigger = trigger
    self.enabled = enabled
    self.access = tuple(access)
    self.comment = comment
    self.id = str(uuid.uuid4())
    data_root().trigger_ids[self.id] = self
def clean(self):
    """Resolve the submitted job_config OID into the referenced BackupConfig."""
    cleaned = super().clean()
    referenced = data_root()._p_jar[oid_bytes(cleaned['job_config'])]
    if isinstance(referenced, BackupConfig):
        cleaned['job_config'] = referenced
        return cleaned
    raise ValidationError('Invalid object reference')
def __init__(self, id, repository, name, client=None, job=None, comment='', nfiles=0,
             original_size=0, compressed_size=0, deduplicated_size=0,
             duration=datetime.timedelta(), timestamp=None, timestamp_end=None):
    """Create an archive record and register it in all relevant indices.

    :param id: archive identifier; used as the key in the archive indices.
    :param repository: Repository the archive lives in.
    :param name: archive name.
    :param client: optional Client the archive belongs to.
    :param job: optional job that created the archive.
    :param comment: free-form note.
    :param nfiles: number of files in the archive.
    :param original_size: uncompressed size in bytes.
    :param compressed_size: compressed size in bytes.
    :param deduplicated_size: deduplicated size in bytes.
    :param duration: how long the archive took to create.
    :param timestamp: archive start time.
    :param timestamp_end: archive end time.
    """
    self.id = id
    self.repository = repository
    self.client = client
    self.job = job
    self.name = name
    self.comment = comment
    self.nfiles = nfiles
    self.original_size = original_size
    self.compressed_size = compressed_size
    self.deduplicated_size = deduplicated_size
    self.duration = duration
    self.timestamp = timestamp
    self.timestamp_end = timestamp_end
    # Register globally, per repository and (if known) per client.
    data_root().archives[id] = self
    repository.archives[id] = self
    if client:
        client.archives[id] = self
def object_publisher(request, path):
    """
    Renders a *path* against the *RootPublisher*.

    The request will receive the following attributes:

    - *publisher*: the publisher handling the request
    - *view_name*: the verbatim view name (?view=...)
    """
    view_name = request.GET.get('view')
    # Drop no-op '.' segments; the list is reversed so resolve() can pop()
    # segments off the end cheaply.
    path_segments = [s for s in path.split('/') if s != '.']
    path_segments.reverse()
    if '..' in path_segments:
        # Refuse path traversal outright.
        log.warning('Bad request: Refusing path containing "..".')
        return HttpResponse(status=400)
    root_publisher = RootPublisher(data_root())
    view = root_publisher.resolve(path_segments, view_name)
    request.root = root_publisher
    request.view_name = view_name
    try:
        request.publisher = view.__self__
    except AttributeError:
        # We don't explicitly prohibit the resolver to return a view callable that isn't
        # part of a publisher.
        pass
    response = view(request)
    if response is None:
        qualname = view.__module__ + '.' + view.__qualname__
        raise ValueError(
            'The view %s returned None instead of a HTTPResponse' % qualname)
    return response
def clients(self):
    """Yield every client whose hostname fully matches self.client_re (case-insensitive)."""
    pattern = re.compile(self.client_re, re.IGNORECASE)
    for hostname, client in data_root().clients.items():
        if pattern.fullmatch(hostname):
            log.debug('Matched client %s to pattern %r', hostname, self.client_re)
            yield client
def update_state(self, previous, to):
    """Transition the job from *previous* to *to*, enforcing the precondition.

    Raises ValueError if the job is not currently in *previous*. Re-indexes
    the job in jobs_by_state, maintains start/end timestamps and fires the
    pre/post state-update hooks. Runs in its own transaction.
    """
    with transaction.manager as txn:
        if self.state != previous:
            raise ValueError(
                'Cannot transition job state from %r to %r, because current state is %r'
                % (previous, to, self.state))
        borgcube.utils.hook.borgcube_job_pre_state_update(
            job=self, current_state=previous, target_state=to)
        # Re-index the job under its new state.
        del data_root().jobs_by_state[self.state][self.id]
        self.state = to
        data_root().jobs_by_state[self.state][self.id] = self
        self._check_set_start_timestamp(previous)
        self._check_set_end_timestamp()
        log.debug('%s: phase %s -> %s', self.id, previous, to)
        txn.note('Job %s state update: %s -> %s' % (self.id, previous, to))
        borgcube.utils.hook.borgcube_job_post_state_update(
            job=self, prior_state=previous, current_state=to)
def __init__(self, hostname, description='', connection=None):
    """Create a client and register it in the data root under its hostname.

    :param hostname: hostname; used as the key in data_root().clients.
    :param description: free-form description.
    :param connection: optional connection settings object.
    """
    self.hostname = hostname
    self.description = description
    self.connection = connection
    # Per-client indices: jobs (integer-keyed), archives, and the ordered
    # list of configured backup job configs.
    self.jobs = LOBTree()
    self.archives = OOBTree()
    self.job_configs = PersistentList()
    data_root().clients[hostname] = self
def clean(self, value):
    """Map the submitted OID back to its Repository; invalid_choice otherwise."""
    oid = super().clean(value)
    match = next((r for r in data_root().repositories if r.oid == oid), None)
    if match is None:
        raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
    return match
def borgcube_web_children(publisher, children):
    """Hook: contribute trigger-related publishers to the web hierarchy."""
    if publisher.name == 'management':
        return {
            'trigger': TriggerManagementPublisher(data_root().trigger_ids),
        }
    companion_trigger = getattr(publisher.get_companion(), 'trigger', None)
    if isinstance(companion_trigger, Trigger):
        return {
            'trigger': TriggerPublisher(companion_trigger),
        }
def trigger(request, trigger_id):
    """Fire the trigger identified by *trigger_id*; 404 if unknown."""
    # This is a URL-based view, since it can be called anonymously (I plan to make
    # everything publisher-based authenticated-only).
    found = data_root().trigger_ids.get(trigger_id)
    if found is None:
        raise Http404
    # Depending on the security framework that'll be used (I'm atm mainly
    # considering Yosai), this might become unnecessary. (TODO/SEC)
    found.run(access_context='anonymous-web')
    return HttpResponse()
def delete(self, manifest, stats, cache):
    """Delete the underlying Borg archive, then unregister this record.

    The Borg-side deletion (accounted into *stats*) happens first; only then
    are the global, per-repository and per-client index entries removed.
    """
    borg_archive = borg.archive.Archive(manifest.repository, manifest.key, manifest, self.name, cache=cache)
    borg_archive.delete(stats)
    del data_root().archives[self.id]
    del self.repository.archives[self.id]
    if self.client:
        del self.client.archives[self.id]
def queue_backup_job_conditional(apiserver, job_config):
    """Create a job for *job_config* unless one is already queued or running.

    Scans the state index; a job counts as pending when its state is either
    not in the STABLE set (i.e. it is running) or is job_created (queued).
    """
    for state, jobs in data_root().jobs_by_state.items():
        # Skip finished states — but job_created, although stable, still
        # means "queued" and must be considered.
        if state in BackupJob.State.STABLE - {BackupJob.State.job_created}:
            continue
        for job in jobs.values():
            if job.config == job_config:
                log.warning(
                    'run_from_schedule: not triggering a new job for config %s, since job %s is queued or running',
                    job_config.oid, job.id)
                return
    job_config.create_job()
def delete_view(self, request):
    """On POST, delete this job config from its client; always redirect.

    Scheduled actions that reference the deleted config are removed from all
    schedules as well, then the transaction is committed.
    """
    client = self.parent.parent.client
    if request.method == 'POST':
        client.job_configs.remove(self.config)
        # Could just leave it there, but likely not the intention behind clicking (delete).
        for schedule in data_root().schedules:
            # Iterate a copy so removal during iteration is safe.
            for action in list(schedule.actions):
                if getattr(action, 'job_config', None) == self.config:
                    schedule.actions.remove(action)
        transaction.get().note('Deleted job config %s from client %s' % (self.config.oid, client.hostname))
        transaction.commit()
    return self.redirect_to()
def handle(self, *args, **options):
    """Import archives from a Borg repository and associate them with a client.

    The client is looked up by hostname; the repository by exact name, id
    prefix, or exact URL. Every archive matched by the given name/regex is
    imported via import_archive(); a progress indicator and a final summary
    are printed to stderr.

    :raises CommandError: if the client or repository cannot be found.
    """
    try:
        client = data_root().clients[options['client']]
    except KeyError:
        raise CommandError('Client %s not found' % options['client'])
    for repository in data_root().repositories:
        # Accept exact name, id prefix, or exact URL.
        if repository.name == options['repository']:
            break
        if repository.id.startswith(options['repository']):
            break
        if repository.url == options['repository']:
            break
    else:
        raise CommandError('Repository %s not found' % options['repository'])
    with open_repository(repository) as borg_repository:
        manifest, key = Manifest.load(borg_repository)
        with Cache(borg_repository, key, manifest, lock_wait=1) as cache:
            names = self.find_archives(manifest, options['archive'], regex=options['regex'])
            imported = 0
            pi = ProgressIndicatorPercent(
                msg='Importing archives %4.1f %%: %s', total=len(names), step=0.1)
            for name in names:
                # import_archive returns True/False; summing counts imports.
                imported += self.import_archive(manifest, cache, repository, name, client)
                pi.show(info=[name])
            pi.finish()
    print('Imported %d archives.' % imported, file=sys.stderr)
def execute(self, apiserver):
    """Create a job for every client/job-config pair matching both patterns."""
    client_pattern = re.compile(self.client_re, re.IGNORECASE)
    config_pattern = re.compile(self.job_config_re, re.IGNORECASE)
    for client in data_root().clients.values():
        if client_pattern.fullmatch(client.hostname):
            log.debug('Matched client %s to pattern %r', client.hostname, self.client_re)
            for job_config in client.job_configs:
                if config_pattern.fullmatch(job_config.label):
                    log.debug('Matched job config %s to pattern %r', job_config.label, self.job_config_re)
                    job_config.create_job()
    transaction.commit()
def borgcubed_idle(apiserver):
    """Check schedule. Are we supposed to do something right about now?"""
    # A future optimisation could set an alarm for the next occurrence instead
    # of polling the schedules on every idle callback.
    current_time = now()
    for schedule in data_root().schedules:
        if not schedule.recurrence_enabled:
            continue
        # TODO: when django-recurrence#81 is resolved, use cache.
        occurrence = schedule.recurrence.after(current_time)
        if latest_executions.get(schedule._p_oid) == occurrence:
            # This occurrence was already handled.
            continue
        is_due = occurrence and abs((occurrence - current_time).total_seconds()) < 10
        if is_due:
            latest_executions[schedule._p_oid] = occurrence
            execute(apiserver, schedule)
def import_archive(self, manifest, cache, repository, archive_name, client=None):
    """Import a single Borg archive into borgcube's database.

    Runs in its own transaction. Returns True if the archive was imported,
    False if an archive with the same fingerprint is already known.

    :param manifest: loaded Borg Manifest of the repository.
    :param cache: open Borg Cache (needed to compute archive stats).
    :param repository: borgcube Repository object to attach the archive to.
    :param archive_name: name of the archive inside the Borg repository.
    :param client: optional Client to associate the archive with.
    """
    with transaction.manager as txn:
        archive_info = manifest.archives[archive_name]
        fpr = bin_to_hex(archive_info.id)
        if fpr in data_root().archives:
            print('Skipping archive %s [%s], already known' % (archive_info.name, fpr), file=sys.stderr)
            return False
        archive = borg.archive.Archive(
            manifest.repository, manifest.key, manifest, archive_name, cache=cache)
        stats = archive.calc_stats(cache)
        duration = archive.ts_end - archive.ts
        # The Archive constructor registers itself in the indices.
        Archive(
            id=archive.fpr,
            repository=repository,
            name=archive.name,
            client=client,
            nfiles=stats.nfiles,
            original_size=stats.osize,
            compressed_size=stats.csize,
            deduplicated_size=stats.usize,
            duration=duration,
            timestamp=archive.ts,
            timestamp_end=archive.ts_end,
        )
        # Fix: client defaults to None — don't crash with AttributeError
        # while writing the transaction note in that case.
        hostname = client.hostname if client else '(no client)'
        txn.note(
            '(cli) associated archive %s on repository %s with client %s'
            % (archive_name, repository.name, hostname))
        return True
def oid_get(oid):
    """Return the repository with the given *oid*; raise KeyError if unknown."""
    for candidate in data_root().repositories:
        if candidate.oid == oid:
            return candidate
    raise KeyError
def job_cancel(request, job_id):
    """Ask the daemon to cancel *job_id*, then redirect to the client view."""
    job = data_root().jobs[int(job_id)]
    APIClient().cancel_job(job)
    return redirect(client_view, job.client.hostname)
def handle(self, *args, **options):
    """Run the trigger identified by the trigger_id option from the CLI."""
    trigger_id = options['trigger_id']
    try:
        found = data_root().trigger_ids[trigger_id]
    except KeyError:
        raise CommandError('Trigger %s not found' % trigger_id)
    found.run(access_context='local-cli')
def delete_view(self, request):
    """On POST, remove this schedule and commit; always redirect to the parent."""
    if request.method != 'POST':
        return self.parent.redirect_to()
    schedule = self.schedule
    data_root().schedules.remove(schedule)
    transaction.get().note('Deleted schedule %s' % schedule.oid)
    transaction.commit()
    return self.parent.redirect_to()
def job_configs_as_choices():
    """Yield (oid, config) choice pairs for every job config of every client."""
    for client in data_root().clients.values():
        yield from ((config.oid, config) for config in client.job_configs)
def prune_root():
    """Return the prune plugin's persistent data object from the data root."""
    return data_root().plugin_data(PruneRoot)
def clear_db():
    """Test fixture: point the settings at a fresh in-memory database.

    The connection is established before the test runs and reset afterwards.
    """
    settings.BUILTIN_ZEO = False
    settings.DB_URI = 'memory://'
    # Touch the root so the in-memory connection is actually opened.
    data_root()
    yield
    reset_db_connection()
def get_choices():
    """Yield (oid, label) choice pairs for all known repositories."""
    for repo in data_root().repositories:
        yield repo.oid, str(repo)
def schedule_add_and_edit(render, request, data, schedule=None, context=None):
    """Shared view logic for adding and editing a Schedule.

    Renders the schedule form plus one sub-form per scheduled action. On a
    valid POST the schedule is created or updated, its actions rebuilt from
    the JSON-encoded 'actions-data' field, and the transaction committed.

    :param render: render callable (request, template, context) -> response.
    :param request: the current request.
    :param data: POST data, or None for the initial GET.
    :param schedule: existing Schedule when editing, None when adding.
    :param context: optional extra template context.
    """
    if schedule:
        # Unghost the persistent object so __dict__ holds real values.
        schedule._p_activate()
        form = Schedule.Form(data, initial=schedule.__dict__)
    else:
        form = Schedule.Form(data)
    action_forms = []
    # This generally works since pluggy loads plugin modules for us.
    classes = ScheduledAction.__subclasses__()
    log.debug('Discovered schedulable actions: %s',
              ', '.join(cls.dotted_path() for cls in classes))
    if schedule:
        for scheduled_action in schedule.actions:
            scheduled_action._p_activate()
            action_form = scheduled_action.form(initial=scheduled_action.__dict__)
            action_forms.append(action_form)
    if data:
        try:
            actions_data = json.loads(data['actions-data'])
        except KeyError:
            # The action list is serialized client-side into a hidden field.
            return HttpResponseBadRequest(
                'Invalid POST data for schedule form. Is JavaScript disabled?')
        all_valid = form.is_valid()
        txn = transaction.get()
        if all_valid:
            if schedule:
                # Actions are rebuilt from the POSTed data below.
                schedule.actions.clear()
                schedule._update(form.cleaned_data)
                txn.note('Edited schedule %s' % schedule.oid)
            else:
                schedule = Schedule(**form.cleaned_data)
                data_root().schedules.append(schedule)
                txn.note('Added schedule %s' % schedule.name)
        for serialized_action in actions_data:
            dotted_path = serialized_action.pop('class')
            action = ScheduledAction.get_class(dotted_path)
            if not action:
                log.error('invalid/unknown schedulable action %r, ignoring', dotted_path)
                continue
            action_form = action.form(serialized_action)
            valid = action_form.is_valid()
            # One invalid action form poisons the whole submission.
            all_valid &= valid
            if all_valid:
                scheduled_action = action(schedule, **action_form.cleaned_data)
                schedule.actions.append(scheduled_action)
                txn.note(' - Added scheduled action %s' % scheduled_action.dotted_path())
            action_forms.append(action_form)
        if all_valid:
            txn.commit()
            return request.publisher.redirect_to()
    context = dict(context or {})
    context.update({
        'form': form,
        'classes': {cls.dotted_path(): cls.name for cls in classes},
        'action_forms': action_forms,
    })
    return render(request, 'core/schedule/add.html', context)
def borgcube_job_blocked(job, blocking_jobs):
    """Hook: block *job* while a prune job covering its repository is active.

    Appends any active prune job whose repository set includes the job's
    repository to *blocking_jobs*.
    """
    for state in (PruneJob.State.discovering, PruneJob.State.prune):
        # Fix: jobs_by_state[state] maps job id -> job, so iterating it
        # directly yields the integer ids; we need the job objects.
        for other in data_root().jobs_by_state[state].values():
            if other.short_name == 'prune' and job.repository in other.repositories:
                blocking_jobs.append(other)