def outpost_controller(self: MonitoredTask, outpost_pk: str):
    """Create/update/monitor the deployment of an Outpost.

    Dispatches to a Docker or Kubernetes proxy controller depending on the
    outpost's service connection, records the controller's log lines as the
    task result, and marks the task ERROR on ControllerException.
    """
    logs = []
    # filter().first() instead of .get(): the outpost may have been deleted
    # between scheduling and execution, and an uncaught DoesNotExist would
    # crash the task instead of cleanly skipping it.
    outpost: Outpost = Outpost.objects.filter(pk=outpost_pk).first()
    if not outpost:
        return
    self.set_uid(slugify(outpost.name))
    try:
        if not outpost.service_connection:
            # Nothing to deploy against; leave previous task state untouched
            return
        if outpost.type == OutpostType.PROXY:
            service_connection = outpost.service_connection
            if isinstance(service_connection, DockerServiceConnection):
                logs = ProxyDockerController(outpost, service_connection).up_with_logs()
            if isinstance(service_connection, KubernetesServiceConnection):
                logs = ProxyKubernetesController(outpost, service_connection).up_with_logs()
        LOGGER.debug("---------------Outpost Controller logs starting----------------")
        for log in logs:
            LOGGER.debug(log)
        LOGGER.debug("-----------------Outpost Controller logs end-------------------")
    except ControllerException as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
    else:
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, logs))
def ldap_sync(self: MonitoredTask, source_pk: str):
    """Synchronization of an LDAP Source"""
    self.result_timeout_hours = 2
    try:
        source: LDAPSource = LDAPSource.objects.get(pk=source_pk)
    except LDAPSource.DoesNotExist:
        # Source is gone; without it there is no UID to record task state under
        return
    self.set_uid(slugify(source.name))
    # Users first, then groups, then memberships (which reference both)
    synchronizers = (
        UserLDAPSynchronizer,
        GroupLDAPSynchronizer,
        MembershipLDAPSynchronizer,
    )
    try:
        messages = []
        for synchronizer in synchronizers:
            synced = synchronizer(source).sync()
            messages.append(f"Synced {synced} objects from {synchronizer.__name__}")
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
    except LDAPException as exc:
        # No explicit event is created here as .set_status with an error will do that
        LOGGER.debug(exc)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def outpost_controller(
    self: MonitoredTask, outpost_pk: str, action: str = "up", from_cache: bool = False
):
    """Create/update/monitor/delete the deployment of an Outpost.

    :param outpost_pk: primary key of the Outpost (or cache-key suffix when
        ``from_cache`` is set, used for deletions of already-removed outposts)
    :param action: controller method prefix; ``f"{action}_with_logs"`` is called
    :param from_cache: load the outpost from the down-cache instead of the DB
    """
    logs = []
    if from_cache:
        # May be None if the cache entry already expired; guarded below
        outpost: Outpost = cache.get(CACHE_KEY_OUTPOST_DOWN % outpost_pk)
    else:
        # filter().first() so a since-deleted outpost reaches the guard below;
        # .get() would raise DoesNotExist and the `if not outpost` check could
        # never fire on the DB path.
        outpost: Outpost = Outpost.objects.filter(pk=outpost_pk).first()
    if not outpost:
        return
    self.set_uid(slugify(outpost.name))
    try:
        controller = controller_for_outpost(outpost)
        if not controller:
            return
        logs = getattr(controller, f"{action}_with_logs")()
        LOGGER.debug("---------------Outpost Controller logs starting----------------")
        for log in logs:
            LOGGER.debug(log)
        LOGGER.debug("-----------------Outpost Controller logs end-------------------")
    except ControllerException as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
    else:
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, logs))
def ldap_sync(self: MonitoredTask, source_pk: str, sync_class: str):
    """Synchronization of an LDAP Source"""
    self.result_timeout_hours = 2
    try:
        source: LDAPSource = LDAPSource.objects.get(pk=source_pk)
    except LDAPSource.DoesNotExist:
        # Source is gone; without it there is no UID to record task state under
        return
    sync = path_to_class(sync_class)
    # UID like "<source-slug>_user" / "<source-slug>_group", derived from the
    # synchronizer class name
    short_name = sync.__name__.replace("LDAPSynchronizer", "").lower()
    self.set_uid(f"{source.slug}_{short_name}")
    try:
        synchronizer = sync(source)
        synced = synchronizer.sync()
        messages = synchronizer.messages
        messages.append(f"Synced {synced} objects.")
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
    except LDAPException as exc:
        # No explicit event is created here as .set_status with an error will do that
        LOGGER.debug(exc)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def send_mail(
    self: MonitoredTask, message: dict[Any, Any], email_stage_pk: Optional[int] = None
):
    """Send Email for Email Stage. Retries are scheduled automatically.

    :param message: attribute dict used to rebuild an EmailMultiAlternatives
        (EmailMessage objects are not JSON serialisable)
    :param email_stage_pk: stage to send through; falls back to a stage using
        global settings when not given
    :raises SMTPException, ConnectionError: re-raised so the task is retried
    """
    self.save_on_success = False
    message_id = make_msgid(domain=DNS_NAME)
    self.set_uid(slugify(message_id.replace(".", "_").replace("@", "_")))
    try:
        if not email_stage_pk:
            stage: EmailStage = EmailStage(use_global_settings=True)
        else:
            stage: EmailStage = EmailStage.objects.get(pk=email_stage_pk)
        # Keep a single backend reference: each `stage.backend` access may build
        # a fresh backend instance, so sending via a second access would bypass
        # the connection opened here. (Original opened one backend but sent on
        # `stage.backend` again.)
        backend = stage.backend
        backend.open()
        # Since django's EmailMessage objects are not JSON serialisable,
        # we need to rebuild them from a dict
        message_object = EmailMultiAlternatives()
        for key, value in message.items():
            setattr(message_object, key, value)
        if not stage.use_global_settings:
            message_object.from_email = stage.from_address
        # Because we use the Message-ID as UID for the task, manually assign it
        message_object.extra_headers["Message-ID"] = message_id
        LOGGER.debug("Sending mail", to=message_object.to)
        backend.send_messages([message_object])
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL,
                messages=["Successfully sent Mail."],
            )
        )
    except (SMTPException, ConnectionError) as exc:
        LOGGER.debug("Error sending email, retrying...", exc=exc)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
        # bare raise preserves the original traceback for the retry machinery
        raise
def outpost_controller(
    self: MonitoredTask, outpost_pk: str, action: str = "up", from_cache: bool = False
):
    """Create/update/monitor/delete the deployment of an Outpost"""
    logs = []
    if from_cache:
        outpost: Outpost = cache.get(CACHE_KEY_OUTPOST_DOWN % outpost_pk)
        LOGGER.debug("Getting outpost from cache to delete")
    else:
        outpost: Outpost = Outpost.objects.filter(pk=outpost_pk).first()
        LOGGER.debug("Getting outpost from DB")
    if not outpost:
        LOGGER.warning("No outpost")
        return
    self.set_uid(slugify(outpost.name))
    try:
        controller_type = controller_for_outpost(outpost)
        if not controller_type:
            # No controller applies to this outpost; nothing to do
            return
        # The controller is a context manager; run the requested action
        # (e.g. "up"/"down") inside it and log its output line by line
        with controller_type(outpost, outpost.service_connection) as controller:
            logs = getattr(controller, f"{action}_with_logs")()
            LOGGER.debug("---------------Outpost Controller logs starting----------------")
            for line in logs:
                LOGGER.debug(line)
            LOGGER.debug("-----------------Outpost Controller logs end-------------------")
    except (ControllerException, ServiceConnectionInvalid) as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
    else:
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, logs))
def backup_database(self: MonitoredTask):  # pragma: no cover
    """Database backup"""
    # Backups can be slow; allow the result to live for just over a day
    self.result_timeout_hours = 25
    if not should_backup():
        self.set_status(
            TaskResult(TaskResultStatus.UNKNOWN, ["Backups are not configured."])
        )
        return
    try:
        started = datetime.now()
        output = StringIO()
        management.call_command("dbbackup", quiet=True, stdout=output)
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL,
                [
                    f"Successfully finished database backup {naturaltime(started)} {output.getvalue()}",
                ],
            )
        )
        LOGGER.info("Successfully backed up database.")
    except (
        IOError,
        BotoCoreError,
        ClientError,
        Boto3Error,
        PermissionError,
        CommandConnectorError,
        ValueError,
    ) as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def update_latest_version(self: MonitoredTask):
    """Update latest version info"""
    try:
        response = get(
            "https://api.github.com/repos/goauthentik/authentik/releases/latest"
        )
        response.raise_for_status()
        data = response.json()
        # tag names are expected to be "<prefix>/<version>"; an unexpected tag
        # raises IndexError, which is handled below
        upstream_version = data.get("tag_name").split("/")[1]
        cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL, ["Successfully updated latest Version"]
            )
        )
        _set_prom_info()
        # Check if upstream version is newer than what we're running,
        # and if no event exists yet, create one.
        if parse(__version__) < parse(upstream_version):
            if Event.objects.filter(
                action=EventAction.UPDATE_AVAILABLE,
                context__new_version=upstream_version,
            ).exists():
                # Event has already been created, don't create duplicate
                return
            event_dict = {"new_version": upstream_version}
            if match := re.search(URL_FINDER, data.get("body", "")):
                event_dict["message"] = f"Changelog: {match.group()}"
            Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()
    except (RequestException, IndexError) as exc:
        cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def managed_reconcile(self: MonitoredTask):
    """Run ObjectManager to ensure objects are up-to-date"""
    try:
        ObjectManager().run()
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL, ["Successfully updated managed models."]
            )
        )
    except DatabaseError as exc:  # pragma: no cover
        # NOTE(review): presumably hit when the database is unavailable
        # (e.g. during startup/migrations) — recorded as WARNING, not ERROR
        self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))
def send_mail(
    self: MonitoredTask, message: dict[Any, Any], email_stage_pk: Optional[int] = None
):
    """Send Email for Email Stage. Retries are scheduled automatically."""
    self.save_on_success = False
    message_id = make_msgid(domain=DNS_NAME)
    self.set_uid(slugify(message_id.replace(".", "_").replace("@", "_")))
    try:
        if email_stage_pk:
            stages = EmailStage.objects.filter(pk=email_stage_pk)
            if not stages.exists():
                # Stage was deleted while the message sat in the queue
                self.set_status(
                    TaskResult(
                        TaskResultStatus.WARNING,
                        messages=["Email stage does not exist anymore. Discarding message."],
                    )
                )
                return
            stage: EmailStage = stages.first()
        else:
            stage: EmailStage = EmailStage(use_global_settings=True)
        try:
            backend = stage.backend
        except ValueError as exc:
            # pyright: reportGeneralTypeIssues=false
            # Misconfigured backend — record the error, nothing to retry
            LOGGER.warning(exc)
            self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
            return
        backend.open()
        # EmailMessage instances are not JSON serialisable, so the task receives
        # a plain dict and the message is rebuilt attribute by attribute
        message_object = EmailMultiAlternatives()
        for key, value in message.items():
            setattr(message_object, key, value)
        if not stage.use_global_settings:
            message_object.from_email = stage.from_address
        # The Message-ID doubles as this task's UID, so assign it explicitly
        message_object.extra_headers["Message-ID"] = message_id
        LOGGER.debug("Sending mail", to=message_object.to)
        backend.send_messages([message_object])
        Event.new(
            EventAction.EMAIL_SENT,
            message=(f"Email to {', '.join(message_object.to)} sent"),
            subject=message_object.subject,
            body=get_email_body(message_object),
            from_email=message_object.from_email,
            to_email=message_object.to,
        ).save()
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL,
                messages=["Successfully sent Mail."],
            )
        )
    except (SMTPException, ConnectionError, OSError) as exc:
        LOGGER.debug("Error sending email, retrying...", exc=exc)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
        raise exc
def notification_transport(self: MonitoredTask, notification_pk: int, transport_pk: int):
    """Send notification over specified transport"""
    self.save_on_success = False
    try:
        notification = Notification.objects.get(pk=notification_pk)
        transport = NotificationTransport.objects.get(pk=transport_pk)
        transport.send(notification)
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL))
    except NotificationTransportError as exc:
        # Record the failure, then re-raise so the task can be retried
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
        raise exc
def clean_temporary_users(self: MonitoredTask):
    """Remove temporary users created by SAML Sources.

    A temporary user is deleted once its source's
    ``temporary_user_delete_after`` delta has elapsed since the user's last
    login. Users pointing at a no-longer-existing source are reported but kept.
    """
    _now = now()
    messages = []
    deleted_users = 0
    for user in User.objects.filter(attributes__saml__isnull=False):
        sources = SAMLSource.objects.filter(
            pk=user.attributes.get("saml", {}).get("source", "")
        )
        if not sources.exists():
            LOGGER.warning("User has an invalid SAML Source and won't be deleted!", user=user)
            messages.append(
                f"User {user} has an invalid SAML Source and won't be deleted!"
            )
            continue
        source = sources.first()
        source_delta = timedelta_from_string(source.temporary_user_delete_after)
        # last_login can be None for a user that never logged in; without this
        # guard the subtraction below raises TypeError and aborts the task
        if user.last_login is None:
            continue
        if _now - user.last_login >= source_delta:
            LOGGER.debug("User is expired and will be deleted.", user=user, delta=source_delta)
            # TODO: Check if user is signed in anywhere?
            user.delete()
            deleted_users += 1
    messages.append(f"Successfully deleted {deleted_users} users.")
    self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
def certificate_discovery(self: MonitoredTask):
    """Discover, import and update certificates from the filesystem.

    Walks ``cert_discovery_dir`` recursively, classifies each PEM file as a
    private key or certificate, and creates/updates managed
    CertificateKeyPair objects accordingly.
    """
    certs = {}
    private_keys = {}
    discovered = 0
    for file in glob(CONFIG.y("cert_discovery_dir") + "/**", recursive=True):
        path = Path(file)
        if not path.exists():
            continue
        if path.is_dir():
            continue
        # For certbot setups, we want to ignore archive.
        if "archive" in file:
            continue
        # Support certbot's directory structure
        if path.name in ["fullchain.pem", "privkey.pem"]:
            cert_name = path.parent.name
        else:
            cert_name = path.name.replace(path.suffix, "")
        try:
            # Read-only access is all we need; "r+" would fail on read-only
            # mounts (common for secrets mounted into containers)
            with open(path, "r", encoding="utf-8") as _file:
                body = _file.read()
                if "PRIVATE KEY" in body:
                    private_keys[cert_name] = ensure_private_key_valid(body)
                else:
                    certs[cert_name] = ensure_certificate_valid(body)
        except (OSError, ValueError) as exc:
            LOGGER.warning("Failed to open file or invalid format", exc=exc, file=path)
        else:
            # Count only files that were actually parsed; previously failed
            # files were included in the "imported" total
            discovered += 1
    for name, cert_data in certs.items():
        cert = CertificateKeyPair.objects.filter(managed=MANAGED_DISCOVERED % name).first()
        if not cert:
            cert = CertificateKeyPair(
                name=name,
                managed=MANAGED_DISCOVERED % name,
            )
        dirty = False
        if cert.certificate_data != cert_data:
            cert.certificate_data = cert_data
            dirty = True
        if name in private_keys:
            if cert.key_data != private_keys[name]:
                cert.key_data = private_keys[name]
                dirty = True
        # Only hit the database when something actually changed
        if dirty:
            cert.save()
    self.set_status(
        TaskResult(
            TaskResultStatus.SUCCESSFUL,
            messages=[
                _("Successfully imported %(count)d files." % {"count": discovered})
            ],
        )
    )
def outpost_controller(self: MonitoredTask, outpost_pk: str):
    """Create/update/monitor the deployment of an Outpost.

    Resolves the controller for the outpost, brings the deployment up, and
    stores the controller's log lines as the task result.
    """
    logs = []
    # The outpost may have been deleted between scheduling and execution;
    # filter().first() avoids an uncaught Outpost.DoesNotExist
    outpost: Outpost = Outpost.objects.filter(pk=outpost_pk).first()
    if not outpost:
        return
    self.set_uid(slugify(outpost.name))
    try:
        controller = controller_for_outpost(outpost)
        if not controller:
            # No controller applies (e.g. no usable service connection)
            return
        logs = controller.up_with_logs()
        LOGGER.debug("---------------Outpost Controller logs starting----------------")
        for log in logs:
            LOGGER.debug(log)
        LOGGER.debug("-----------------Outpost Controller logs end-------------------")
    except ControllerException as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
    else:
        self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, logs))
def outpost_service_connection_monitor(self: MonitoredTask):
    """Regularly check the state of Outpost Service Connections"""
    all_connections = OutpostServiceConnection.objects.all()
    # Fan out one asynchronous state-check task per connection
    for conn in all_connections.iterator():
        outpost_service_connection_state.delay(conn.pk)
    self.set_status(
        TaskResult(
            TaskResultStatus.SUCCESSFUL,
            [f"Successfully updated {len(all_connections)} connections."],
        )
    )
def outpost_token_ensurer(self: MonitoredTask):
    """Periodically ensure that all Outposts have valid Service Accounts and Tokens"""
    outposts = Outpost.objects.all()
    for outpost in outposts:
        # Touching .token is enough — presumably the property (re)creates the
        # service account/token on access; the value itself is unused here
        _ = outpost.token
    self.set_status(
        TaskResult(
            TaskResultStatus.SUCCESSFUL,
            [f"Successfully checked {len(outposts)} Outposts."],
        )
    )
def clean_expired_models(self: MonitoredTask):
    """Remove expired objects"""
    messages = []
    for cls in ExpiringModel.__subclasses__():
        cls: ExpiringModel
        # Select everything that can expire and whose expiry lies in the past
        expired = cls.objects.all().exclude(expiring=False).exclude(
            expiring=True, expires__gt=now()
        )
        amount, _ = expired.delete()
        LOGGER.debug("Deleted expired models", model=cls, amount=amount)
        messages.append(f"Deleted {amount} expired {cls._meta.verbose_name_plural}")
    self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
def save_reputation(self: MonitoredTask):
    """Save currently cached reputation to database"""
    updated = []
    cached_scores = cache.get_many(cache.keys(CACHE_KEY_PREFIX + "*"))
    for score in cached_scores.values():
        reputation, _ = Reputation.objects.get_or_create(
            ip=score["ip"],
            identifier=score["identifier"],
        )
        # Geo data may be unavailable for an IP; fall back to an empty dict
        reputation.ip_geo_data = GEOIP_READER.city_dict(score["ip"]) or {}
        reputation.score = score["score"]
        updated.append(reputation)
    # Single bulk write instead of one save per row
    Reputation.objects.bulk_update(updated, ["score", "ip_geo_data"])
    self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated Reputation"]))
def save_ip_reputation(self: MonitoredTask):
    """Save currently cached reputation to database"""
    updated = []
    for cache_key, score in cache.get_many(cache.keys(CACHE_KEY_IP_PREFIX + "*")).items():
        # The IP is the cache key minus the prefix
        ip = cache_key.replace(CACHE_KEY_IP_PREFIX, "")
        reputation, _ = IPReputation.objects.get_or_create(ip=ip)
        reputation.score = score
        updated.append(reputation)
    # Single bulk write instead of one save per row
    IPReputation.objects.bulk_update(updated, ["score"])
    self.set_status(
        TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated IP Reputation"])
    )
def backup_database(self: MonitoredTask):  # pragma: no cover
    """Database backup"""
    # Backups can be slow; allow the result to live for just over a day
    self.result_timeout_hours = 25
    # Inside Kubernetes a local dump is lost with the pod, so skip unless
    # S3 backups are configured
    running_in_k8s = SERVICE_HOST_ENV_NAME in environ
    if running_in_k8s and not CONFIG.y("postgresql.s3_backup"):
        LOGGER.info("Running in k8s and s3 backups are not configured, skipping")
        self.set_status(
            TaskResult(
                TaskResultStatus.WARNING,
                [
                    (
                        "Skipping backup as authentik is running in Kubernetes "
                        "without S3 backups configured."
                    ),
                ],
            )
        )
        return
    try:
        started = datetime.now()
        output = StringIO()
        management.call_command("dbbackup", quiet=True, stdout=output)
        self.set_status(
            TaskResult(
                TaskResultStatus.SUCCESSFUL,
                [
                    f"Successfully finished database backup {naturaltime(started)} {output.getvalue()}",
                ],
            )
        )
        LOGGER.info("Successfully backed up database.")
    except (
        IOError,
        BotoCoreError,
        ClientError,
        Boto3Error,
        PermissionError,
        CommandConnectorError,
        ValueError,
    ) as exc:
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def update_latest_version(self: MonitoredTask):
    """Update latest version info"""
    if CONFIG.y_bool("disable_update_check"):
        # Checks disabled: pin the cached version to a sentinel and bail
        cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
        self.set_status(
            TaskResult(TaskResultStatus.WARNING, messages=["Version check disabled."])
        )
        return
    try:
        response = get_http_session().get(
            "https://version.goauthentik.io/version.json",
        )
        response.raise_for_status()
        data = response.json()
        stable = data.get("stable", {})
        upstream_version = stable.get("version")
        cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)
        self.set_status(
            TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated latest Version"])
        )
        _set_prom_info()
        # Create an UPDATE_AVAILABLE event at most once per upstream version
        if LOCAL_VERSION < parse(upstream_version):
            if Event.objects.filter(
                action=EventAction.UPDATE_AVAILABLE,
                context__new_version=upstream_version,
            ).exists():
                return
            event_dict = {"new_version": upstream_version}
            if match := re.search(URL_FINDER, stable.get("changelog", "")):
                event_dict["message"] = f"Changelog: {match.group()}"
            Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()
    except (RequestException, IndexError) as exc:
        cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
        self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
def save_user_reputation(self: MonitoredTask):
    """Save currently cached reputation to database"""
    updated = []
    for cache_key, score in cache.get_many(cache.keys(CACHE_KEY_USER_PREFIX + "*")).items():
        # The username is the cache key minus the prefix
        username = cache_key.replace(CACHE_KEY_USER_PREFIX, "")
        matching = User.objects.filter(username=username)
        if not matching.exists():
            # Stale cache entry for a deleted/renamed user
            LOGGER.info("User in cache does not exist, ignoring", username=username)
            continue
        reputation, _ = UserReputation.objects.get_or_create(user=matching.first())
        reputation.score = score
        updated.append(reputation)
    # Single bulk write instead of one save per row
    UserReputation.objects.bulk_update(updated, ["score"])
    self.set_status(
        TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated User Reputation"])
    )
def clean_expired_models(self: MonitoredTask):
    """Remove expired objects.

    Runs ``expire_action()`` on every expired ExpiringModel row, and deletes
    AuthenticatedSession rows whose backing cache entry is gone.
    """
    messages = []
    for cls in ExpiringModel.__subclasses__():
        cls: ExpiringModel
        objects = (
            cls.objects.all().exclude(expiring=False).exclude(expiring=True, expires__gt=now())
        )
        # Count while iterating: re-evaluating objects.count() after
        # expire_action() has run (which may delete the rows) would re-query
        # the now-empty set and always report 0
        amount = 0
        for obj in objects:
            obj.expire_action()
            amount += 1
        LOGGER.debug("Expired models", model=cls, amount=amount)
        messages.append(f"Expired {amount} {cls._meta.verbose_name_plural}")
    # Special case: sessions live in the cache; a DB row without a matching
    # cache entry is expired and can be removed
    amount = 0
    for session in AuthenticatedSession.objects.all():
        cache_key = f"{KEY_PREFIX}{session.session_key}"
        value = cache.get(cache_key)
        if not value:
            session.delete()
            amount += 1
    LOGGER.debug("Expired sessions", model=AuthenticatedSession, amount=amount)
    messages.append(f"Expired {amount} {AuthenticatedSession._meta.verbose_name_plural}")
    self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))