def nslcd():
    """Initialize distribution PAM integration of OpenLDAP."""
    patchfiles = [
        ("/etc/pam.d/system-auth", "001-add-ldap-to-system-auth.patch"),
        ("/etc/pam.d/su", "002-add-ldap-to-su.patch"),
        ("/etc/pam.d/su-l", "003-add-ldap-to-su-l.patch"),
        ("/etc/pam.d/passwd", "004-add-ldap-to-passwd.patch"),
        ("/etc/pam.d/system-login", "005-add-ldap-to-system-login.patch"),
        ("/etc/nsswitch.conf", "006-add-ldap-to-nsswitch.patch"),
        ("/etc/nslcd.conf", "007-add-ldap-to-nslcd.patch")
    ]
    # Verify every patch file shipped by arkos-configs before touching anything.
    for _, patch_name in patchfiles:
        patch_path = os.path.join("/usr/share/arkos/nslcd", patch_name)
        if not os.path.exists(patch_path):
            raise CLIException(
                "Patch files could not be found. Your installation may "
                "be corrupted. Please reinstall the `arkos-configs` package.")
    logger.debug('ctl:init:nslcd', 'Stopping daemon: nslcd')
    result = shell("systemctl stop nslcd")
    if result["code"] != 0:
        raise click.ClickException(result["stderr"].decode())
    # Apply each patch to its target system file.
    logger.info('ctl:init:nslcd', 'Patching system files')
    for target, patch_name in patchfiles:
        shell("patch -N {0} {1}".format(
            target, os.path.join("/usr/share/arkos/nslcd", patch_name)))
    logger.debug('ctl:init:nslcd', 'Starting daemon: nslcd')
    shell("systemctl enable nslcd")
    shell("systemctl start nslcd")
    logger.success('ctl:init:nslcd', 'Complete')
def initialize():
    """Initialize security policy tracking.

    Loads the saved policy value for each core service (Genesis/API, uPnP,
    SSH), creates ACME validation stub policies for any enabled acme-* nginx
    sites, and registers custom user-defined policies, storing each in the
    ``storage.policies`` cache keyed by policy id.
    """
    logger.debug("TrSv", "Initializing security policy tracking")
    # arkOS
    policy = policies.get("arkos", "arkos", 2)
    port = [("tcp", int(config.get("genesis", "port")))]
    pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
                         "server", port, policy)
    storage.policies[pol.id] = pol
    # uPNP -- only tracked when uPnP support is enabled in config
    policy = policies.get("arkos", "upnp", 1)
    pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms", "server",
                         [("udp", 1900)], policy)
    if config.get("general", "enable_upnp"):
        storage.policies[pol.id] = pol
    # SSHd
    policy = policies.get("arkos", "sshd", 1)
    pol = SecurityPolicy("arkos", "sshd", "SSH", "server",
                         [("tcp", 22)], policy)
    # BUGFIX: the SSH policy was previously created but never registered,
    # so it silently vanished instead of appearing in the policy cache.
    storage.policies[pol.id] = pol
    # ACME dummies
    for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
        acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
        pol = SecurityPolicy("acme", acme_name,
                             "{0} (ACME Validation)".format(acme_name),
                             "globe", [('tcp', 80)], 2)
        storage.policies[pol.id] = pol
    # Custom user-defined policies
    for x in policies.get_all("custom"):
        pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
                             x["ports"], x["policy"])
        storage.policies[pol.id] = pol
def scan_authorities():
    """
    Search proper directory for certificates, load them and store metadata.

    :return: CertificateAuthority objects keyed by CA id
    :rtype: dict
    """
    logger.debug("Crts", "Scanning for certificate authorities")
    storage.certificate_authorities.clear()
    ca_cert_dir = config.get("certificates", "ca_cert_dir")
    ca_key_dir = config.get("certificates", "ca_key_dir")
    if not os.path.exists(ca_cert_dir):
        os.makedirs(ca_cert_dir)
    if not os.path.exists(ca_key_dir):
        os.makedirs(ca_key_dir)
    for x in glob.glob(os.path.join(ca_cert_dir, "*.pem")):
        id = os.path.splitext(os.path.split(x)[1])[0]
        with open(x, "rb") as f:
            cert = x509.load_pem_x509_certificate(f.read(), default_backend())
        key_path = os.path.join(ca_key_dir, "{0}.key".format(id))
        # BUGFIX: the key file was previously opened twice via two nested
        # `with open(key_path)` blocks; one open is sufficient.
        with open(key_path, "rb") as f:
            key = serialization.load_pem_private_key(
                f.read(), password=None, backend=default_backend()
            )
        sha1 = binascii.hexlify(cert.fingerprint(hashes.SHA1())).decode()
        md5 = binascii.hexlify(cert.fingerprint(hashes.MD5())).decode()
        # Only RSA and DSA CAs are distinguished here; any non-RSA key is
        # labelled "DSA".
        kt = "RSA" if isinstance(key.public_key(), rsa.RSAPublicKey) else "DSA"
        ca = CertificateAuthority(id, x, key_path, cert.not_valid_after, kt,
                                  key.key_size, sha1, md5)
        storage.certificate_authorities[id] = ca
    return storage.certificate_authorities
def verify_app_dependencies():
    """Check app-type dependencies of installed apps and flag failures.

    Marks apps (and everything that depends on them) as not loadable with a
    human-readable error message when a required app is missing or itself
    failed to load.
    """
    installed = [a for a in storage.apps.get("applications") if a.installed]
    installed_ids = [a.id for a in installed]
    for app in installed:
        for dep in app.dependencies:
            # Only app-type dependencies are checked here
            if dep["type"] != "app":
                continue
            if dep["package"] not in installed_ids:
                # The required app is not installed at all
                app.loadable = False
                app.error = "Depends on %s, which is not installed" % dep["name"]
                logger.debug("*** Verify failed for %s -- dependent on %s which is not installed" % (app.name,dep["name"]))
                # Cascade this fail message to all apps in the dependency chain
                for child_id in get_dependent(app.id, "remove"):
                    child = storage.apps.get("applications", child_id)
                    child.loadable = False
                    child.error = "Depends on %s, which cannot be loaded because %s is not installed" % (app.name,dep["name"])
            elif not storage.apps.get("applications", dep["package"]).loadable:
                # The required app is installed but failed to load itself
                app.loadable = False
                app.error = "Depends on %s, which also failed" % dep["name"]
                logger.debug("*** Verify failed for %s -- dependent on %s which failed to load" % (app.name,dep["name"]))
                # Cascade this fail message to all apps in the dependency chain
                for child_id in get_dependent(app.id, "remove"):
                    child = storage.apps.get("applications", child_id)
                    child.loadable = False
                    child.error = "Depends on %s, which cannot be loaded because %s failed to load" % (app.name,dep["name"])
def scan():
    """
    Search proper directory for certificates, load them and store metadata.

    :return: Certificate objects keyed by certificate id
    :rtype: dict
    """
    logger.debug("Crts", "Scanning for certificates")
    # Map of cert id -> list of assignment records ({"type", "id", "name"})
    assigns = {}
    if config.get("genesis", "ssl"):
        gen_cert = config.get("genesis", "cert_file")
        ssl = os.path.splitext(os.path.basename(gen_cert))[0]
        if ssl and ssl in assigns:
            assigns[ssl].append({"type": "genesis", "id": "genesis",
                                 "name": "arkOS Genesis/API"})
        elif ssl:
            assigns[ssl] = [{"type": "genesis", "id": "genesis",
                             "name": "arkOS Genesis/API"}]
    for x in applications.get(installed=True):
        if hasattr(x, "ssl"):
            for ssl, data in x.ssl.get_ssl_assigned():
                if ssl in assigns:
                    # BUGFIX: was `assigns[ssl] += data`, which (assuming
                    # `data` is a single assignment record, as the else
                    # branch and the genesis branch above both treat it)
                    # would extend the list with the record's keys rather
                    # than append the record itself.
                    assigns[ssl].append(data)
                else:
                    assigns[ssl] = []
                    assigns[ssl].append(data)
    if not os.path.exists(config.get("certificates", "cert_dir")):
        os.makedirs(config.get("certificates", "cert_dir"))
    if not os.path.exists(config.get("certificates", "key_dir")):
        os.makedirs(config.get("certificates", "key_dir"))
    storage.certificates.clear()
    # Self-managed certificates (cert_dir/*.crt with matching key_dir/*.key)
    cert_glob = os.path.join(config.get("certificates", "cert_dir"), "*.crt")
    for cert_path in glob.glob(cert_glob):
        id = os.path.splitext(os.path.basename(cert_path))[0]
        key_path = os.path.join(
            config.get("certificates", "key_dir"), "{0}.key".format(id))
        storage.certificates[id] = \
            _scan_a_cert(id, cert_path, key_path, assigns)
    # ACME (Let's Encrypt) certificates live in per-site subdirectories
    acmedir = config.get("certificates", "acme_dir")
    if not os.path.exists(acmedir):
        os.makedirs(acmedir)
    le_cert_glob = os.path.join(acmedir, "*/cert.pem")
    for cert_path in glob.glob(le_cert_glob):
        basedir = os.path.dirname(cert_path)
        id = os.path.basename(basedir)
        key_path = os.path.join(basedir, "privkey.pem")
        storage.certificates[id] = \
            _scan_a_cert(id, cert_path, key_path, assigns, True)
    return storage.certificates
def scan(verify=True):
    """Search the app directory for apps and merge in remote repo metadata.

    :param bool verify: verify app dependencies after scanning
    :return: list of Application objects
    """
    signals.emit("apps", "pre_scan")
    app_dir = config.get("apps", "app_dir")
    apps = []
    if not os.path.exists(app_dir):
        os.makedirs(app_dir)
    # Get paths for installed apps, metadata for available ones
    installed_apps = [n for n in os.listdir(app_dir) if not n.startswith(".")]
    available_apps = api("https://%s/api/v1/apps" % config.get("general", "repo_server"),
                         crit=False)
    available_apps = available_apps["applications"] if available_apps else []
    # Create objects for installed apps with appropriate metadata
    for name in installed_apps:
        try:
            with open(os.path.join(app_dir, name, "manifest.json"), "r") as f:
                data = json.loads(f.read())
        except ValueError:
            logger.warn("Failed to load %s due to a JSON parsing error" % name)
            continue
        except IOError:
            logger.warn("Failed to load %s: manifest file inaccessible or not present" % name)
            continue
        logger.debug(" *** Loading %s" % data["id"])
        app = App(**data)
        app.installed = True
        for idx, meta in enumerate(available_apps):
            if app.id == meta["id"]:
                # Remote version differs -> mark as upgradable
                if app.version != meta["version"]:
                    app.upgradable = meta["version"]
                app.assets = meta["assets"]
                available_apps[idx]["installed"] = True
        app.load()
        apps.append(app)
    # Convert available apps payload to objects
    for meta in available_apps:
        if not meta.get("installed"):
            app = App(**meta)
            app.installed = False
            apps.append(app)
    storage.apps.set("applications", apps)
    if verify:
        verify_app_dependencies()
    signals.emit("apps", "post_scan")
    return storage.apps.get("applications")
def add(by, id, sig, func):
    """
    Register a new listener with the system.

    :param str by: the name of the module that registered this listener
    :param str id: identifier for this listener
    :param str sig: signal ID to listen for
    :param func func: hook function to execute
    """
    listeners = storage.signals.get(id)
    if not listeners:
        # First listener under this id: start a fresh list
        listeners = storage.signals[id] = []
    listeners.append(Listener(by, id, sig, func))
    logger.debug("Sign", "Registered {0} to {1} for {2}".format(sig, id, by))
def load(self, verify=True):
    """Load this app's Python modules and register its components.

    Imports the app package plus each declared submodule, wires recognized
    manager classes (database/website/backup) onto the app object, and
    registers any service ports with tracked_services. On any failure the
    app is marked unloadable with the error recorded.

    :param bool verify: verify dependencies before loading
    """
    try:
        signals.emit("apps", "pre_load", self)
        if verify:
            self.verify_dependencies()
        # Load the application module into Python
        imp.load_module(self.id, *imp.find_module(self.id, [os.path.join(config.get("apps", "app_dir"))]))
        # Get module and its important classes and track them on this object
        for module in self.modules:
            submod = imp.load_module("%s.%s" % (self.id, module), *imp.find_module(module, [os.path.join(config.get("apps", "app_dir"), self.id)]))
            classes = inspect.getmembers(submod, inspect.isclass)
            # Find the base manager class the submodule subclasses, if any.
            # NOTE(review): `mgr` stays None if no base class is found; the
            # issubclass() calls below would then raise TypeError, caught by
            # the broad except at the end -- confirm this is intended.
            mgr = None
            for y in classes:
                if y[0] in ["DatabaseManager", "Site", "BackupController"]:
                    mgr = y[1]
                    break
            logger.debug(" *** Registering %s module on %s" % (module, self.id))
            if module == "database":
                # Track the concrete subclass (not the base) as the manager
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_database_mgr", y[1])
            elif module == "website":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_website", y[1])
            elif module == "backup":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_backup", y[1])
            elif module == "api":
                # Expose the app backend to its API module before tracking it
                if hasattr(self, "_backend"):
                    setattr(submod, self.id, self._backend)
                setattr(self, "_api", submod)
            elif module == "ssl":
                self.ssl = submod
            else:
                setattr(self, "_%s" % module, submod)
        # Set up tracking of ports associated with this app
        for s in self.services:
            if s["ports"]:
                tracked_services.register(self.id, s["binary"], s["name"], self.icon, s["ports"], default_policy=s.get("default_policy", 2), fw=False)
        signals.emit("apps", "post_load", self)
    except Exception, e:
        # Any failure anywhere above marks the app unloadable with the cause
        self.loadable = False
        self.error = "Module error: %s" % str(e)
        logger.warn("Failed to load %s -- %s" % (self.name, str(e)))
def register_frameworks(app):
    """
    Register an API framework (set of endpoints) with the server.

    :param Flask app: Flask app
    """
    fmwkdir = os.path.join(os.path.dirname(__file__), "frameworks")
    for entry in os.listdir(fmwkdir):
        # Skip hidden/private files and compiled bytecode
        if entry.startswith((".", "_")) or entry.endswith((".pyc", ".pyo")):
            continue
        name = entry.split(".py")[0]
        mod = imp.load_module(name, *imp.find_module(name, [fmwkdir]))
        logger.debug("Init", " *** Registering {0}...".format(name))
        app.register_blueprint(mod.backend)
def site_load(site):
    """
    Create a BackupController when a Website is first created/loaded.

    :param Website site: Site to create controller for
    """
    # Reverse proxies get no backup controller at all
    if site.__class__.__name__ == "ReverseProxy":
        site.backup = None
        return
    logger.debug(
        "Back", "Registering backupcontroller for {0}".format(site.id))
    ctrl_cls = site.app.get_module("backup") or BackupController
    site.backup = ctrl_cls(site.id, site.app.icon, site, site.app.version)
def _upnp_igd_connect():
    """Discover and connect to a uPnP Internet Gateway Device.

    :returns: a connected ``miniupnpc.UPnP`` client, or None on failure
    """
    logger.debug("TrSv", "Attempting to connect to uPnP IGD")
    upnpc = miniupnpc.UPnP()
    upnpc.discoverdelay = 3000
    devs = upnpc.discover()
    if devs == 0:
        msg = "Failed to connect to uPnP IGD: no devices found"
        logger.warning("TrSv", msg)
        return None
    try:
        upnpc.selectigd()
    except Exception as e:
        msg = "Failed to connect to uPnP IGD: {0}"
        logger.warning("TrSv", msg.format(str(e)))
        # BUGFIX: previously fell through and returned the client even though
        # no IGD had been selected; callers now consistently receive None on
        # failure, matching the no-devices branch above.
        return None
    return upnpc
def scan():
    """
    Retrieve a list of all databases registered with arkOS.

    :return: Database(s)
    :rtype: Database or list thereof
    """
    logger.debug("DBas", "Scanning for databases")
    storage.databases.clear()
    for mgr in get_managers():
        try:
            for db in mgr.get_dbs():
                storage.databases[db.id] = db
        except Exception as e:
            # One broken manager must not abort the whole scan, but log the
            # failure instead of silently swallowing it (was a bare except).
            # Mirrors the error handling style of scan_shares().
            logger.debug("DBas", "Could not get databases: {0}".format(str(e)))
            continue
    return storage.databases
def verify_app_dependencies():
    """
    Verify that any dependent arkOS apps are properly installed/verified.

    Assigns ``loadable`` and ``error`` properties to all apps in the cache.
    """
    for x in filter(lambda x: x.installed, storage.applications.values()):
        for dep in x.dependencies:
            # For each app-type dependency in all installed apps...
            if dep["type"] == "app":
                # If the needed app isn't yet installed, put a fail message
                pre_app = storage.applications.get(dep["package"])
                if not pre_app or not pre_app.installed:
                    x.loadable = False
                    x.error = "Depends on {0}, which is not installed"\
                        .format(dep["name"])
                    error_str = "*** Verify failed for {0} -- dependent on "\
                        "{1} which is not installed"
                    error_str = error_str.format(x.name, dep["name"])
                    logger.debug("Apps", error_str)
                    # Cascade this fail message to all apps in dependency chain
                    for z in get_dependent(x.id, "remove"):
                        # `z` is an app id here; rebind to the app object
                        z = storage.applications.get(z)
                        if not z:
                            continue
                        z.loadable = False
                        error_str = "Depends on {0}, which cannot be loaded "\
                            "because {1} is not installed"
                        z.error = error_str.format(x.name, dep["name"])
                # Also put fail msg if the app we depended on failed to load
                elif not pre_app.loadable:
                    x.loadable = False
                    x.error = "Depends on {0}, which also failed"\
                        .format(dep["name"])
                    error_str = "*** Verify failed for {0} -- dependent on "\
                        "{1} which failed to load"
                    error_str = error_str.format(x.name, dep["name"])
                    logger.debug("Apps", error_str)
                    # Cascade this fail message to all apps in dependency chain
                    for z in get_dependent(x.id, "remove"):
                        z = storage.applications.get(z)
                        if not z:
                            continue
                        z.loadable = False
                        error_str = "Depends on {0}, which cannot be loaded"\
                            " because {1} failed to load"
                        z.error = error_str.format(x.name, dep["name"])
def scan_shares():
    """
    Retrieve a list of all file shares registered with arkOS.

    :return: Share(s)
    :rtype: Share or list thereof
    """
    storage.shares.clear()
    for sharer in get_sharers():
        try:
            for share in sharer.get_shares():
                storage.shares[share.id] = share
        except Exception as e:
            # Keep scanning the remaining sharers; just report this one
            warn_msg = "Could not get shares for {0}".format(sharer.name)
            logger.warning("Sharers", warn_msg)
            logger.debug("Sharers", str(e))
    return storage.shares
def verify_dependencies(self): verify, error, to_pacman = True, "", [] # If dependency isn't installed, add it to "to install" list # If it can't be installed, mark the app as not loadable and say why for dep in self.dependencies: if dep["type"] == "system": if (dep["binary"] and not find_executable(dep["binary"])) \ or not pacman.is_installed(dep["package"]): to_pacman.append(dep["package"]) if dep.has_key("internal") and dep["internal"]: error = "Restart required" verify = False if dep["type"] == "python": to_pip = "" if dep["module"]: try: __import__(dep["module"]) except ImportError: to_pip = dep["package"] else: if not python.is_installed(dep["package"]): to_pip = dep["package"] if to_pip: try: logger.debug(" *** Installing %s (via pip)..." % to_pip) python.install(to_pip) except: error = "Couldn't install %s" % to_pip verify = False finally: if dep.has_key("internal") and dep["internal"]: error = "Restart required" verify = False # Execute the "to install" list actions if to_pacman: pacman.refresh() for x in to_pacman: try: logger.debug(" *** Installing %s..." % x) pacman.install(x) except: error = "Couldn't install %s" % x verify = False self.loadable = verify self.error = error return verify
def install(self, install_deps=True, load=True, force=False, message=None):
    """Install the application, optionally with its app dependencies first.

    :param bool install_deps: also install dependent arkOS apps first
    :param bool load: load each app after installing it
    :param bool force: reinstall even if already installed
    :param message: progress reporter with an ``update()`` method; a fresh
        DefaultMessage is created per call when omitted
    """
    # BUGFIX: was `message=DefaultMessage()` -- a mutable default argument
    # shared by every call to this method. Create a fresh one per call.
    if message is None:
        message = DefaultMessage()
    if self.installed and not force:
        return
    signals.emit("apps", "pre_install", self)
    # Get all apps that this app depends on and install them first
    deps = get_dependent(self.id, "install")
    if install_deps and deps:
        for x in deps:
            logger.debug("Installing %s (dependency for %s)" % (x, self.name))
            message.update("info", "Installing dependencies for %s... (%s)" % (self.name, x))
            _install(x, load=load)
    # Install this app
    logger.debug("Installing %s" % self.name)
    message.update("info", "Installing %s..." % self.name)
    _install(self.id, load=load)
    verify_app_dependencies()
    signals.emit("apps", "post_install", self)
def redis():
    """Initialize distribution Redis integration.

    Installs the arkOS-specific Redis unit and config, locks down its data
    directory, generates and stores a random admin password, then starts
    the arkos-redis service. Raises CLIException if the template files from
    `arkos-configs` are missing.
    """
    paths = ["arkos-redis.service", "arkos-redis.conf"]
    for x in paths:
        if not os.path.exists(os.path.join("/usr/share/arkos/redis", x)):
            raise CLIException(
                "Template files could not be found. Your installation may "
                "be corrupted. Please reinstall the `arkos-configs` package.")
    logger.debug('ctl:init:redis', 'Stopping daemon if exists: arkos-redis')
    shell("systemctl stop arkos-redis")
    logger.info('ctl:init:redis', 'Copying files')
    ruid, rgid = pwd.getpwnam("redis").pw_uid, grp.getgrnam("redis").gr_gid
    # Config must be readable only by the redis user/group (contains password)
    shutil.copy("/usr/share/arkos/redis/arkos-redis.conf",
                "/etc/arkos-redis.conf")
    os.chown("/etc/arkos-redis.conf", ruid, rgid)
    os.chmod("/etc/arkos-redis.conf", 0o660)
    shutil.copy("/usr/share/arkos/redis/arkos-redis.service",
                "/usr/lib/systemd/system/arkos-redis.service")
    os.chmod("/usr/lib/systemd/system/arkos-redis.service", 0o644)
    # Private data directory owned by redis
    if not os.path.exists("/var/lib/arkos-redis"):
        os.makedirs("/var/lib/arkos-redis")
    os.chmod("/var/lib/arkos-redis", 0o700)
    os.chown("/var/lib/arkos-redis", ruid, rgid)
    logger.info('ctl:init:redis', 'Setting admin password')
    redis_passwd = random_string(16)
    # Substitute the generated password into the config template
    with open("/etc/arkos-redis.conf", "r") as f:
        data = f.read()
    data = data.replace("%REDISPASS%", redis_passwd)
    with open("/etc/arkos-redis.conf", "w") as f:
        f.write(data)
    secrets.set("redis", redis_passwd)
    secrets.save()
    logger.debug('ctl:init:redis', 'Starting daemon: arkos-redis')
    shell("systemctl daemon-reload")
    shell("systemctl enable arkos-redis")
    shell("systemctl start arkos-redis")
    logger.success('ctl:init:redis', 'Complete')
def add(self, passwd):
    """
    Add the user to LDAP.

    Fails if the DN already exists; otherwise creates the entry, applies
    admin/sudo modes and updates the Samba password database.

    :param str passwd: user password to set
    :raises errors.InvalidConfigError: if a user with this name exists
    """
    try:
        # Existence probe: search_s raises NO_SUCH_OBJECT when the DN is free
        ldif = conns.LDAP.search_s(self.ldap_id, ldap.SCOPE_BASE,
                                   "(objectClass=*)", None)
        msg = "A user named {0} already exists".format(self.name)
        raise errors.InvalidConfigError(msg)
    except ldap.NO_SUCH_OBJECT:
        pass
    # Create LDAP user with proper metadata
    # (values must be lists of bytes; `b` is the project's encode helper)
    ldif = {
        "objectClass": [b"mailAccount", b"inetOrgPerson", b"posixAccount"],
        "givenName": [b(self.first_name)],
        "sn": [b(self.last_name)] if self.last_name else [b"NONE"],
        "displayName": [b(self.full_name)],
        "cn": [b(self.full_name)],
        "uid": [b(self.name)],
        "mail": [b(self.name + "@" + self.domain)],
        "maildrop": [b(self.name)],
        "userPassword": [b(ldap_sha512_crypt.encrypt(passwd))],
        "gidNumber": [b"100"],
        "uidNumber": [b(str(self.uid))],
        "homeDirectory": [b("/home/" + self.name)],
        "loginShell": [b"/usr/bin/bash"]
    }
    ldif = ldap.modlist.addModlist(ldif)
    signals.emit("users", "pre_add", self)
    logger.debug("Roles", "Adding user: {0}".format(self.ldap_id))
    conns.LDAP.add_s(self.ldap_id, ldif)
    modes = ["admin" if self.admin else "", "sudo" if self.sudo else ""]
    msg = "Setting user modes: {0}".format(", ".join(modes))
    logger.debug("Roles", msg)
    self.update_adminsudo()
    self.update_samba(passwd)
    signals.emit("users", "post_add", {"user": self, "passwd": passwd})
def nginx():
    """Initialize default nginx configuration."""
    # The default config ships with the arkos-configs package
    if not os.path.exists("/usr/share/arkos/nginx.conf"):
        raise CLIException(
            "Template files could not be found. Your installation may "
            "be corrupted. Please reinstall the `arkos-configs` package.")
    logger.info('ctl:init:nginx', 'Copying files')
    # Ensure the webapp root and the sites-available/enabled layout exist
    for dirpath in ("/srv/http/webapps",
                    "/etc/nginx/sites-available",
                    "/etc/nginx/sites-enabled"):
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
    shutil.copy("/usr/share/arkos/nginx.conf", "/etc/nginx/nginx.conf")
    logger.debug('ctl:init:nginx', 'Restarting daemon: nginx')
    shell("systemctl enable nginx")
    shell("systemctl restart nginx")
    logger.success('ctl:init:nginx', 'Completed')
def uninstall(self, force=False, message=None):
    """Uninstall the application and clean up its exclusive dependencies.

    :param bool force: remove even if other installed apps depend on this one
    :param message: progress reporter with an ``update()`` method; a fresh
        DefaultMessage is created per call when omitted
    :raises Exception: if another app depends on this one and not forcing
    """
    # BUGFIX: was `message=DefaultMessage()` -- a mutable default argument
    # shared by every call to this method. Create a fresh one per call.
    if message is None:
        message = DefaultMessage()
    signals.emit("apps", "pre_remove", self)
    message.update("info", "Uninstalling application...")
    exclude = ["openssl", "openssh", "nginx", "python2", "git",
               "nodejs", "npm"]
    # Make sure this app can be successfully removed, and if so also remove
    # any system-level packages that *only* this app requires
    for x in get(installed=True):
        for item in x.dependencies:
            if item["type"] == "app" and item["package"] == self.id and not force:
                raise Exception("Cannot remove, %s depends on this application" % x.name)
            elif item["type"] == "system":
                # Another app uses this package; never remove it
                exclude.append(item["package"])
    # Stop any running services associated with this app
    for item in self.dependencies:
        if item["type"] == "system" and not item["package"] in exclude:
            if item.has_key("daemon") and item["daemon"]:
                services.stop(item["daemon"])
                services.disable(item["daemon"])
            pacman.remove([item["package"]],
                          purge=config.get("apps", "purge", False))
    logger.debug("Uninstalling %s" % self.name)
    # Remove the app's directory and cleanup the app object
    shutil.rmtree(os.path.join(config.get("apps", "app_dir"), self.id))
    self.loadable = False
    self.installed = False
    # Regenerate the firewall and re-block the abandoned ports
    regen_fw = False
    for x in self.services:
        if x["ports"]:
            regen_fw = True
    if regen_fw:
        tracked_services.deregister(self.id)
    signals.emit("apps", "post_remove", self)
def verify_dependencies(self, cry, installed):
    """
    Verify that the associated dependencies are all properly installed.

    Checks system-level packages, Python packages and arkOS Apps for
    installed status. Sets ``self.loadable`` with verify status and
    ``self.error`` with error message encountered on check.

    :param bool cry: raise AppDependencyError on install failure
    :param dict installed: cache of installed package lists keyed by
        "sys"/"py"/"py2"/"rb"; populated in place when passed empty
    :returns: True if all verify checks passed
    :rtype: bool
    """
    verify, error = True, ""
    # If dependency isn't installed, add it to "to install" list
    # If it can't be installed, mark the app as not loadable and say why
    if not installed:
        # No cache supplied: build the installed-package lists now
        pacman.refresh()
        installed["sys"] = pacman.get_installed()
        installed["py"] = python.get_installed()
        installed["py2"] = python.get_installed(py2=True)
        installed["rb"] = ruby.get_installed()
    for dep in self.dependencies:
        if dep["type"] == "system":
            pack = next(
                filter(lambda x: x["id"] == dep["package"],
                       installed["sys"]), None
            )
            # A present package with too low a version still fails the check
            invalid_ver = False
            if pack and dep.get("version"):
                invalid_ver = compare_versions(
                    pack["version"], "lt", dep["version"]
                )
            if not pack or invalid_ver:
                logger.debug(
                    "Apps", "{0} not found. Attempting install..."
                    .format(dep["package"]))
                try:
                    pacman.install(dep["package"])
                except:
                    error = "Couldn't install {0}".format(dep["package"])
                    verify = False
                    if cry:
                        raise AppDependencyError(dep["package"], "system")
                # Internal deps force a daemon reload once (re)installed
                if dep.get("internal"):
                    error = "Reload required"
                    verify = False
        if dep["type"] == "python":
            # Check against the py2 list when the dep declares py2
            ilist = installed["py2"] if dep.get("py2") else installed["py"]
            pack = next(
                filter(lambda x: x["id"].lower() == dep["package"].lower(),
                       ilist), None
            )
            invalid_ver = False
            if pack and dep.get("version"):
                invalid_ver = compare_versions(
                    pack["version"], "lt", dep["version"]
                )
            if not pack or invalid_ver:
                logger.debug(
                    "Apps", "{0} not found. Attempting install..."
                    .format(dep["package"]))
                try:
                    python.install(
                        dep["package"], version=dep.get("version"),
                        py2=True if dep.get("py2") else False
                    )
                except:
                    error = "Couldn't install {0}".format(dep["package"])
                    verify = False
                    if cry:
                        raise AppDependencyError(dep["package"], "python")
                if dep.get("internal"):
                    error = "Reload required"
                    verify = False
        if dep["type"] == "ruby":
            pack = next(
                filter(lambda x: x["id"] == dep["package"],
                       installed["rb"]), None
            )
            invalid_ver = False
            if pack and dep.get("version"):
                invalid_ver = compare_versions(
                    pack["version"], "lt", dep["version"]
                )
            if not pack or invalid_ver:
                logger.debug(
                    "Apps", "{0} not found. Attempting install..."
                    .format(dep["package"]))
                try:
                    ruby.install(
                        dep["package"], version=dep.get("version")
                    )
                except:
                    error = "Couldn't install {0}".format(dep["package"])
                    verify = False
                    if cry:
                        raise AppDependencyError(dep["package"], "ruby")
                if dep.get("internal"):
                    error = "Reload required"
                    verify = False
    self.loadable = verify
    self.error = error
    return verify
def scan(verify=True, cry=True):
    """
    Search app directory for applications, load them and store metadata.

    Also contacts arkOS repo servers to obtain current list of available
    apps, and merges in any updates as necessary.

    :param bool verify: Verify app dependencies as the apps are scanned
    :param bool cry: Raise exception on dependency install failure?
    :return: list of Application objects
    :rtype: list
    """
    signals.emit("apps", "pre_scan")
    logger.debug("Apps", "Scanning for applications")
    app_dir = config.get("apps", "app_dir")
    if not os.path.exists(app_dir):
        os.makedirs(app_dir)
    # Build the installed-package caches once, shared by every app's verify
    pacman.refresh()
    logger.debug("Apps", "Getting system/python/ruby installed list")
    inst_list = {
        "sys": pacman.get_installed(),
        "py": python.get_installed(),
        "py2": python.get_installed(py2=True),
        "rb": ruby.get_installed()
    }
    # Get paths for installed apps, metadata for available ones
    installed_apps = [x for x in os.listdir(app_dir) if not x.startswith(".")]
    api_url = ("https://{0}/api/v1/apps"
               .format(config.get("general", "repo_server")))
    logger.debug("Apps", "Fetching available apps: {0}".format(api_url))
    try:
        available_apps = api(api_url)
    except Exception as e:
        # Offline or GRM down: continue with local apps only
        available_apps = []
        logger.error("Apps", "Could not get available apps from GRM.")
        logger.error("Apps", str(e))
    if available_apps:
        available_apps = available_apps["applications"]
    else:
        available_apps = []
    # Create objects for installed apps with appropriate metadata
    for x in installed_apps:
        try:
            with open(os.path.join(app_dir, x, "manifest.json"), "r") as f:
                data = json.loads(f.read())
        except ValueError:
            warn_str = "Failed to load {0} due to a JSON parsing error"
            logger.warning("Apps", warn_str.format(x))
            continue
        except IOError:
            warn_str = "Failed to load {0}: manifest file inaccessible "\
                "or not present"
            logger.warning("Apps", warn_str.format(x))
            continue
        logger.debug("Apps", " *** Loading {0}".format(data["id"]))
        app = App(**data)
        app.installed = True
        # Merge repo metadata: upgradable version flag and asset list
        for y in enumerate(available_apps):
            if app.id == y[1]["id"] and app.version != y[1]["version"]:
                app.upgradable = y[1]["version"]
            if app.id == y[1]["id"]:
                app.assets = y[1]["assets"]
                available_apps[y[0]]["installed"] = True
        app.load(verify=verify, cry=cry, installed=inst_list)
        storage.applications[app.id] = app
    # Convert available apps payload to objects
    for x in available_apps:
        if not x.get("installed"):
            app = App(**x)
            app.installed = False
            storage.applications[app.id] = app
    if verify:
        verify_app_dependencies()
    signals.emit("apps", "post_scan")
    return storage.applications
def scan():
    """Search website directories for sites, load them and store metadata."""
    from arkos import certificates
    logger.debug("Webs", "Scanning for websites")
    for x in os.listdir("/etc/nginx/sites-available"):
        # Only consider nginx configs that have a matching webapp directory
        path = os.path.join("/srv/http/webapps", x)
        if not os.path.exists(path):
            continue
        # Read metadata
        meta = configparser.SafeConfigParser()
        if not meta.read(os.path.join(path, ".arkos")):
            continue
        # Create the proper type of website object
        app = None
        app_type = meta.get("website", "app")
        app = applications.get(app_type)
        if app and app.type == "website":
            # If it's a regular website, initialize its class, metadata, etc
            if not app or not app.loadable or not app.installed:
                logger.debug(
                    "Webs",
                    "Website found but could not be loaded: {0}".format(
                        meta.get("website", "id")))
                continue
            site = app._website(id=meta.get("website", "id"))
            site.app = app
            site.data_path = (meta.get("website", "data_path") or "") \
                if meta.has_option("website", "data_path") else ""
            site.db = databases.get(site.id) \
                if meta.has_option("website", "dbengine") else None
        elif app:
            # If it's a reverse proxy, follow a simplified procedure
            site = ReverseProxy(id=meta.get("website", "id"))
            site.app = app
        else:
            # Unknown website type.
            logger.debug(
                "Webs",
                "Unknown website found and ignoring, id {0}".format(
                    meta.get("website", "id")))
            continue
        # Attach the certificate (if any) and record the assignment on it
        certname = meta.get("website", "ssl", fallback="None")
        site.cert = certificates.get(certname) if certname != "None" else None
        if site.cert:
            site.cert.assigns.append({
                "type": "website", "id": site.id,
                "name": site.id if site.app else site.name
            })
        site.version = meta.get("website", "version", fallback=None)
        site.enabled = os.path.exists(
            os.path.join("/etc/nginx/sites-enabled", x))
        site.installed = True
        # Load the proper nginx serverblock and get more data
        try:
            block = nginx.loadf(os.path.join("/etc/nginx/sites-available", x))
            for y in block.servers:
                if "ssl" in y.filter("Key", "listen")[0].value:
                    site.ssl = True
                    server = y
                    break
            else:
                # No server block listens with ssl; fall back to the default
                server = block.server
            port_regex = re.compile("(\\d+)\s*(.*)")
            # NOTE(review): lstrip("[::]:") strips *characters* [, ], : from
            # the left -- works for "[::]:443" style values but is a char-set
            # strip, not a prefix strip; confirm for unusual listen values.
            listen = server.filter("Key", "listen")[0].value.lstrip("[::]:")
            site.port = int(re.match(port_regex, listen).group(1))
            site.domain = server.filter("Key", "server_name")[0].value
            site.path = server.filter("Key", "root")[0].value
            site.php = "php" in server.filter("Key", "index")[0].value
        except IndexError:
            # Malformed server block: keep the site with metadata-only info
            pass
        storage.websites[site.id] = site
        signals.emit("websites", "site_loaded", site)
    return storage.websites
def info(self, message):
    """Handle an info-level message from the WSGI server.

    NOTE(review): routed to logger.debug, presumably to demote noisy
    per-request WSGI log lines below info level -- confirm this is intended.
    """
    logger.debug("WSGI", message)
def debug(self, message):
    """Forward a debug-level message from the WSGI server to the app logger."""
    logger.debug("WSGI", message)
def run_daemon(environment, config_file, secrets_file, policies_file, debug):
    """Run the Kraken server daemon.

    Initializes arkOS config/secrets/policies, wires up logging handlers,
    registers all API blueprints and Genesis, then serves the Flask app via
    eventlet (optionally TLS-wrapped) until interrupted.

    :param str environment: runtime environment name ("dev", "vagrant", ...)
    :param str config_file: path to the arkOS config file
    :param str secrets_file: path to the secrets file
    :param str policies_file: path to the policies file
    :param bool debug: force debug mode regardless of environment
    """
    app.debug = debug or environment in ["dev", "vagrant"]
    app.config["SECRET_KEY"] = random_string()
    # Open and load configuration
    config = arkos.init(config_file, secrets_file, policies_file,
                        app.debug, environment in ["dev", "vagrant"],
                        app.logger)
    storage.connect()
    # Rotating file log only outside of dev/vagrant environments
    if environment not in ["dev", "vagrant"]:
        filehdlr = RotatingFileHandler(
            '/var/log/kraken.log',
            maxBytes=2097152,
            backupCount=5
        )
        st = "{asctime} [{cls}] [{levelname}] {comp}: {message}"
        filehdlr.setLevel(logging.DEBUG if app.debug else logging.INFO)
        filehdlr.setFormatter(FileFormatter(st))
        logger.logger.addHandler(filehdlr)
    # API log handler feeds log records to API clients as notifications
    apihdlr = APIHandler()
    apihdlr.setLevel(logging.DEBUG if app.debug else logging.INFO)
    apihdlr.addFilter(NotificationFilter())
    logger.logger.addHandler(apihdlr)
    logger.info("Init", "arkOS Kraken {0}".format(arkos.version))
    if environment in ["dev", "vagrant"]:
        logger.debug("Init", "*** TEST MODE ***")
    logger.info("Init", "Using config file at {0}".format(config.filename))
    app.conf = config
    arch = config.get("enviro", "arch", "Unknown")
    board = config.get("enviro", "board", "Unknown")
    platform = detect_platform()
    hwstr = "Detected architecture/hardware: {0}, {1}"
    logger.info("Init", hwstr.format(arch, board))
    logger.info("Init", "Detected platform: {0}".format(platform))
    logger.info("Init", "Environment: {0}".format(environment))
    config.set("enviro", "run", environment)
    # Every HTTP error code returns JSON instead of an HTML error page
    for code in list(default_exceptions.keys()):
        app.register_error_handler(code, make_json_error)
    app.register_blueprint(auth.backend)
    logger.info("Init", "Loading applications and scanning system...")
    arkos.initial_scans()
    # Load framework blueprints
    logger.info("Init", "Loading frameworks...")
    register_frameworks(app)
    logger.info("Init", "Initializing Genesis (if present)...")
    app.register_blueprint(genesis.backend)
    hasgen = genesis.verify_genesis()
    if not hasgen:
        errmsg = ("A compiled distribution of Genesis was not found. "
                  "Kraken will finish loading but you may not be able to "
                  "access the Web interface.")
        logger.warning("Init", errmsg)
    app.after_request(add_cors_to_response)
    logger.info("Init", "Server is up and ready")
    try:
        import eventlet
        # Fan out redis pub/sub events to connected websocket clients
        pubsub = storage.redis.pubsub(ignore_subscribe_messages=True)
        pubsub.subscribe(["arkos:notifications", "arkos:records:push",
                          "arkos:records:purge"])
        eventlet.spawn(handle_pubsub, pubsub, socketio)
        eventlet_socket = eventlet.listen(
            (config.get("genesis", "host"), config.get("genesis", "port"))
        )
        if config.get("genesis", "ssl", False):
            eventlet_socket = eventlet.wrap_ssl(
                eventlet_socket,
                certfile=config.get("genesis", "cert_file"),
                keyfile=config.get("genesis", "cert_key"),
                ssl_version=ssl.PROTOCOL_TLSv1_2,
                server_side=True)
        eventlet.wsgi.server(
            eventlet_socket, app, log=WSGILogWrapper(),
            log_format=('%(client_ip)s - "%(request_line)s" %(status_code)s '
                        '%(body_length)s %(wall_seconds).6f'))
    except KeyboardInterrupt:
        logger.info("Init", "Received interrupt")
        raise
def ldap():
    """Initialize distribution copy of OpenLDAP.

    Wipes any existing slapd database/config, installs the arkOS templates
    and schemas, generates and stores a random root password, rebuilds the
    database from base.ldif, fixes ownership, then restarts slapd. Raises
    CLIException if the template files from `arkos-configs` are missing.
    """
    paths = ["slapd.conf", "ldap.conf", "base.ldif"]
    for x in paths:
        if not os.path.exists(os.path.join("/usr/share/arkos/openldap", x)):
            raise CLIException(
                "Template files could not be found. Your installation may "
                "be corrupted. Please reinstall the `arkos-configs` package.")
    logger.debug('ctl:init:ldap', 'Stopping daemon: slapd')
    s = shell("systemctl stop slapd")
    if s["code"] != 0:
        raise click.ClickException(s["stderr"].decode())
    # Remove any previous dynamic config so the rebuild starts clean
    logger.info('ctl:init:ldap', 'Cleaning up old LDAP database')
    if os.path.exists("/etc/openldap/slapd.ldif"):
        os.unlink("/etc/openldap/slapd.ldif")
    slapdir = "/etc/openldap/slapd.d"
    for x in os.listdir(slapdir):
        fpath = os.path.join(slapdir, x)
        if os.path.isdir(fpath):
            shutil.rmtree(fpath)
        else:
            os.unlink(fpath)
    logger.info('ctl:init:ldap', 'Installing initial configuration')
    shutil.copy("/usr/share/arkos/openldap/slapd.conf",
                "/etc/openldap/slapd.conf")
    shutil.copy("/usr/share/arkos/openldap/ldap.conf",
                "/etc/openldap/ldap.conf")
    # The sudo schema is optional; install it only if the sudo docs ship it
    if os.path.exists("/usr/share/doc/sudo/schema.OpenLDAP"):
        shutil.copy("/usr/share/doc/sudo/schema.OpenLDAP",
                    "/etc/openldap/schema/sudo.schema")
    shutil.copy("/usr/share/arkos/openldap/mailserver.schema",
                "/etc/openldap/schema/mailserver.schema")
    shutil.copy("/usr/share/arkos/openldap/samba.schema",
                "/etc/openldap/schema/samba.schema")
    logger.info('ctl:init:ldap', 'Setting admin password')
    ldap_passwd = random_string(16)
    ldap_pwhash = ldap_sha512_crypt.encrypt(ldap_passwd)
    # Substitute the hashed root password into the slapd config template
    with open("/etc/openldap/slapd.conf", "r") as f:
        data = f.read()
    data = data.replace("%ROOTPW%", ldap_pwhash)
    with open("/etc/openldap/slapd.conf", "w") as f:
        f.write(data)
    secrets.set("ldap", ldap_passwd)
    secrets.save()
    logger.info('ctl:init:ldap', 'Generating new LDAP database')
    logger.debug('ctl:init:ldap', 'slapadd slapd.conf')
    shell("slapadd -f /etc/openldap/slapd.conf -F /etc/openldap/slapd.d/",
          stdin="")
    logger.debug('ctl:init:ldap', 'slaptest')
    shell("slaptest -f /etc/openldap/slapd.conf -F /etc/openldap/slapd.d/")
    # slapadd/slaptest run as root; hand ownership back to the ldap user
    luid, lgid = pwd.getpwnam("ldap").pw_uid, grp.getgrnam("ldap").gr_gid
    for r, d, f in os.walk("/etc/openldap/slapd.d"):
        for x in d:
            os.chown(os.path.join(r, x), luid, lgid)
        for x in f:
            os.chown(os.path.join(r, x), luid, lgid)
    logger.debug('ctl:init:ldap', 'slapindex')
    shell("slapindex")
    logger.debug('ctl:init:ldap', 'slapadd base.ldif')
    shell("slapadd -l /usr/share/arkos/openldap/base.ldif")
    for r, d, f in os.walk("/var/lib/openldap/openldap-data"):
        for x in d:
            os.chown(os.path.join(r, x), luid, lgid)
        for x in f:
            os.chown(os.path.join(r, x), luid, lgid)
    logger.debug('ctl:init:ldap', 'Restarting daemon: slapd')
    shell("systemctl enable slapd")
    shell("systemctl restart slapd")
    logger.success('ctl:init:ldap', 'Complete')