def initialize():
    """Initialize security policy tracking."""
    logger.debug("TrSv", "Initializing security policy tracking")
    # arkOS
    policy = policies.get("arkos", "arkos", 2)
    port = [("tcp", int(config.get("genesis", "port")))]
    pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
                         "server", port, policy)
    storage.policies[pol.id] = pol
    # uPNP
    policy = policies.get("arkos", "upnp", 1)
    pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms", "server",
                         [("udp", 1900)], policy)
    if config.get("general", "enable_upnp"):
        storage.policies[pol.id] = pol
    # SSHd
    policy = policies.get("arkos", "sshd", 1)
    pol = SecurityPolicy("arkos", "sshd", "SSH", "server",
                         [("tcp", 22)], policy)
    # FIX: the SSH policy was created but never added to storage, so it
    # silently vanished from firewall regeneration.
    storage.policies[pol.id] = pol
    # ACME dummies
    for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
        acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
        pol = SecurityPolicy(
            "acme", acme_name, "{0} (ACME Validation)".format(acme_name),
            "globe", [('tcp', 80)], 2)
        storage.policies[pol.id] = pol
    # Custom (user-defined) policies
    for x in policies.get_all("custom"):
        pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
                             x["ports"], x["policy"])
        storage.policies[pol.id] = pol
def scan_authorities():
    """
    Search proper directory for certificates, load them and store metadata.

    :return: list of CertificateAuthority objects
    :rtype: list
    """
    logger.debug("Crts", "Scanning for certificate authorities")
    storage.certificate_authorities.clear()
    ca_cert_dir = config.get("certificates", "ca_cert_dir")
    ca_key_dir = config.get("certificates", "ca_key_dir")
    if not os.path.exists(ca_cert_dir):
        os.makedirs(ca_cert_dir)
    if not os.path.exists(ca_key_dir):
        os.makedirs(ca_key_dir)
    for cert_path in glob.glob(os.path.join(ca_cert_dir, "*.pem")):
        id = os.path.splitext(os.path.split(cert_path)[1])[0]
        with open(cert_path, "rb") as f:
            cert = x509.load_pem_x509_certificate(f.read(), default_backend())
        key_path = os.path.join(ca_key_dir, "{0}.key".format(id))
        # FIX: the key file was opened twice via a duplicated, nested
        # `with open(key_path)`; one open is sufficient.
        with open(key_path, "rb") as f:
            key = serialization.load_pem_private_key(
                f.read(), password=None, backend=default_backend()
            )
        sha1 = binascii.hexlify(cert.fingerprint(hashes.SHA1())).decode()
        md5 = binascii.hexlify(cert.fingerprint(hashes.MD5())).decode()
        kt = "RSA" if isinstance(key.public_key(), rsa.RSAPublicKey) else "DSA"
        ca = CertificateAuthority(id, cert_path, key_path,
                                  cert.not_valid_after, kt, key.key_size,
                                  sha1, md5)
        storage.certificate_authorities[id] = ca
    return storage.certificate_authorities
def generate_authority(domain): ca = CertificateAuthority(id=domain, cert_path=os.path.join(config.get("certificates", "ca_cert_dir"), domain+".pem"), key_path=os.path.join(config.get("certificates", "ca_key_dir"), domain+".key")) # Generate private key and create X509 certificate, then set options key = OpenSSL.crypto.PKey() key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048) crt = OpenSSL.crypto.X509() crt.set_version(3) crt.set_serial_number(int(systemtime.get_serial_time())) crt.get_subject().O = "arkOS Servers" crt.get_subject().CN = domain crt.gmtime_adj_notBefore(0) crt.gmtime_adj_notAfter(5*365*24*60*60) crt.set_issuer(crt.get_subject()) crt.set_pubkey(key) crt.add_extensions([ OpenSSL.crypto.X509Extension("basicConstraints", True, "CA:TRUE, pathlen:0"), OpenSSL.crypto.X509Extension("keyUsage", True, "keyCertSign, cRLSign"), OpenSSL.crypto.X509Extension("subjectKeyIdentifier", False, "hash", subject=crt), ]) crt.sign(key, "sha256") # Save to files with open(ca.cert_path, "wt") as f: f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, crt)) os.chmod(ca.cert_path, 0660) with open(ca.key_path, "wt") as f: f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)) ca.expiry = crt.get_notAfter() storage.certs.add("authorities", ca) return ca
def make_json_error(err):
    """Prepare a standardized error report."""
    if hasattr(err, "description"):
        message = err.description
    else:
        message = str(err)
    if (isinstance(err, HTTPException) and err.code == 500)\
            or not isinstance(err, HTTPException):
        # Unexpected failure: assemble a full crash report for diagnostics
        pyver = [str(x) for x in platform.python_version_tuple()]
        apps = arkos_storage.applications.values()
        apps = [x.id for x in apps if x.installed]
        stacktrace = traceback.format_exc()
        report = "arkOS {0} Crash Report\n".format(version)
        report += "--------------------\n\n"
        report += "Running in {0}\n".format(config.get("enviro", "run"))
        report += "System: {0}\n".format(shell("uname -a")["stdout"].decode())
        report += "Platform: {0} {1}\n".format(config.get("enviro", "arch"),
                                               config.get("enviro", "board"))
        report += "Python version {0}\n".format('.'.join(pyver))
        report += "Config path: {0}\n\n".format(config.filename)
        # FIX: corrected typo in report label ("applicatons")
        report += "Loaded applications: \n{0}\n\n".format("\n".join(apps))
        report += "Request: {0} {1}\n\n".format(request.method, request.path)
        report += stacktrace
        response = jsonify(errors={"msg": message, "stack": stacktrace,
                                   "report": report, "version": version,
                                   "arch": config.get("enviro", "arch")})
        logger.critical("Unknown", stacktrace)
    else:
        # Expected HTTP error: just relay the message
        response = jsonify(errors={"msg": message})
    response.status_code = err.code if isinstance(err, HTTPException) else 500
    return add_cors_to_response(response)
def create(self, mount=False):
    """
    Create the virtual disk image, format it ext4, optionally mount it.

    :param bool mount: mount the new disk after creation?
    """
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    if not os.path.exists(vdisk_dir):
        os.mkdir(vdisk_dir)
    self.path = str(os.path.join(vdisk_dir, self.id+".img"))
    if os.path.exists(self.path):
        raise Exception("This virtual disk already exists")
    signals.emit("filesystems", "pre_add", self)
    # Create an empty file matching disk size
    with open(self.path, "wb") as f:
        written = 0
        # FIX: use open() instead of the removed py2 builtin file(),
        # and read the zero source in binary to match the binary write.
        with open("/dev/zero", "rb") as zero:
            while self.size > written:
                written += 1024
                f.write(zero.read(1024))
    # Get a free loopback device and mount
    loop = losetup.find_unused_loop_device()
    loop.mount(str(self.path), offset=1048576)
    # Make a filesystem
    s = shell("mkfs.ext4 %s" % loop.device)
    if s["code"] != 0:
        raise Exception("Failed to format loop device: %s" % s["stderr"])
    loop.unmount()
    # FIX: the completion signal re-emitted "pre_add"; listeners must be
    # told the add has finished, i.e. "post_add".
    signals.emit("filesystems", "post_add", self)
    if mount:
        self.mount()
def create_token(user):
    """
    Create a JSON Web Token (JWT) for the specified user.

    :param User user: an arkOS user
    :returns: JSON Web Token (JWT)
    :rtype: str
    """
    iat = systemtime.get_unix_time()
    try:
        # If the clock is more than an hour off NTP, resync before stamping
        offset = systemtime.get_offset()
        if offset < -3600 or offset > 3600:
            systemtime.set_datetime()
            iat = systemtime.get_unix_time()
    # FIX: narrowed from a bare `except:` which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        twarn = ("System time is not accurate or could not be verified."
                 " Access tokens will not expire.")
        logger.warning("System", twarn)
        iat = None
    payload = {
        "uid": user.name,
        "ufn": user.first_name,
        "uln": user.last_name,
    }
    # Only stamp issue/expiry times when the clock is trustworthy
    if iat:
        payload["iat"] = iat
        payload["exp"] = iat + config.get("genesis", "token_valid_for", 3600)
    tjwss = TimedJSONWebSignatureSerializer(
        secret_key=current_app.config["SECRET_KEY"],
        expires_in=config.get("genesis", "token_valid_for", 3600),
        algorithm_name="HS256")
    return tjwss.dumps(payload).decode("utf-8")
def encrypt(self, passwd, cipher="", keysize=0, mount=False):
    """
    Encrypt this virtual disk image in place with LUKS.

    :param str passwd: passphrase to encrypt the disk with
    :param str cipher: cipher suite (falls back to config, then default)
    :param int keysize: key size in bits (falls back to config, then 256)
    :param bool mount: mount the disk after encrypting?
    """
    cipher = cipher or config.get("filesystems", "cipher") or "aes-xts-plain64"
    keysize = keysize or config.get("filesystems", "keysize") or 256
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    # Rename the backing file to mark it as encrypted
    crypt_path = os.path.join(vdisk_dir, self.id+".crypt")
    os.rename(self.path, crypt_path)
    self.path = crypt_path
    # Find an open loopback device and mount
    loopdev = losetup.find_unused_loop_device()
    loopdev.mount(str(self.path), offset=1048576)
    # Encrypt the file inside the loopback and open the LUKS mapping
    status = crypto.luks_format(loopdev.device, passwd, cipher, int(keysize))
    if status != 0:
        # Roll the rename back so the disk stays usable unencrypted
        loopdev.unmount()
        os.rename(self.path, os.path.join(vdisk_dir, self.id+".img"))
        raise Exception("Failed to encrypt %s with errno %s"%(self.id, str(status)))
    status = crypto.luks_open(loopdev.device, self.id, passwd)
    if status != 0:
        loopdev.unmount()
        raise Exception("Failed to decrypt %s with errno %s"%(self.id, str(status)))
    # Create a filesystem inside the encrypted device
    result = shell("mkfs.ext4 /dev/mapper/%s" % self.id)
    crypto.luks_close(self.id)
    loopdev.unmount()
    if result["code"] != 0:
        raise Exception("Failed to format loop device: %s" % result["stderr"])
    self.crypt = True
    if mount:
        self.mount(passwd)
def check_updates():
    """Check for updates from arkOS repo server."""
    updates = []
    gpg = gnupg.GPG()
    server = config.get("general", "repo_server")
    current = config.get("updates", "current_update")
    # Fetch updates from registry server
    api_url = "https://{0}/api/v1/updates/{1}"
    data = api(api_url.format(server, str(current)), crit=True)
    for x in data["updates"]:
        ustr, u = str(x["tasks"]), json.loads(x["tasks"])
        # Get the update signature and test it
        sig_url = "https://{0}/api/v1/signatures/{1}"
        sig = api(sig_url.format(server, x["id"]), returns="raw", crit=True)
        with open("/tmp/{0}.sig".format(x["id"]), "w") as f:
            f.write(sig)
        v = gpg.verify_data("/tmp/{0}.sig".format(x["id"]), ustr)
        if v.trust_level is None:
            err_str = "Update {0} signature verification failed"
            logger.error("Updates", err_str.format(x["id"]))
            break
        else:
            # FIX: renamed from `data`, which shadowed the server payload
            # being iterated above.
            update = {
                "id": x["id"],
                "name": x["name"],
                "date": x["date"],
                "info": x["info"],
                "tasks": u
            }
            updates.append(update)
    # FIX: entries are plain dicts, so key by x["id"]; the previous
    # `x.id` raised AttributeError whenever any update was found.
    storage.updates = {x["id"]: x for x in updates}
    return updates
def uninstall(self, force=False, nthread=None):
    """
    Uninstall the arkOS application from the system.

    :param bool force: Uninstall the app even if others depend on it?
    :param NotificationThread nthread: notification thread to use
    """
    # FIX: the default was `nthread=NotificationThread()`, evaluated once
    # at import time and shared by every call; create one per call instead.
    if nthread is None:
        nthread = NotificationThread()
    signals.emit("apps", "pre_remove", self)
    msg = "Uninstalling application..."
    nthread.update(Notification("info", "Apps", msg))
    exclude = ["openssl", "openssh", "nginx", "python2", "git",
               "nodejs", "npm"]
    # Make sure this app can be successfully removed, and if so also remove
    # any system-level packages that *only* this app requires
    for x in get(installed=True):
        for item in x.dependencies:
            if item["type"] == "app" and item["package"] == self.id \
                    and not force:
                exc_str = "{0} depends on this application"
                raise errors.InvalidConfigError(exc_str.format(x.name))
            elif item["type"] == "system":
                exclude.append(item["package"])
    # Stop any running services associated with this app
    for item in self.dependencies:
        if item["type"] == "system" and not item["package"] in exclude:
            if item.get("daemon"):
                # Best-effort: a missing or already-stopped daemon must
                # not block the uninstall.
                try:
                    services.get(item["daemon"]).stop()
                    services.get(item["daemon"]).disable()
                except Exception:
                    pass
            pacman.remove([item["package"]],
                          purge=config.get("apps", "purge"))
    # Remove the app's directory and cleanup the app object
    shutil.rmtree(os.path.join(config.get("apps", "app_dir"), self.id))
    self.loadable = False
    self.installed = False
    # Regenerate the firewall and re-block the abandoned ports
    regen_fw = False
    for x in self.services:
        if x["ports"]:
            regen_fw = True
    if regen_fw:
        tracked_services.deregister(self.id)
    ports = []
    for s in self.services:
        if s.get("default_policy", 0) and s["ports"]:
            ports.append(s["ports"])
    if ports and config.get("general", "enable_upnp"):
        tracked_services.close_all_upnp(ports)
    smsg = "{0} uninstalled successfully".format(self.name)
    nthread.complete(Notification("success", "Apps", smsg))
    signals.emit("apps", "post_remove", self)
def show_version():
    """Show version and diagnostic details"""
    uname = shell("uname -a")["stdout"].decode().rstrip("\n")
    click.echo(uname)
    version = config.get("enviro", "version", "Unknown")
    arch = config.get("enviro", "arch", "Unknown")
    board = config.get("enviro", "board", "Unknown")
    click.echo(click.style(" * arkOS server version: ", fg="yellow") + version)
    click.echo(click.style(" * Arch / Board: ", fg="yellow")
               + arch + " / " + board)
def scan():
    """Scan disk for certificates and pair them with their SSL assignments."""
    certs, assigns = [], {}
    # Record what each certificate is assigned to, starting with Genesis
    if config.get("genesis", "ssl"):
        ssl = os.path.splitext(os.path.basename(
            config.get("genesis", "cert_file", "")))[0]
        # FIX: replaced dict.has_key() (removed in Python 3) with `in`
        if ssl and ssl in assigns:
            assigns[ssl].append({"type": "genesis", "id": "genesis",
                                 "name": "arkOS Genesis/API"})
        elif ssl:
            assigns[ssl] = [{"type": "genesis", "id": "genesis",
                             "name": "arkOS Genesis/API"}]
    for x in applications.get(installed=True):
        if hasattr(x, "ssl"):
            for ssl, data in x.ssl.get_ssl_assigned():
                if ssl in assigns:
                    assigns[ssl] += data
                else:
                    assigns[ssl] = []
                    assigns[ssl].append(data)
    if not os.path.exists(config.get("certificates", "cert_dir")):
        os.makedirs(config.get("certificates", "cert_dir"))
    if not os.path.exists(config.get("certificates", "key_dir")):
        os.makedirs(config.get("certificates", "key_dir"))
    # Load each certificate/key pair found on disk
    for x in glob.glob(os.path.join(config.get("certificates", "cert_dir"), "*.crt")):
        id = os.path.splitext(os.path.basename(x))[0]
        with open(x, "r") as f:
            crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
        with open(os.path.join(config.get("certificates", "key_dir"), id+".key"), "r") as f:
            key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, f.read())
        sha1, md5 = get_cert_hashes(crt)
        c = Certificate(
            id=id, cert_path=x,
            key_path=os.path.join(config.get("certificates", "key_dir"), id+".key"),
            keytype="RSA" if key.type() == OpenSSL.crypto.TYPE_RSA
                else ("DSA" if key.type() == OpenSSL.crypto.TYPE_DSA else "Unknown"),
            keylength=int(key.bits()),
            domain=crt.get_subject().CN,
            assigns=assigns.get(id) or [],
            expiry=crt.get_notAfter(),
            sha1=sha1, md5=md5)
        certs.append(c)
    storage.certs.set("certificates", certs)
    return certs
def ssl_enable(self):
    """Enable SSL on this website's nginx serverblock and save."""
    # Get server-preferred ciphers
    if config.get("certificates", "ciphers"):
        ciphers = config.get("certificates", "ciphers")
    else:
        # NOTE(review): `ciphers` is not assigned on this branch, so this
        # presumably relies on a module-level `ciphers` default constant
        # defined elsewhere in the file -- verify it exists, otherwise
        # this path raises NameError.
        config.set("certificates", "ciphers", ciphers)
        config.save()
    block = nginx.loadf(os.path.join("/etc/nginx/sites-available/", self.id))
    # If the site is on port 80, setup an HTTP redirect to new port 443
    server = block.servers[0]
    listen = server.filter("Key", "listen")[0]
    if listen.value == "80":
        listen.value = "443 ssl"
        # Add a plain-HTTP server that 301-redirects to the HTTPS site
        block.add(nginx.Server(
            nginx.Key("listen", "80"),
            nginx.Key("server_name", self.addr),
            nginx.Key("return", "301 https://%s$request_uri" % self.addr)
        ))
        # Re-find the SSL serverblock so subsequent edits target it
        for x in block.servers:
            if x.filter("Key", "listen")[0].value == "443 ssl":
                server = x
                break
    else:
        # Already on a non-80 port: just flag the existing listener as ssl
        listen.value = listen.value.split(" ssl")[0] + " ssl"
    # Clean up any pre-existing SSL directives that no longer apply
    for x in server.all():
        if type(x) == nginx.Key and x.name.startswith("ssl_"):
            server.remove(x)
    # Add the necessary SSL directives to the serverblock and save
    server.add(
        nginx.Key("ssl_certificate", self.cert.cert_path),
        nginx.Key("ssl_certificate_key", self.cert.key_path),
        nginx.Key("ssl_protocols", "TLSv1 TLSv1.1 TLSv1.2"),
        nginx.Key("ssl_ciphers", ciphers),
        nginx.Key("ssl_session_timeout", "5m"),
        nginx.Key("ssl_prefer_server_ciphers", "on"),
        nginx.Key("ssl_dhparam", "/etc/arkos/ssl/dh_params.pem"),
        nginx.Key("ssl_session_cache", "shared:SSL:50m"),
    )
    nginx.dumpf(block, os.path.join("/etc/nginx/sites-available/", self.id))
    # Set the certificate name in the metadata file
    meta = ConfigParser.SafeConfigParser()
    meta.read(os.path.join(self.path, ".arkos"))
    meta.set("website", "ssl", self.cert.id)
    with open(os.path.join(self.path, ".arkos"), "w") as f:
        meta.write(f)
    # Call the website type's SSL enable hook
    self.enable_ssl(self.cert.cert_path, self.cert.key_path)
def connect(self):
    """Connect to Redis server and sanity-check it with a ping + flush."""
    db_num = config.get("genesis", "redis_db", 0)
    port_num = config.get("genesis", "redis_port", 6380)
    passwd = secrets.get("redis")
    try:
        self.redis = redis.Redis(db=db_num, port=port_num, password=passwd)
        self.redis.ping()
        # Start from an empty database on every (re)connect
        self.redis.flushdb()
    except redis.exceptions.ConnectionError:
        raise ConnectionError("arkOS Redis")
def verify_genesis():
    """Return True if a built Genesis frontend is present for this run mode."""
    run_mode = config.get("enviro", "run")
    if run_mode == "vagrant":
        vpath = '/home/vagrant/genesis/dist'
    elif run_mode == "dev":
        here = os.path.dirname(os.path.realpath(__file__))
        vpath = os.path.abspath(os.path.join(here, '../../genesis/dist'))
    else:
        vpath = '/var/lib/arkos/genesis'
    return os.path.exists(vpath)
def scan(verify=True):
    """Scan local app dir and the repo server, rebuilding app storage."""
    signals.emit("apps", "pre_scan")
    app_dir = config.get("apps", "app_dir")
    apps = []
    if not os.path.exists(app_dir):
        os.makedirs(app_dir)
    # Get paths for installed apps, metadata for available ones
    installed_apps = [n for n in os.listdir(app_dir) if not n.startswith(".")]
    available_apps = api("https://%s/api/v1/apps"
                         % config.get("general", "repo_server"), crit=False)
    available_apps = available_apps["applications"] if available_apps else []
    # Create objects for installed apps with appropriate metadata
    for name in installed_apps:
        try:
            with open(os.path.join(app_dir, name, "manifest.json"), "r") as f:
                data = json.loads(f.read())
        except ValueError:
            logger.warn("Failed to load %s due to a JSON parsing error" % name)
            continue
        except IOError:
            logger.warn("Failed to load %s: manifest file inaccessible or not present" % name)
            continue
        logger.debug(" *** Loading %s" % data["id"])
        app = App(**data)
        app.installed = True
        # Cross-reference against the repo listing for upgrades/assets
        for idx, meta in enumerate(available_apps):
            if app.id == meta["id"] and app.version != meta["version"]:
                app.upgradable = meta["version"]
            if app.id == meta["id"]:
                app.assets = meta["assets"]
                available_apps[idx]["installed"] = True
        app.load()
        apps.append(app)
    # Convert available apps payload to objects
    for meta in available_apps:
        if not meta.get("installed"):
            app = App(**meta)
            app.installed = False
            apps.append(app)
    storage.apps.set("applications", apps)
    if verify:
        verify_app_dependencies()
    signals.emit("apps", "post_scan")
    return storage.apps.get("applications")
def load(self, verify=True):
    """Load the app's Python modules and register its service metadata."""
    try:
        signals.emit("apps", "pre_load", self)
        if verify:
            self.verify_dependencies()
        # Load the application module into Python
        imp.load_module(self.id, *imp.find_module(self.id, [os.path.join(config.get("apps", "app_dir"))]))
        # Get module and its important classes and track them on this object
        for module in self.modules:
            submod = imp.load_module("%s.%s" % (self.id, module), *imp.find_module(module, [os.path.join(config.get("apps", "app_dir"), self.id)]))
            classes = inspect.getmembers(submod, inspect.isclass)
            # Identify which framework base class this submodule provides
            mgr = None
            for y in classes:
                if y[0] in ["DatabaseManager", "Site", "BackupController"]:
                    mgr = y[1]
                    break
            logger.debug(" *** Registering %s module on %s" % (module, self.id))
            # Track the app's concrete subclass (not the base) per module type
            if module == "database":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_database_mgr", y[1])
            elif module == "website":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_website", y[1])
            elif module == "backup":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_backup", y[1])
            elif module == "api":
                # Give the API module a handle to the backend, if one exists
                if hasattr(self, "_backend"):
                    setattr(submod, self.id, self._backend)
                setattr(self, "_api", submod)
            elif module == "ssl":
                self.ssl = submod
            else:
                setattr(self, "_%s" % module, submod)
        # Set up tracking of ports associated with this app
        for s in self.services:
            if s["ports"]:
                tracked_services.register(self.id, s["binary"], s["name"],
                                          self.icon, s["ports"],
                                          default_policy=s.get("default_policy", 2),
                                          fw=False)
        signals.emit("apps", "post_load", self)
    except Exception, e:
        # Any failure marks this app unloadable instead of crashing the scan
        self.loadable = False
        self.error = "Module error: %s" % str(e)
        logger.warn("Failed to load %s -- %s" % (self.name, str(e)))
def genesis(path):
    """Serve the built Genesis frontend appropriate for the run mode."""
    target = path or 'index.html'
    run_mode = config.get("enviro", "run")
    if run_mode == "vagrant":
        # Note: falls through (None) if the vagrant build is absent
        if os.path.exists('/home/vagrant/genesis/dist'):
            return send_from_directory('/home/vagrant/genesis/dist', target)
    elif run_mode == "dev":
        sdir = os.path.dirname(os.path.realpath(__file__))
        sdir = os.path.abspath(os.path.join(sdir, '../../genesis/dist'))
        return send_from_directory(sdir, target)
    elif os.path.exists('/var/lib/arkos/genesis/dist'):
        return send_from_directory('/var/lib/arkos/genesis/dist', target)
    else:
        resp = jsonify(message="Genesis does not appear to be installed.")
        resp.status_code = 500
        return resp
def open_upnp_site(site):
    """Convenience function to register a website with uPnP."""
    if config.get("general", "enable_upnp"):
        open_upnp(("tcp", site.port))
        domain = site.domain
        # Local-only hostnames cannot be tested from outside
        if domain == "localhost" or domain.endswith(".local"):
            domain = None
        try:
            test_port(config.get("general", "repo_server"), site.port, domain)
        # FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            msg = ("Port {0} and/or domain {1} could not be tested."
                   " Make sure your ports are properly forwarded and"
                   " that your domain is properly set up.")\
                .format(site.port, site.domain)
            Notification("error", "TrSv", msg).send()
def scan():
    """
    Search proper directory for certificates, load them and store metadata.

    :return: list of Certificate objects
    :rtype: list
    """
    logger.debug("Crts", "Scanning for certificates")
    # Map each certificate id to the services it is assigned to, starting
    # with the Genesis UI cert (if SSL is on), then each installed app.
    assigns = {}
    if config.get("genesis", "ssl"):
        gen_cert = config.get("genesis", "cert_file")
        ssl = os.path.splitext(os.path.basename(gen_cert))[0]
        if ssl and ssl in assigns:
            assigns[ssl].append({"type": "genesis", "id": "genesis",
                                 "name": "arkOS Genesis/API"})
        elif ssl:
            assigns[ssl] = [{"type": "genesis", "id": "genesis",
                             "name": "arkOS Genesis/API"}]
    for x in applications.get(installed=True):
        if hasattr(x, "ssl"):
            for ssl, data in x.ssl.get_ssl_assigned():
                if ssl in assigns:
                    assigns[ssl] += data
                else:
                    assigns[ssl] = []
                    assigns[ssl].append(data)
    if not os.path.exists(config.get("certificates", "cert_dir")):
        os.makedirs(config.get("certificates", "cert_dir"))
    if not os.path.exists(config.get("certificates", "key_dir")):
        os.makedirs(config.get("certificates", "key_dir"))
    # Rebuild the certificate store from scratch
    storage.certificates.clear()
    # Self-managed certificates: <cert_dir>/<id>.crt + <key_dir>/<id>.key
    cert_glob = os.path.join(config.get("certificates", "cert_dir"), "*.crt")
    for cert_path in glob.glob(cert_glob):
        id = os.path.splitext(os.path.basename(cert_path))[0]
        key_path = os.path.join(
            config.get("certificates", "key_dir"), "{0}.key".format(id))
        storage.certificates[id] = \
            _scan_a_cert(id, cert_path, key_path, assigns)
    # ACME (Let's Encrypt) certificates: <acme_dir>/<id>/cert.pem
    acmedir = config.get("certificates", "acme_dir")
    if not os.path.exists(acmedir):
        os.makedirs(acmedir)
    le_cert_glob = os.path.join(acmedir, "*/cert.pem")
    for cert_path in glob.glob(le_cert_glob):
        basedir = os.path.dirname(cert_path)
        id = os.path.basename(basedir)
        key_path = os.path.join(basedir, "privkey.pem")
        storage.certificates[id] = \
            _scan_a_cert(id, cert_path, key_path, assigns, True)
    return storage.certificates
def verify():
    """Check the request's Bearer token; return an error response if invalid."""
    if config.get("genesis", "anonymous"):
        # Anonymous access allowed: nothing to check
        return
    auth = request.headers.get("Authorization", None)
    if not auth:
        resp = jsonify(message="Authorization required")
        resp.status_code = 401
        return resp
    parts = auth.split()
    if parts[0] != "Bearer" or len(parts) > 2:
        resp = jsonify(message="Malformed token")
        resp.status_code = 400
        return resp
    try:
        tjwss = TimedJSONWebSignatureSerializer(
            secret_key=current_app.config["SECRET_KEY"],
            expires_in=3600, algorithm_name="HS256")
        payload = tjwss.loads(parts[1])
    except SignatureExpired:
        resp = jsonify(message="Token expired")
        resp.status_code = 401
        return resp
    except BadSignature:
        resp = jsonify(message="Malformed token signature")
        resp.status_code = 401
        return resp
    # Token is valid -- the bearer must still be an admin user
    user = users.get(name=payload["uid"])
    if not user or not user.admin:
        resp = jsonify(message="Authorization required")
        resp.status_code = 401
        return resp
def create_acme_dummy(domain):
    """
    Create a dummy directory to use for serving ACME challenge data.

    This function is used when no website yet exists for the desired domain.

    :param str domain: Domain name to use
    :returns: Path to directory for challenge data
    """
    site_dir = os.path.join(config.get("websites", "site_dir"),
                            "acme-" + domain)
    challenge_dir = os.path.join(site_dir, ".well-known/acme-challenge")
    conf = nginx.Conf(
        nginx.Server(
            nginx.Key("listen", "80"),
            nginx.Key("listen", "[::]:80"),
            nginx.Key("server_name", domain),
            nginx.Key("root", site_dir),
            nginx.Location("/.well-known/acme-challenge/",
                           nginx.Key("root", site_dir))))
    origin = os.path.join("/etc/nginx/sites-available", "acme-" + domain)
    target = os.path.join("/etc/nginx/sites-enabled", "acme-" + domain)
    uid = users.get_system("http").uid
    nginx.dumpf(conf, origin)
    if not os.path.exists(target):
        os.symlink(origin, target)
    if not os.path.exists(challenge_dir):
        os.makedirs(challenge_dir)
    # The http user must own the tree so nginx can serve challenge files
    os.chown(site_dir, uid, -1)
    os.chown(os.path.join(site_dir, ".well-known"), uid, -1)
    os.chown(challenge_dir, uid, -1)
    # FIX: display name was missing the space before "(ACME Validation)",
    # unlike every other ACME policy registration in this codebase.
    tracked_services.register("acme", domain, domain + " (ACME Validation)",
                              "globe", [('tcp', 80)], 2)
    nginx_reload()
    return challenge_dir
def get(gid=None, name=None):
    """
    Get LDAP groups.

    :param str gid: ID of single group to fetch
    :param str name: name of single group to fetch
    :returns: all Groups, or the matching Group (None when not found)
    :rtype: list, Group or None
    """
    r = []
    qry = "ou=groups,{0}".format(config.get("general", "ldap_rootdn"))
    search = conns.LDAP.search_s(qry, ldap.SCOPE_SUBTREE,
                                 "(objectClass=posixGroup)", None)
    for x in search:
        for y in x[1]:
            # Unwrap single-element attribute lists, except the
            # multi-valued memberUid attribute.
            # FIX: isinstance() instead of comparing type() to list.
            if isinstance(x[1][y], list) and len(x[1][y]) == 1 \
                    and y != "memberUid":
                x[1][y] = x[1][y][0]
        g = Group(x[1]["cn"].decode(), int(x[1]["gidNumber"]),
                  [z.decode() for z in x[1].get("memberUid", [])],
                  x[0].split("ou=groups,")[1])
        if g.gid == gid:
            return g
        elif name and g.name == name:
            return g
        r.append(g)
    return r if gid is None and name is None else None
def _install(self, install_deps, load, force, cry, nthread):
    """Install this app, optionally preceded by its app dependencies."""
    if self.installed and not force:
        # Nothing to do unless a reinstall is forced
        return
    signals.emit("apps", "pre_install", self)
    # Get all apps that this app depends on and install them first
    deps = get_dependent(self.id, "install")
    if install_deps and deps:
        for dep in deps:
            msg = "Installing dependencies for {0}... ({1})"
            nthread.update(
                Notification("info", "Apps", msg.format(self.name, dep))
            )
            _install(dep, load=load, cry=cry)
    # Install this app
    msg = "Installing {0}...".format(self.name)
    nthread.update(Notification("info", "Apps", msg))
    _install(self.id, load=load, cry=cry)
    # Open any default-on ports via uPnP, if enabled
    open_ports = [s["ports"] for s in self.services
                  if s.get("default_policy", 0) and s["ports"]]
    if open_ports and config.get("general", "enable_upnp"):
        tracked_services.open_all_upnp(open_ports)
    verify_app_dependencies()
    smsg = "{0} installed successfully.".format(self.name)
    nthread.complete(Notification("success", "Apps", smsg))
    signals.emit("apps", "post_install", self)
def edit(self, newname=""):
    """Apply changed site properties (port, addr, name) to disk and nginx."""
    site_dir = config.get("websites", "site_dir")
    block = nginx.loadf(os.path.join("/etc/nginx/sites-available", self.id))
    # If SSL is enabled and the port is changing to 443, create the port 80 redirect
    server = block.servers[0]
    if self.cert and self.port == 443:
        # Target the SSL serverblock for the edits below
        for x in block.servers:
            if x.filter("Key", "listen")[0].value == "443 ssl":
                server = x
    if self.port != 443:
        # Moving off 443: drop any leftover HTTP->HTTPS redirect blocks.
        # NOTE(review): "key" is lowercase here while every other filter()
        # call in this file uses "Key" -- verify this match ever fires.
        for x in block.servers:
            if not "ssl" in x.filter("Key", "listen")[0].value \
                    and x.filter("key", "return"):
                block.remove(x)
    elif self.port == 443:
        block.add(nginx.Server(
            nginx.Key("listen", "80"),
            nginx.Key("server_name", self.addr),
            nginx.Key("return", "301 https://%s$request_uri"%self.addr)
        ))
    # If the name was changed...
    if newname and self.id != newname:
        # rename the folder and files...
        if self.path.endswith("_site"):
            self.path = os.path.join(site_dir, newname, "_site")
        elif self.path.endswith("htdocs"):
            self.path = os.path.join(site_dir, newname, "htdocs")
        else:
            self.path = os.path.join(site_dir, newname)
        self.path = self.path.encode("utf-8")
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
        self.nginx_disable(reload=False)
        shutil.move(os.path.join(site_dir, self.id), self.path)
        os.unlink(os.path.join("/etc/nginx/sites-available", self.id))
        signals.emit("websites", "site_removed", self)
        self.id = newname
        # then update the site's arkOS metadata file with the new name
        meta = ConfigParser.SafeConfigParser()
        meta.read(os.path.join(self.path, ".arkos"))
        meta.set("website", "id", self.id)
        with open(os.path.join(self.path, ".arkos"), "w") as f:
            meta.write(f)
        self.nginx_enable(reload=False)
    # Pass any necessary updates to the nginx serverblock and save
    server.filter("Key", "listen")[0].value = str(self.port)+" ssl" if self.cert else str(self.port)
    server.filter("Key", "server_name")[0].value = self.addr
    server.filter("Key", "root")[0].value = self.path
    server.filter("Key", "index")[0].value = "index.php" if hasattr(self, "php") and self.php else "index.html"
    nginx.dumpf(block, os.path.join("/etc/nginx/sites-available", self.id))
    # Call the site's edited hook, if it has one, then reload nginx
    signals.emit("websites", "site_loaded", self)
    if hasattr(self, "site_edited"):
        self.site_edited()
    nginx_reload()
def save(self, fw=True):
    """Persist this policy to disk and optionally regenerate the firewall."""
    policies.set(self.type, self.id, self.policy)
    policies.save()
    if fw and config.get("general", "firewall", True):
        security.regenerate_firewall(get())
    # Register the policy in storage if it is not already tracked
    if not storage.policies.get("policies", self.id):
        storage.policies.add("policies", self)
def save(self, fw=True):
    """
    Save changes to a security policy to disk.

    :param bool fw: Regenerate the firewall after save?
    """
    if self.type == "custom":
        # Custom policies live in a list; drop any stale entry first
        for entry in policies.get_all("custom"):
            if entry["id"] == self.id:
                policies.remove_list("custom", entry)
                break
        record = {"id": self.id, "name": self.name, "icon": self.icon,
                  "ports": self.ports, "policy": self.policy}
        policies.append("custom", record)
    else:
        policies.set(self.type, self.id, self.policy)
    policies.save()
    storage.policies[self.id] = self
    if fw and config.get("general", "firewall"):
        security.regenerate_firewall(get())
def install(self, extra_vars={}, enable=True, message=None):
    """Install a ReverseProxy site: nginx serverblock + arkOS metadata."""
    # NOTE(review): `extra_vars={}` is a mutable default; it is never
    # mutated in this method so it is currently harmless, but confirm.
    # Set metadata values
    site_dir = config.get("websites", "site_dir")
    self.path = self.path.encode("utf-8") or os.path.join(site_dir, self.id).encode("utf-8")
    try:
        os.makedirs(self.path)
    except:
        # Directory may already exist; proceed either way
        pass
    # If extra data is passed in, set up the serverblock accordingly
    if extra_vars:
        if not extra_vars.get("type") or not extra_vars.get("pass"):
            raise Exception("Must enter ReverseProxy type and location to pass to")
        elif extra_vars.get("type") in ["fastcgi", "uwsgi"]:
            # fastcgi/uwsgi backends get a *_pass plus the matching params
            self.block = [nginx.Location(extra_vars.get("lregex", "/"),
                nginx.Key("%s_pass"%extra_vars.get("type"), "%s"%extra_vars.get("pass")),
                nginx.Key("include", "%s_params"%extra_vars.get("type"))
                )]
        else:
            # Plain HTTP proxying
            self.block = [nginx.Location(extra_vars.get("lregex", "/"),
                nginx.Key("proxy_pass", "%s"%extra_vars.get("pass")),
                nginx.Key("proxy_redirect", "off"),
                nginx.Key("proxy_buffering", "off"),
                nginx.Key("proxy_set_header", "Host $host")
                )]
            if extra_vars.get("xrip"):
                self.block[0].add(nginx.Key("proxy_set_header", "X-Real-IP $remote_addr"))
            if extra_vars.get("xff") == "1":
                self.block[0].add(nginx.Key("proxy_set_header", "X-Forwarded-For $proxy_add_x_forwarded_for"))
    # Create the nginx serverblock and arkOS metadata files
    block = nginx.Conf()
    server = nginx.Server(
        nginx.Key("listen", self.port),
        nginx.Key("server_name", self.addr),
        nginx.Key("root", self.base_path or self.path),
    )
    server.add(*[x for x in self.block])
    block.add(server)
    nginx.dumpf(block, os.path.join("/etc/nginx/sites-available", self.id))
    meta = ConfigParser.SafeConfigParser()
    meta.add_section("website")
    meta.set("website", "id", self.id)
    meta.set("website", "name", self.name)
    meta.set("website", "type", "ReverseProxy")
    meta.set("website", "extra", self.type)
    meta.set("website", "version", "None")
    meta.set("website", "ssl",
             self.cert.id if hasattr(self, "cert") and self.cert else "None")
    with open(os.path.join(self.path, ".arkos"), "w") as f:
        meta.write(f)
    # Track port and reload daemon
    self.meta = None
    self.installed = True
    storage.sites.add("sites", self)
    signals.emit("websites", "site_installed", self)
    self.nginx_enable()
def get_date():
    """
    Get current date.

    :returns: Date in config's ``date_format``
    :rtype: str
    """
    fmt = config.get("general", "date_format")
    return time.strftime(fmt)
def initialize():
    """Register the default arkOS policy and all custom policies in storage."""
    default = policies.get("arkos", "arkos", 2)
    genesis_ports = [("tcp", int(config.get("genesis", "port")))]
    arkos_pol = SecurityPolicy("arkos", "arkos",
                               "System Management (Genesis/APIs)",
                               "fa fa-desktop", genesis_ports, default)
    storage.policies.add("policies", arkos_pol)
    # User-defined (custom) policies
    for entry in policies.get_all("custom"):
        custom_pol = SecurityPolicy("custom", entry["id"], entry["name"],
                                    entry["icon"], entry["ports"],
                                    entry["policy"])
        storage.policies.add("policies", custom_pol)
def get_time():
    """
    Get current time.

    :returns: Time in config's ``time_format``
    :rtype: str
    """
    fmt = config.get("general", "time_format")
    return time.strftime(fmt)
def get_idatetime():
    """
    Get date and time from NTP server.

    :returns: Unix timestamp
    :rtype: float
    """
    server = config.get("general", "ntp_server")
    return ntp.request(server, version=3).tx_time
def get_offset():
    """
    Get the amount of seconds that system time is off from NTP.

    :returns: NTP offset
    :rtype: float
    """
    server = config.get("general", "ntp_server")
    return ntp.request(server, version=3).offset
def deregister(type, id="", fw=True):
    """Stop tracking policies of *type* (all of them, or just *id*)."""
    for pol in get(type=type):
        if not id:
            # No id given: remove every policy of this type
            pol.remove(fw=False)
        elif pol.id == id:
            pol.remove(fw=False)
            break
    # Regenerate once at the end rather than per-policy
    if fw and config.get("general", "firewall", True):
        security.regenerate_firewall(get())
def verify(token=None):
    """
    Verify a provided JSON Web Token (JWT) for authentication.

    :param str token: JSON Web Token (JWT)
    :returns: None when authorized or anonymous; else an error response
    """
    def deny(msg, code):
        # Build a JSON error response with the given status code
        resp = jsonify(message=msg)
        resp.status_code = code
        return resp
    if config.get("genesis", "anonymous"):
        return
    # API keys take precedence over bearer tokens
    api_key = request.headers.get("X-API-Key", None)
    if api_key:
        for entry in secrets.get_all("api-keys"):
            if entry["key"] == api_key:
                user = users.get(name=entry["user"])
                if not user or not user.admin:
                    return deny("Authorization required", 401)
                return
    if not token:
        token = request.headers.get("Authorization", None)
        if not token:
            return deny("Authorization required", 401)
    parts = token.split()
    if parts[0] != "Bearer" or len(parts) > 2:
        return deny("Malformed token", 400)
    try:
        tjwss = TimedJSONWebSignatureSerializer(
            secret_key=current_app.config["SECRET_KEY"],
            expires_in=3600, algorithm_name="HS256")
        payload = tjwss.loads(parts[1])
    except SignatureExpired:
        return deny("Token expired", 401)
    except BadSignature:
        return deny("Malformed token signature", 401)
    # Token is valid -- the bearer must still be an admin user
    user = users.get(name=payload["uid"])
    if not user or not user.admin:
        return deny("Authorization required", 401)
def get(id=None):
    """
    Get all virtual mail domains from LDAP, or one domain by name.

    :param str id: domain name to fetch; returns all domains if None
    :returns: Domain if ``id`` matched, list of Domains otherwise
    """
    results = []
    qset = conns.LDAP.search_s(
        "ou=domains,%s" % config.get("general", "ldap_rootdn",
                                     "dc=arkos-servers,dc=org"),
        ldap.SCOPE_SUBTREE, "virtualdomain=*", ["virtualdomain"])
    for x in qset:
        d = Domain(name=x[1]["virtualdomain"][0],
                   rootdn=x[0].split("ou=domains,")[1])
        if d.name == id:
            return d
        results.append(d)
    # A lookup by id that found nothing returns None, matching the other
    # getters in this codebase (previously the full list leaked out).
    return results if id is None else None
def encrypt(self, passwd, cipher=None, keysize=None, mount=False):
    """
    Encrypt virtual disk image.

    :params str passwd: Passphrase to encrypt disk with
    :params str cipher: cipher suite to use (config value, or
        aes-xts-plain64 if unset)
    :params str keysize: key size to use (config value, or 256 if unset)
    :params bool mount: mount after encrypt?
    """
    # Resolve config-based defaults at call time; the old signature bound
    # config.get(...) as default arguments at import time, freezing stale
    # values (and requiring config to be loaded at import).
    cipher = cipher or config.get("filesystems", "cipher") \
        or "aes-xts-plain64"
    keysize = keysize or config.get("filesystems", "keysize") or 256
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    os.rename(self.path, os.path.join(vdisk_dir, self.id + ".crypt"))
    self.path = os.path.join(vdisk_dir, self.id + ".crypt")
    # Find an open loopback device and mount
    loop = losetup.find_unused_loop_device()
    loop.mount(str(self.path), offset=1048576)
    # Encrypt the file inside the loopback and mount
    s = crypto.luks_format(loop.device, passwd, cipher, int(keysize))
    if s != 0:
        loop.unmount()
        # Roll back the rename AND the stored path (the old code left
        # self.path pointing at the no-longer-existing .crypt file).
        os.rename(self.path, os.path.join(vdisk_dir, self.id + ".img"))
        self.path = os.path.join(vdisk_dir, self.id + ".img")
        excstr = "Failed to encrypt {0} with errno {1}"
        raise errors.OperationFailedError(excstr.format(self.id, str(s)))
    s = crypto.luks_open(loop.device, self.id, passwd)
    if s != 0:
        loop.unmount()
        excstr = "Failed to decrypt {0} with errno {1}"
        raise errors.OperationFailedError(excstr.format(self.id, str(s)))
    # Create a filesystem inside the encrypted device
    s = shell("mkfs.ext4 /dev/mapper/{0}".format(self.id))
    crypto.luks_close(self.id)
    loop.unmount()
    if s["code"] != 0:
        excstr = "Failed to format loop device: {0}"
        raise errors.OperationFailedError(excstr.format(s["stderr"]))
    self.crypt = True
    if mount:
        self.mount(passwd)
def get_temp():
    """Get CPU temperature readings."""
    # TODO: replace this with libsensors.so / PySensors
    # Default "Unknown" guards against a missing board config entry;
    # without it, None.startswith raises AttributeError.
    if config.get("enviro", "board", "Unknown").startswith("Raspberry Pi"):
        with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
            return "%3.1f°C" % (float(f.read().rstrip("\n")) / 1000)
    else:
        if os.path.exists("/sys/class/hwmon/hwmon1/temp1_input"):
            with open("/sys/class/hwmon/hwmon1/temp1_input", "r") as f:
                return "%3.1f°C" % (float(f.read().rstrip("\n")) / 1000)
    return ""
def get(id=None):
    """
    Scan all physical partitions and virtual disks on the system.

    :param id: return only the device whose ID matches, if found
    :returns: list of devices, or a single device if ``id`` matches one
        (None if an ``id`` was given but nothing matched)
    """
    devs, mps = [], {}
    fstab = get_fstab()
    # Get mount data for all devices
    with open("/etc/mtab", "r") as f:
        for x in f.readlines():
            x = x.split()
            mps[x[0]] = x[1]
    # Get physical disks available
    for d in parted.getAllDevices():
        try:
            parts = parted.Disk(d).getPrimaryPartitions()
        except:
            # Device without a readable partition table; skip it
            continue
        for p in parts:
            # Loopback devices are handled by the virtual-disk pass below
            if p.path.split("/")[-1].startswith("loop"):
                continue
            try:
                fstype = parted.probeFileSystem(p.geometry)
            except:
                fstype = "Unknown"
            try:
                dev = DiskPartition(id=p.path.split("/")[-1], path=p.path,
                                    mountpoint=mps.get(p.path) or None,
                                    size=int(p.getSize("B")), fstype=fstype,
                                    enabled=p.path in fstab,
                                    crypt=crypto.is_luks(p.path)==0)
                if id == dev.id:
                    return dev
                devs.append(dev)
            except:
                continue
    # Replace mount data for virtual disks with loopback id
    dd = losetup.get_loop_devices()
    for x in dd:
        try:
            s = dd[x].get_status()
        except:
            continue
        if "/dev/loop%s" % s.lo_number in mps:
            # Re-key the mountpoint under the backing file path so the
            # virtual-disk pass below can look it up directly
            mps[s.lo_filename] = mps["/dev/loop%s" % s.lo_number]
    # Get virtual disks available
    for x in glob.glob(os.path.join(config.get("filesystems", "vdisk_dir"),
                                    "*")):
        if not x.endswith((".img", ".crypt")):
            continue
        dname = os.path.splitext(os.path.split(x)[1])[0]
        dev = VirtualDisk(id=dname, path=x, size=os.path.getsize(x),
                          mountpoint=mps.get(x)
                          or mps.get("/dev/mapper/%s" % dname) or None,
                          enabled=x in fstab, crypt=x.endswith(".crypt"))
        if id == dev.id:
            return dev
        devs.append(dev)
    return devs if not id else None
def backup(self, data=True, backup_location=""):
    """
    Create a backup archive of this app or site.

    :param bool data: include the app/site's data files, not just config?
    :param str backup_location: directory to store the backup in
        (arkOS default if empty)
    :returns: dict of metadata describing the created backup
    """
    if not backup_location:
        backup_location = config.get("backups", "location",
                                     "/var/lib/arkos/backups")
    if self.ctype == "site":
        self.version = self.site.meta.version
    signals.emit("backups", "pre_backup", self)
    # Trigger the pre-backup hook for the app/site
    if self.ctype == "site":
        self.pre_backup(self.site)
    else:
        self.pre_backup()
    # Create backup directory in storage
    backup_dir = os.path.join(backup_location, self.id)
    try:
        os.makedirs(backup_dir)
    except:
        # Best-effort: directory probably exists already
        pass
    # Gather config and data file paths to archive
    myconfig = self._get_config()
    data = self._get_data() if data else []
    timestamp = systemtime.get_serial_time()
    isotime = systemtime.get_iso_time(timestamp)
    path = os.path.join(backup_dir, "%s-%s.tar.gz" % (self.id,timestamp))
    # Zip up the gathered file paths
    with tarfile.open(path, "w:gz") as t:
        for f in myconfig+data:
            # Each entry may be a glob pattern; archive every match
            for x in glob.glob(f):
                t.add(x)
        if self.ctype == "site" and self.site.db:
            # Dump the site's database straight into the archive.
            # NOTE(review): StringIO.StringIO and .buf are Python 2 only
            dbsql = StringIO.StringIO(self.site.db.dump())
            dinfo = tarfile.TarInfo(name="/%s.sql"%self.site.id)
            dinfo.size = len(dbsql.buf)
            t.addfile(tarinfo=dinfo, fileobj=dbsql)
    # Create a metadata file to track information
    info = {"pid": self.id, "type": self.ctype, "icon": self.icon,
            "version": self.version, "time": isotime}
    if self.site:
        info["site_type"] = self.site.meta.id
    with open(os.path.join(backup_dir,
                           "%s-%s.meta" % (self.id,timestamp)), "w") as f:
        f.write(json.dumps(info))
    # Trigger post-backup hook for the app/site
    if self.ctype == "site":
        self.post_backup(self.site)
    else:
        self.post_backup()
    signals.emit("backups", "post_backup", self)
    return {"id": self.id+"/"+timestamp, "pid": self.id,
            "path": path, "icon": self.icon, "type": self.ctype,
            "time": isotime, "version": self.version,
            "size": os.path.getsize(path),
            "site_type": self.site.meta.id if self.site else None,
            "is_ready": True}
def _install(id, load=True, cry=True):
    """
    Utility function to download and install arkOS app packages.

    :param str id: ID of arkOS app to install
    :param bool load: Load the app after install?
    :param bool cry: Raise exception on dependency install failure?
    """
    app_dir = config.get("apps", "app_dir")
    # Fetch the app package from the repository and unpack it in place
    url = "https://{0}/api/v1/apps/{1}".format(
        config.get("general", "repo_server"), id)
    pkg_data = api(url, returns="raw", crit=True)
    tarball = os.path.join(app_dir, "{0}.tar.gz".format(id))
    with open(tarball, "wb") as f:
        f.write(pkg_data)
    with tarfile.open(tarball, "r:gz") as archive:
        archive.extractall(app_dir)
    os.unlink(tarball)
    # Populate the app object from the freshly-extracted manifest
    with open(os.path.join(app_dir, id, "manifest.json")) as f:
        meta = json.loads(f.read())
    app = get(id)
    for key in meta:
        setattr(app, key, meta[key])
    app.upgradable = ""
    app.installed = True
    # Enable and start any system services this app provides
    for svc in app.services:
        if svc.get("type") == "system" and svc.get("binary") \
                and not svc.get("ignore_on_install"):
            s = services.get(svc["binary"])
            if s:
                s.enable()
                if s.state != "running":
                    try:
                        s.start()
                    except services.ActionError:
                        logger.warning(
                            "Apps",
                            "{0} could not be automatically started."
                            .format(s.name))
    if load:
        app.load(cry=cry)
def remove(id, time, backup_location=""):
    """
    Remove a backup archive and its metadata file.

    :param str id: backup (app/site) ID
    :param str time: backup timestamp serial
    :param str backup_location: Location to use (instead of arkOS default)
    """
    if not backup_location:
        backup_location = config.get("backups", "location",
                                     "/var/lib/arkos/backups")
    backups = get()
    for x in backups:
        if x["id"] == id + "/" + time:
            os.unlink(x["path"])
            try:
                # Metadata lives next to the archive as <base>.meta; strip
                # the ".tar.gz" suffix. The old split(".")[1] produced the
                # bogus path "tar.meta" for every real archive path.
                os.unlink(x["path"].split(".tar.gz")[0] + ".meta")
            except OSError:
                # Best-effort: the meta file may not exist
                pass
def _install(id, load=True):
    """
    Utility function to download and install arkOS app packages.

    :param str id: ID of arkOS app to install
    :param bool load: Load the app after install?
    """
    app_dir = config.get("apps", "app_dir")
    # Download and extract the app source package; compute the tarball
    # path once instead of rebuilding it three times
    data = api("https://%s/api/v1/apps/%s" % (
        config.get("general", "repo_server"), id), returns="raw", crit=True)
    pkg_path = os.path.join(app_dir, "%s.tar.gz" % id)
    with open(pkg_path, "wb") as f:
        f.write(data)
    with tarfile.open(pkg_path, "r:gz") as t:
        t.extractall(app_dir)
    os.unlink(pkg_path)
    # Read the app's metadata and create an object
    with open(os.path.join(app_dir, id, "manifest.json")) as f:
        data = json.loads(f.read())
    app = get(id)
    for x in data:
        setattr(app, x, data[x])
    app.upgradable = ""
    app.installed = True
    if load:
        app.load()
def get_temp():
    """Get CPU temperature readings."""
    # TODO: replace this with libsensors.so / PySensors
    if config.get("enviro", "board", "Unknown").startswith("Raspberry Pi"):
        with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
            return "{:3.1f}°C".format(float(f.read().rstrip("\n")) / 1000)
    else:
        if os.path.exists("/sys/class/hwmon/hwmon1/temp1_input"):
            with open("/sys/class/hwmon/hwmon1/temp1_input", "r") as f:
                # Fixed malformed format spec "{:3.1}f" -> "{:3.1f}": the
                # old spec rendered e.g. "4e+01f°C" instead of "42.0°C".
                return "{:3.1f}°C".format(
                    float(f.read().rstrip("\n")) / 1000)
    return ""
def genesis_init(state):
    """Initialize the Genesis endpoints."""
    run_mode = config.get("enviro", "run")
    if run_mode == "vagrant":
        path = '/home/vagrant/genesis'
    elif run_mode == "dev":
        sdir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.abspath(os.path.join(sdir, '../../genesis'))
    elif os.path.exists('/var/lib/arkos/genesis'):
        path = '/var/lib/arkos/genesis'
    else:
        path = ""
    # Nothing to serve if no Genesis tree was found
    if not os.path.exists(path):
        return
    backend.add_url_rule('/', defaults={'path': None}, view_func=genesis,
                         methods=['GET', ])
    backend.add_url_rule('/<path:path>', view_func=genesis,
                         methods=['GET', ])
def create_token(user):
    """
    Create a signed JSON Web Token (JWT) for the given user.

    :param user: user object to issue the token for
    :returns: encoded token
    :rtype: str
    """
    iat = systemtime.get_unix_time()
    try:
        offset = systemtime.get_offset()
        # Resync the clock if it drifted more than an hour from NTP
        if offset < -3600 or offset > 3600:
            systemtime.set_datetime()
            iat = systemtime.get_unix_time()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate. Any NTP failure disables token expiry.
        current_app.logger.warning(
            "System time is not accurate or could not be verified. "
            "Access tokens will not expire.")
        iat = None
    payload = {
        "uid": user.name,
        "ufn": user.first_name,
        "uln": user.last_name,
    }
    if iat:
        payload["iat"] = iat
        payload["exp"] = iat + config.get("genesis", "token_valid_for", 3600)
    tjwss = TimedJSONWebSignatureSerializer(
        secret_key=current_app.config["SECRET_KEY"],
        expires_in=config.get("genesis", "token_valid_for", 3600),
        algorithm_name="HS256")
    return tjwss.dumps(payload).decode("utf-8")
def get(backup_location=""): """ Return a list of backup dicts from the backup directory. ``Backup`` dicts are in the following format (example): { "icon": "globe", "id": "testghost/20150317124530", "is_ready": true, "path": "/var/lib/arkos/backups/testghost/testghost-xxx.tar.gz", "pid": "testghost", "site_type": "ghost", "size": 14612219, "time": "2015-03-17T12:45:30-04:00", "type": "site", "version": "0.5.10-1" } :param str backup_location: Location to scan (instead of arkOS default) :returns: backups found :rtype: Backup """ backups = [] if not backup_location: backup_location = config.get("backups", "location") if not os.path.exists(backup_location): os.makedirs(backup_location) for x in glob.glob(os.path.join(backup_location, "*/*.tar.gz")): path = x name = os.path.basename(x).split("-")[0] meta = x.split(".tar.gz")[0]+".meta" stime = x.split("-")[1].split(".tar.gz")[0] if not os.path.exists(meta): data = {"id": name+"/"+stime, "pid": name, "path": path, "icon": None, "type": "Unknown", "time": systemtime.get_iso_time(stime), "version": "Unknown", "size": os.path.getsize(path), "site_type": None, "is_ready": True} backups.append(data) continue with open(meta, "r") as f: data = json.loads(f.read()) data = {"id": "{0}/{1}".format(name, stime), "pid": name, "path": path, "icon": data["icon"], "type": data["type"], "time": data["time"], "version": data["version"], "size": os.path.getsize(path), "is_ready": True, "site_type": data.get("site_type", None)} backups.append(data) return backups
def check_updates():
    """
    Fetch pending updates from the repo server and verify signatures.

    Updates whose GPG signature cannot be verified stop the scan; only
    verified updates are cached and returned.

    :returns: update dicts with id/name/date/info/tasks
    :rtype: list
    """
    updates = []
    gpg = gnupg.GPG()
    server = config.get("general", "repo_server")
    current = config.get("updates", "current_update", 0)
    # Fetch updates from registry server
    data = api("https://%s/api/v1/updates/%s" % (server, str(current)),
               crit=True)
    for x in data["updates"]:
        # Keep the raw task string for signature checking, parsed for use
        ustr, u = str(x["tasks"]), json.loads(x["tasks"])
        # Get the update signature and test it
        sig = api("https://%s/api/v1/signatures/%s" % (server, x["id"]),
                  returns="raw", crit=True)
        sig_path = "/tmp/%s.sig" % x["id"]
        with open(sig_path, "w") as f:
            f.write(sig)
        v = gpg.verify_data(sig_path, ustr)
        if v.trust_level is None:  # identity check, not == (PEP 8)
            logger.error("Update %s signature verification failed" % x["id"])
            break
        else:
            updates.append({"id": x["id"], "name": x["name"],
                            "date": x["date"], "info": x["info"],
                            "tasks": u})
    storage.updates.set("updates", updates)
    return updates
def make_json_error(err):
    """
    Render an unhandled exception as a JSON error response.

    Attaches a full crash report (platform, config path, loaded apps and
    the traceback) when traceback text is available.

    :param err: the exception being rendered
    :returns: Flask JSON response with an appropriate status code
    """
    if hasattr(err, "description"):
        message = err.description
    else:
        message = str(err)
    # NOTE(review): traceback.format_exc() returns a non-empty string even
    # when no exception is being handled ("NoneType: None\n"), so this
    # branch may always be taken — confirm intent
    if traceback.format_exc():
        stacktrace = traceback.format_exc()
        report = "arkOS %s Crash Report\n" % version()
        report += "--------------------\n\n"
        report += "Running in %s\n" % config.get("enviro", "run")
        report += "System: %s\n" % shell("uname -a")["stdout"]
        report += "Platform: %s %s\n" % (config.get("enviro", "arch"),
                                         config.get("enviro", "board"))
        report += "Python version %s\n" % '.'.join(
            [str(x) for x in platform.python_version_tuple()])
        report += "Config path: %s\n\n" % config.filename
        report += "Loaded applicatons: \n%s\n\n" % "\n".join(
            [x.id for x in applications.get()])
        report += "Request: %s %s\n\n" % (request.method, request.path)
        report += stacktrace
        response = jsonify(message=message, stacktrace=stacktrace,
                           report=report, version=version(),
                           arch=config.get("enviro", "arch"))
    else:
        response = jsonify(message=message)
    # HTTPExceptions carry their own code; anything else is a 500
    response.status_code = err.code if isinstance(err, HTTPException) else 500
    return add_cors(response)
def firstrun():
    """
    Perform initial arkOS setup tasks posted by the first-run wizard.

    Handles SD card resizing, GPU memory reservation, MAC address setup
    on Cubieboards, initial app installs, root password protection and
    firewall initialization, driven by the posted JSON data.

    :returns: JSON response carrying the generated root password (if any)
    """
    data = request.get_json()
    resize_boards = [
        "Raspberry Pi", "Raspberry Pi 2", "Raspberry Pi 3",
        "Cubieboard2", "Cubietruck", "BeagleBone Black", "ODROID-U"
    ]
    if data.get("resize_sd_card", None)\
            and config.get("enviro", "board") in resize_boards:
        part = 1 if config.get("enviro", "board").startswith("Cubie") else 2
        p1str = 'd\nn\np\n1\n\n\nw\n'
        p2str = 'd\n2\nn\np\n2\n\n\nw\n'
        shell('fdisk /dev/mmcblk0', stdin=(p1str if part == 1 else p2str))
        if not os.path.exists('/etc/cron.d'):
            os.mkdir('/etc/cron.d')
        # Schedule the filesystem grow for next boot, then self-delete
        # (redundant f.close() removed; the with-block closes the file)
        with open('/etc/cron.d/resize', 'w') as f:
            f.write('@reboot root e2fsck -fy /dev/mmcblk0p{0}\n'.format(part))
            f.write('@reboot root resize2fs /dev/mmcblk0p{0}\n'.format(part))
            f.write('@reboot root rm /etc/cron.d/resize\n')
    if data.get("use_gpu_mem", None) \
            and config.get("enviro", "board").startswith("Raspberry"):
        f = filesystems.get("mmcblk0p1")
        if not f.is_mounted:
            f.mountpoint = "/boot"
            f.mount()
        # Ensure exactly one "gpu_mem=16" line in the boot config
        cfgdata = []
        if os.path.exists('/boot/config.txt'):
            with open("/boot/config.txt", "r") as f:
                for x in f.readlines():
                    if x.startswith("gpu_mem"):
                        x = "gpu_mem=16\n"
                    cfgdata.append(x)
                if "gpu_mem=16\n" not in cfgdata:
                    cfgdata.append("gpu_mem=16\n")
            with open("/boot/config.txt", "w") as f:
                f.writelines(cfgdata)
        else:
            with open("/boot/config.txt", "w") as f:
                f.write("gpu_mem=16\n")
    if data.get("cubie_mac", None) \
            and config.get("enviro", "board").startswith("Cubie"):
        if config.get("enviro", "board") == "Cubieboard2":
            with open('/boot/uEnv.txt', 'w') as f:
                opt_str = 'extraargs=mac_addr={0}\n'
                f.write(opt_str.format(data.get("cubie_mac")))
        elif config.get("enviro", "board") == "Cubietruck":
            with open('/etc/modprobe.d/gmac.conf', 'w') as f:
                opt_str = 'options sunxi_gmac mac_str="{0}"\n'
                f.write(opt_str.format(data.get("cubie_mac")))
    if data.get("install"):
        as_job(install, data["install"])
    rootpwd = ""
    if data.get("protectRoot"):
        rootpwd = random_string(16)
        shell("passwd root", stdin="{0}\n{0}\n".format(rootpwd))
        security.initialize_firewall()
    # Rejoined the dangling `jsonify(...)` expression onto the return so
    # the generated root password is actually sent back to the client.
    return jsonify(rootpwd=rootpwd)
def genesis_init(state):
    """
    Initialize the Genesis frontend and rebuild it with app add-ons.

    Registers the serving routes, symlinks each installed app's genesis
    assets into the Genesis lib directory, records them as ember-addon
    paths and reruns `ember build`.

    :param state: blueprint setup state (passed by Flask)
    :raises Exception: if the `ember build` step fails
    """
    path = ""
    apps = applications.get()
    # Locate the Genesis source tree depending on runtime environment
    if config.get("enviro", "run") == "vagrant":
        path = '/home/vagrant/genesis'
    elif config.get("enviro", "run") == "dev":
        sdir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.abspath(os.path.join(sdir, '../../genesis'))
    elif os.path.exists('/var/lib/arkos/genesis'):
        path = '/var/lib/arkos/genesis'
    if not os.path.exists(path):
        # Genesis not installed; nothing to serve or build
        return
    backend.add_url_rule('/', defaults={'path': None}, view_func=genesis,
                         methods=['GET',])
    backend.add_url_rule('/<path:path>', view_func=genesis,
                         methods=['GET',])
    # Drop stale app symlinks before recreating them below
    for x in os.listdir(os.path.join(path, 'lib')):
        if os.path.islink(os.path.join(path, 'lib', x)):
            os.unlink(os.path.join(path, 'lib', x))
    libpaths = []
    for x in apps:
        genpath = "/var/lib/arkos/applications/%s/genesis" % x.id
        if os.path.exists(genpath):
            libpaths.append("lib/%s"%x.id)
            os.symlink(genpath, os.path.join(path, 'lib', x.id))
    if libpaths:
        # Register the app asset paths as ember addons and rebuild
        with open(os.path.join(path, 'package.json'), 'r') as f:
            data = json.loads(f.read())
        data["ember-addon"] = {"paths": libpaths}
        with open(os.path.join(path, 'package.json'), 'w') as f:
            f.write(json.dumps(data, sort_keys=True, indent=2,
                               separators=(',', ': ')))
        # ember must run from inside the Genesis tree; restore cwd after
        mydir = os.getcwd()
        os.chdir(path)
        s = shell("ember build")
        os.chdir(mydir)
        if s["code"] != 0:
            raise Exception("Genesis rebuild process failed")
def create(self, mount=False, will_crypt=False,
           nthread=NotificationThread()):
    """
    Create virtual disk image.

    :param bool mount: Mount after creation?
    :param bool will_crypt: Will this disk be encrypted later?
    :param NotificationThread nthread: notification thread to use
    """
    nthread.title = "Creating virtual disk"
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    if not os.path.exists(vdisk_dir):
        os.mkdir(vdisk_dir)
    self.path = str(os.path.join(vdisk_dir, self.id + ".img"))
    if os.path.exists(self.path):
        raise errors.InvalidConfigError("This virtual disk already exists")
    # Create an empty file matching disk size
    signals.emit("filesystems", "pre_add", self)
    msg = "Creating virtual disk..."
    nthread.update(Notification("info", "Filesystems", msg))
    with open(self.path, "wb") as f:
        written = 0
        with open("/dev/zero", "rb") as zero:
            while written < self.size:
                # Clamp the final chunk so the image is exactly self.size
                # bytes; the previous fixed 1 KiB loop could overshoot by
                # up to 1023 bytes (and was needlessly slow).
                chunk = min(65536, self.size - written)
                f.write(zero.read(chunk))
                written += chunk
    if not will_crypt:
        # Get a free loopback device and mount
        loop = losetup.find_unused_loop_device()
        loop.mount(str(self.path), offset=1048576)
        # Make a filesystem
        msg = "Writing filesystem..."
        nthread.update(Notification("info", "Filesystems", msg))
        s = shell("mkfs.ext4 {0}".format(loop.device))
        if s["code"] != 0:
            excmsg = "Failed to format loop device: {0}"
            raise errors.OperationFailedError(excmsg.format(s["stderr"]))
        loop.unmount()
    msg = "Virtual disk created successfully"
    nthread.complete(Notification("success", "Filesystems", msg))
    signals.emit("filesystems", "post_add", self)
    if mount:
        self.mount()
def uninstall(self, force=False, message=DefaultMessage()):
    """
    Uninstall this application.

    :param bool force: uninstall even if other apps depend on this one
    :param DefaultMessage message: message object to push updates to
    :raises Exception: if another installed app depends on this one
    """
    signals.emit("apps", "pre_remove", self)
    message.update("info", "Uninstalling application...")
    # System packages that must never be removed as orphans
    exclude = ["openssl", "openssh", "nginx", "python2", "git",
               "nodejs", "npm"]
    # Make sure this app can be successfully removed, and if so also remove
    # any system-level packages that *only* this app requires
    for x in get(installed=True):
        for item in x.dependencies:
            if item["type"] == "app" and item["package"] == self.id \
                    and not force:
                raise Exception(
                    "Cannot remove, %s depends on this application" % x.name)
            elif item["type"] == "system":
                exclude.append(item["package"])
    # Stop any running services associated with this app
    for item in self.dependencies:
        if item["type"] == "system" and item["package"] not in exclude:
            # .get() replaces the Python 2-only dict.has_key()
            if item.get("daemon"):
                services.stop(item["daemon"])
                services.disable(item["daemon"])
            pacman.remove([item["package"]],
                          purge=config.get("apps", "purge", False))
    logger.debug("Uninstalling %s" % self.name)
    # Remove the app's directory and cleanup the app object
    shutil.rmtree(os.path.join(config.get("apps", "app_dir"), self.id))
    self.loadable = False
    self.installed = False
    # Regenerate the firewall and re-block the abandoned ports
    regen_fw = False
    for x in self.services:
        if x["ports"]:
            regen_fw = True
    if regen_fw:
        tracked_services.deregister(self.id)
    signals.emit("apps", "post_remove", self)
def genesis(path):
    """Serve Genesis components via the API."""
    gpath = '/var/lib/arkos/genesis'
    # Anything outside the known asset roots falls back to index.html
    if path and not path.startswith(("assets", "public", "fonts", "img")):
        path = None
    target = path or 'index.html'
    if config.get("enviro", "run") == "vagrant":
        vpath = '/home/vagrant/genesis/dist'
        if os.path.exists(vpath):
            return send_from_directory(vpath, target, cache_timeout=0)
    elif config.get("enviro", "run") == "dev":
        sdir = os.path.dirname(os.path.realpath(__file__))
        sdir = os.path.abspath(os.path.join(sdir, '../../genesis/dist'))
        return send_from_directory(sdir, target, cache_timeout=0)
    elif os.path.exists(gpath):
        return send_from_directory(gpath, target, cache_timeout=0)
    else:
        resp = jsonify(
            errors={"msg": "Genesis does not appear to be installed."})
        resp.status_code = 500
        return resp
def deregister(type, id="", fw=True):
    """
    Deregister a security policy.

    :param str type: Policy type ('website', 'app', etc)
    :param str id: Website or app ID
    :param bool fw: Regenerate the firewall after save?
    """
    for policy in get(type=type):
        if not id:
            # No ID given: remove every policy of this type
            policy.remove(fw=False)
            continue
        if policy.id == id:
            policy.remove(fw=False)
            break
    if config.get("general", "firewall") and fw:
        security.regenerate_firewall(get())
def get(uid=None, name=None):
    """
    Get all LDAP users.

    :param int uid: ID of single user to fetch
    :param str name: username of single user to fetch
    :returns: User(s)
    :rtype: User or list thereof
    """
    r = []
    rootdn = config.get("general", "ldap_rootdn")
    ldap_users = conns.LDAP.search_s("ou=users," + rootdn,
                                     ldap.SCOPE_SUBTREE,
                                     "(objectClass=inetOrgPerson)", None)
    for x in ldap_users:
        # Flatten single-element attribute lists; mail stays a list
        for y in x[1]:
            if y == "mail":
                continue
            if type(x[1][y]) == list and len(x[1][y]) == 1:
                x[1][y] = x[1][y][0]
        u = User(x[1]["uid"].decode(), x[1]["givenName"].decode(),
                 x[1]["sn"].decode() if x[1]["sn"] != b"NONE" else None,
                 int(x[1]["uidNumber"]),
                 x[1]["mail"][0].split(b"@")[1].decode(),
                 x[0].split("ou=users,")[1],
                 [z.decode() for z in x[1]["mail"]])
        # Check if the user is a member of the admin or sudo groups
        try:
            conns.LDAP.search_s("cn={0},ou=sudo,{1}".format(u.name,
                                                            u.rootdn),
                                ldap.SCOPE_SUBTREE, "(objectClass=*)",
                                None)
            u.sudo = True
        except ldap.NO_SUCH_OBJECT:
            u.sudo = False
        memlist = conns.LDAP.search_s(
            "cn=admins,ou=groups,{0}".format(u.rootdn),
            ldap.SCOPE_SUBTREE, "(objectClass=*)", None)[0][1]["member"]
        # NOTE(review): `b(...)` appears to be a project bytes-conversion
        # helper defined elsewhere — confirm
        if b("uid={0},ou=users,{1}".format(u.name, u.rootdn)) in memlist:
            u.admin = True
        else:
            u.admin = False
        if u.uid == uid:
            return u
        elif name and u.name == name:
            return u
        r.append(u)
    # A lookup that found nothing returns None, not the full list
    return r if uid is None and name is None else None
def get_token():
    """Authenticate with posted credentials and return a session token."""
    data = request.get_json()
    username, pwd = data.get("username"), data.get("password")
    if config.get("genesis", "anonymous"):
        user = AnonymousUser()
    else:
        user = users.get(name=username)
    if user and not user.admin:
        resp = jsonify(message="Not an admin user")
        resp.status_code = 401
        return resp
    elif user and user.verify_passwd(pwd):
        return jsonify(token=create_token(user))
    else:
        resp = jsonify(message="Invalid credentials")
        resp.status_code = 401
        return resp
def scan_authorities():
    """Scan the CA directories and cache CertificateAuthority objects."""
    certs = []
    cert_dir = config.get("certificates", "ca_cert_dir")
    key_dir = config.get("certificates", "ca_key_dir")
    if not os.path.exists(cert_dir):
        os.makedirs(cert_dir)
    if not os.path.exists(key_dir):
        os.makedirs(key_dir)
    for fpath in glob.glob(os.path.join(cert_dir, "*.pem")):
        # CA ID is the certificate filename without its extension
        id = os.path.splitext(os.path.split(fpath)[1])[0]
        with open(fpath, "r") as f:
            cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM, f.read())
        ca = CertificateAuthority(
            id=id, cert_path=fpath, expiry=cert.get_notAfter(),
            key_path=os.path.join(key_dir, id + ".key"))
        certs.append(ca)
    storage.certs.set("authorities", certs)
    return certs
def install(self, meta, extra_vars={}, enable=True, message=DefaultMessage()): message.update("info", "Preparing to install...", head="Installing website") # Make sure the chosen port is indeed open if not tracked_services.is_open_port(self.port): raise Exception("This port is taken by another site or service, please choose another") # Set some metadata values specialmsg, dbpasswd = "", "" site_dir = config.get("websites", "site_dir") self.meta = meta self.path = self.path.encode("utf-8") or os.path.join(site_dir, self.id).encode("utf-8") self.php = extra_vars.get("php") or self.php or self.meta.uses_php or False self.version = self.meta.version.rsplit("-", 1)[0] if self.meta.website_updates else None # Classify the source package type if not self.meta.download_url: ending = "" elif self.meta.download_url.endswith(".tar.gz"): ending = ".tar.gz" elif self.meta.download_url.endswith(".tgz"): ending = ".tgz" elif self.meta.download_url.endswith(".tar.bz2"): ending = ".tar.bz2" elif self.meta.download_url.endswith(".zip"): ending = ".zip" elif self.meta.download_url.endswith(".git"): ending = ".git" else: raise Exception("Only GIT repos, gzip, bzip, and zip packages supported for now") message.update("info", "Running pre-installation...", head="Installing website") # Call website type's pre-install hook try: self.pre_install(extra_vars) except Exception, e: raise Exception("Error during website config - "+str(e))
def get(uid=None, name=None):
    """
    Fetch all LDAP users, or a single user by uid or username.

    :param uid: ID of single user to fetch
    :param str name: username of single user to fetch
    :returns: User(s), or None if a lookup found nothing
    """
    found = []
    rootdn = config.get("general", "ldap_rootdn", "dc=arkos-servers,dc=org")
    ldap_users = conns.LDAP.search_s("ou=users,%s" % rootdn,
                                     ldap.SCOPE_SUBTREE,
                                     "(objectClass=inetOrgPerson)", None)
    for entry in ldap_users:
        # Flatten single-element attribute lists; mail stays a list
        for attr in entry[1]:
            if attr == "mail":
                continue
            if type(entry[1][attr]) == list and len(entry[1][attr]) == 1:
                entry[1][attr] = entry[1][attr][0]
        u = User(name=entry[1]["uid"], uid=int(entry[1]["uidNumber"]),
                 first_name=entry[1]["givenName"],
                 last_name=entry[1]["sn"]
                 if entry[1]["sn"] != "NONE" else None,
                 mail=entry[1]["mail"],
                 domain=entry[1]["mail"][0].split("@")[1],
                 rootdn=entry[0].split("ou=users,")[1])
        # Check if the user is a member of the admin or sudo groups
        try:
            conns.LDAP.search_s("cn=%s,ou=sudo,%s" % (u.name, u.rootdn),
                                ldap.SCOPE_SUBTREE, "(objectClass=*)",
                                None)
            u.sudo = True
        except ldap.NO_SUCH_OBJECT:
            u.sudo = False
        memlist = conns.LDAP.search_s(
            "cn=admins,ou=groups,%s" % u.rootdn, ldap.SCOPE_SUBTREE,
            "(objectClass=*)", None)[0][1]["member"]
        u.admin = "uid=%s,ou=users,%s" % (u.name, u.rootdn) in memlist
        if u.uid == uid:
            return u
        elif name and u.name == name:
            return u
        found.append(u)
    return found if uid is None and name is None else None
def get(backup_location=""): backups = [] if not backup_location: backup_location = config.get("backups", "location", "/var/lib/arkos/backups") if not os.path.exists(backup_location): os.makedirs(backup_location) for x in glob.glob(os.path.join(backup_location, "*/*.tar.gz")): path = x name = os.path.basename(x).split("-")[0] meta = x.split(".tar.gz")[0]+".meta" stime = x.split("-")[1].split(".tar.gz")[0] if not os.path.exists(meta): backups.append({"id": name+"/"+stime, "pid": name, "path": path, "icon": None, "type": "Unknown", "time": systemtime.get_iso_time(stime), "version": "Unknown", "size": os.path.getsize(path), "site_type": None, "is_ready": True}) continue with open(meta, "r") as f: data = json.loads(f.read()) backups.append({"id": name+"/"+stime, "pid": name, "path": path, "icon": data["icon"], "type": data["type"], "time": data["time"], "version": data["version"], "size": os.path.getsize(path), "site_type": data.get("site_type", None), "is_ready": True}) return backups