def regenerate_firewall(data, range=None):
    """
    Flush and regenerate arkOS firewall chain.

    If ``range`` is not specified, network module will guess what they are.

    :param SecurityPolicy data: Security policies to enact
    :param list range: Range(s) of local network(s) ('192.168.0.0/24')
    """
    signals.emit("security", "pre_fw_regen")
    flush_chain("arkos-apps")
    # Default changed from a shared mutable [] literal to None; both are
    # falsy, so the fallback behavior is unchanged.
    default_range = range or network.get_active_ranges()
    # For each policy in the system, add a rule
    for x in data:
        # A policy may carry its own whitelisted ranges; otherwise use default
        policy_range = getattr(x, "allowed_ranges", default_range)
        for port in x.ports:
            if x.policy == 2:
                # Policy 2: port open to everyone
                add_rule("ACCEPT", port[0], port[1], ["anywhere"])
            elif x.policy == 1:
                # Policy 1: port open to local network ranges only
                add_rule("ACCEPT", port[0], port[1], policy_range)
            else:
                # Policy 0: port closed to all
                add_rule("REJECT", port[0], port[1])
    shell("iptables -A arkos-apps -j RETURN")
    save_rules()
    signals.emit("security", "post_fw_regen")
def initialize_firewall():
    """Flush all iptables rules and setup a new clean arkOS firewall chain."""
    signals.emit("security", "pre_fw_init")
    flush_chain("INPUT")
    rules = (
        # Accept loopback
        "iptables -A INPUT -i lo -j ACCEPT",
        # Accept designated apps
        "iptables -N arkos-apps",
        "iptables -A INPUT -j arkos-apps",
        # Allow ICMP (ping)
        "iptables -A INPUT -p icmp -m icmp --icmp-type echo-request -j ACCEPT",
        # Accept established/related connections
        "iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        # Allow mDNS (Avahi/Bonjour/Zeroconf)
        "iptables -A INPUT -p udp --dport mdns -j ACCEPT",
        "iptables -A OUTPUT -p udp --dport mdns -j ACCEPT",
        # Reject all else by default
        "iptables -A INPUT -j DROP",
    )
    for rule in rules:
        shell(rule)
    save_rules()
    signals.emit("security", "post_fw_init")
def add(self, passwd):
    """Create this user in LDAP with full account metadata and password."""
    dn = "uid=%s,ou=users,%s" % (self.name, self.rootdn)
    # Refuse to clobber an existing account with the same uid
    try:
        conns.LDAP.search_s(dn, ldap.SCOPE_SUBTREE, "(objectClass=*)", None)
        raise Exception("A user with this name already exists")
    except ldap.NO_SUCH_OBJECT:
        pass
    full_name = self.first_name + " " + self.last_name
    # Create LDAP user with proper metadata
    attrs = {
        "objectClass": ["mailAccount", "inetOrgPerson", "posixAccount"],
        "givenName": self.first_name,
        "sn": self.last_name or "NONE",
        "displayName": full_name,
        "cn": full_name,
        "uid": self.name,
        "mail": [self.name + "@" + self.domain],
        "maildrop": self.name,
        "userPassword": hashpw(passwd, "crypt"),
        "gidNumber": "100",
        "uidNumber": str(self.uid),
        "homeDirectory": "/home/%s" % self.name,
        "loginShell": "/usr/bin/bash"
    }
    signals.emit("users", "pre_add", self)
    conns.LDAP.add_s(dn, ldap.modlist.addModlist(attrs))
    self.update_adminsudo()
    signals.emit("users", "post_add", self)
def update(self, newpasswd=""):
    """Push changed user attributes (and optionally a new password) to LDAP."""
    dn = "uid=%s,ou=users,%s" % (self.name, self.rootdn)
    try:
        ldif = conns.LDAP.search_s(dn, ldap.SCOPE_SUBTREE,
                                   "(objectClass=*)", None)
    except ldap.NO_SUCH_OBJECT:
        raise Exception("This user does not exist")
    self.first_name = str(self.first_name)
    self.last_name = str(self.last_name)
    self.domain = str(self.domain)
    full_name = "%s %s" % (self.first_name, self.last_name)
    attrs = {
        "givenName": self.first_name,
        "sn": self.last_name,
        "displayName": full_name,
        "cn": full_name,
        "mail": self.mail
    }
    # Only touch the password attribute when a replacement was provided
    if newpasswd:
        attrs["userPassword"] = hashpw(newpasswd, "crypt")
    signals.emit("users", "pre_update", self)
    nldif = ldap.modlist.modifyModlist(ldif[0][1], attrs, ignore_oldexistent=1)
    conns.LDAP.modify_ext_s(dn, nldif)
    self.update_adminsudo()
    signals.emit("users", "post_update", self)
def mount(self, passwd=None):
    """
    Mount this virtual disk image under /media/<id>.

    :param str passwd: passphrase; required iff the disk is LUKS-encrypted
    :raises Exception: if already mounted, decryption fails, or mount(2) fails
    """
    if self.mountpoint and os.path.ismount(self.mountpoint):
        raise Exception("Virtual disk already mounted")
    signals.emit("filesystems", "pre_mount", self)
    if not os.path.isdir(os.path.join("/media", self.id)):
        os.makedirs(os.path.join("/media", self.id))
    mount_point = self.mountpoint if self.mountpoint else os.path.join("/media", self.id)
    # Find a free loopback device and mount
    loop = losetup.find_unused_loop_device()
    # offset=1048576 skips the first 1 MiB of the image file
    loop.mount(str(self.path), offset=1048576)
    if self.crypt and passwd:
        # If it's an encrypted virtual disk, decrypt first then mount
        s = crypto.luks_open(loop.device, self.id, passwd)
        if s != 0:
            loop.unmount()
            raise Exception("Failed to decrypt %s with errno %s" % (self.id, str(s)))
        s = libc.mount(ctypes.c_char_p(os.path.join("/dev/mapper", self.id)),
                       ctypes.c_char_p(mount_point),
                       ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
        if s == -1:
            # Roll back the crypto mapping and loop device on mount failure
            crypto.luks_close(self.id)
            loop.unmount()
            raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
    elif self.crypt and not passwd:
        raise Exception("Must provide password to decrypt encrypted container")
    else:
        s = libc.mount(ctypes.c_char_p(loop.device),
                       ctypes.c_char_p(mount_point),
                       ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
        if s == -1:
            # Release the loop device if the plain mount failed
            loop.unmount()
            raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
    signals.emit("filesystems", "post_mount", self)
    self.mountpoint = mount_point
def create(self, mount=False):
    """
    Create this virtual disk image: allocate the file, loop-mount and format.

    :param bool mount: mount the new disk after creation?
    :raises Exception: if the image already exists or formatting fails
    """
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    if not os.path.exists(vdisk_dir):
        os.mkdir(vdisk_dir)
    self.path = str(os.path.join(vdisk_dir, self.id + ".img"))
    if os.path.exists(self.path):
        raise Exception("This virtual disk already exists")
    signals.emit("filesystems", "pre_add", self)
    # Create an empty file matching disk size
    with open(self.path, "wb") as f:
        written = 0
        # open() replaces the removed py3 builtin file(); binary both ways
        with open("/dev/zero", "rb") as zero:
            while self.size > written:
                written += 1024
                f.write(zero.read(1024))
    # Get a free loopback device and mount
    loop = losetup.find_unused_loop_device()
    loop.mount(str(self.path), offset=1048576)
    # Make a filesystem
    s = shell("mkfs.ext4 %s" % loop.device)
    if s["code"] != 0:
        raise Exception("Failed to format loop device: %s" % s["stderr"])
    loop.unmount()
    # BUG FIX: this used to re-emit "pre_add"; it marks completion, so it
    # must be "post_add" (matching the newer create() implementation)
    signals.emit("filesystems", "post_add", self)
    if mount:
        self.mount()
def install_updates(message=None):
    """
    Install all pending arkOS updates, stopping at the first failed task.

    :param message: progress reporter with update()/complete(); a fresh
        DefaultMessage is created when omitted (the old evaluated-once
        default shared one instance across all calls)
    """
    if message is None:
        message = DefaultMessage()
    updates = storage.updates.get("updates")
    if not updates:
        return
    signals.emit("updates", "pre_install")
    amount = len(updates)
    responses, ids = [], []
    for index, update in enumerate(updates):
        message.update("info", "%s of %s..." % (index + 1, amount),
                       head="Installing updates")
        # Run this update's tasks in declared step order
        for x in sorted(update["tasks"], key=lambda y: y["step"]):
            if x["unit"] == "shell":
                s = shell(x["order"], stdin=x.get("data", None))
                if s["code"] != 0:
                    responses.append((x["step"], s["stderr"]))
                    break
            elif x["unit"] == "fetch":
                try:
                    download(x["order"], x["data"], True)
                except Exception as e:  # FIX: py2-only "except X, e" syntax
                    code = 1
                    if hasattr(e, "code"):
                        code = e.code
                    responses.append((x["step"], str(code)))
                    break
        else:
            # All tasks succeeded: record and persist progress, move on
            ids.append(update["id"])
            config.set("updates", "current_update", update["id"])
            config.save()
            continue
        # A task failed (inner loop broke): report and abort the rest
        message.complete(
            "error",
            "Installation of update %s failed. See logs for details."
            % str(update["id"]))
        print(responses)
        break
def add(self):
    """Add network connection."""
    signals.emit("networks", "pre_add", self)
    cfg = self.config
    is_wireless = cfg.get("connection") == "wireless"
    is_static = cfg.get("addressing") == "static"
    # (netctl key, config key, condition, value prefix, value suffix),
    # in the exact order netctl profiles are written
    fields = (
        ("Connection", "connection", True, "\"", "\""),
        ("Description", "description", True, "\"", "\""),
        ("Interface", "interface", True, "\"", "\""),
        ("Security", "security", is_wireless, "\"", "\""),
        ("ESSID", "essid", is_wireless, "\"", "\""),
        ("IP", "addressing", True, "\"", "\""),
        ("Address", "address", is_static, "(\"", "\")"),
        ("Gateway", "gateway", is_static, "\"", "\""),
        ("Key", "key", is_wireless, "\"", "\""),
    )
    with open(os.path.join("/etc/netctl", self.id), "w") as f:
        f.write("# automatically generated by arkOS\n")
        for name, key, wanted, prefix, suffix in fields:
            value = cfg.get(key)
            if value and wanted:
                f.write(name + "=" + prefix + value + suffix + "\n")
    signals.emit("networks", "post_add", self)
def regen_fw(data, range=None):
    """
    Regenerate the arkos-apps iptables chain from tracked policies.

    :param data: iterable of policy objects (policy, ports, allowed_ranges)
    :param list range: local network ranges; auto-detected when omitted
    """
    # If local ranges are not provided, get them.
    signals.emit("security", "pre_fw_regen")
    flush_fw()
    # None default replaces the old shared-mutable [] default; both falsy,
    # so the fallback behavior is unchanged
    default_range = range or network.get_active_ranges()
    # For each policy in the system, add a rule
    for x in data:
        policy_range = x.allowed_ranges \
            if hasattr(x, "allowed_ranges") else default_range
        for port in x.ports:
            if x.policy == 2:
                add_fw(port[0], port[1], ["anywhere"])
            elif x.policy == 1:
                add_fw(port[0], port[1], policy_range)
            else:
                remove_fw(port[0], port[1])
    # Terminate our app chain with a RETURN to the calling chain
    table = iptc.Table(iptc.Table.FILTER)
    chain = iptc.Chain(table, "arkos-apps")
    rule = iptc.Rule()
    rule.target = iptc.Target(rule, "RETURN")
    chain.append_rule(rule)
    save_fw()
    signals.emit("security", "post_fw_regen")
def remove(self, *args, **kwargs):
    """Unmount a file share."""
    signals.emit("shares", "pre_umount", self)
    self.umount()
    # Drop the mount from tracking, if it was tracked
    if self.id in storage.mounts:
        storage.mounts.pop(self.id)
    signals.emit("shares", "post_umount", self)
def remove(self, *args, **kwargs):
    """Remove a file share."""
    signals.emit("shares", "pre_remove", self)
    self.remove_share()
    # Drop the share from tracking, if it was tracked
    if self.id in storage.shares:
        storage.shares.pop(self.id)
    signals.emit("shares", "post_remove", self)
def flush_fw():
    """Flush all rules out of the arkos-apps chain, if the chain exists."""
    signals.emit("security", "fw_flush")
    filter_table = iptc.Table(iptc.Table.FILTER)
    app_chain = iptc.Chain(filter_table, "arkos-apps")
    if filter_table.is_chain(app_chain):
        app_chain.flush()
def assign(self, assign):
    """
    Assign a TLS certificate to a website or service.

    :param dict assign: ``Assign`` object to assign
    :returns: self
    """
    signals.emit("certificates", "pre_assign", (self, assign))
    nginx_reload = False
    kind = assign["type"]
    if kind == "genesis":
        # Point the Genesis panel at this certificate's files
        config.set("genesis", "cert_file", self.cert_path)
        config.set("genesis", "cert_key", self.key_path)
        config.set("genesis", "ssl", True)
        config.save()
        self.assigns.append(assign)
    elif kind == "website":
        site = websites.get(assign["id"])
        site.cert = self
        site.ssl_enable()
        self.assigns.append(assign)
        nginx_reload = True
    else:
        # App service: delegate to the app's ssl module, track its record
        record = applications.get(assign["aid"]).ssl_enable(self, assign["sid"])
        self.assigns.append(record)
    if nginx_reload:
        websites.nginx_reload()
    signals.emit("certificates", "post_assign", (self, assign))
    return self
def unassign(self, assign):
    """
    Unassign a TLS certificate from a website or service.

    :param dict assign: ``Assign`` object to unassign
    :returns: None
    """
    signals.emit("certificates", "pre_unassign", (self, assign))
    nginx_reload = False
    kind = assign["type"]
    if kind == "website":
        websites.get(assign["id"]).ssl_disable()
        self.assigns.remove(assign)
        nginx_reload = True
    elif kind == "genesis":
        # Clear the Genesis panel's certificate settings
        config.set("genesis", "cert_file", "")
        config.set("genesis", "cert_key", "")
        config.set("genesis", "ssl", False)
        config.save()
        self.assigns.remove(assign)
    else:
        # App service: delegate to the app's ssl module
        applications.get(assign["aid"]).ssl_disable(assign["sid"])
        self.assigns.remove(assign)
    if nginx_reload:
        websites.nginx_reload()
    signals.emit("certificates", "post_unassign", (self, assign))
    return None
def edit(self, newname=""):
    """
    Apply changed metadata (address, port, name) to this website's nginx
    serverblock and on-disk files, then reload nginx.

    :param str newname: new site ID; when given, site files are moved/renamed
    """
    site_dir = config.get("websites", "site_dir")
    block = nginx.loadf(os.path.join("/etc/nginx/sites-available", self.id))
    # If SSL is enabled and the port is changing to 443, create the port 80 redirect
    server = block.servers[0]
    if self.cert and self.port == 443:
        # Edit the SSL server block, not the plain-HTTP redirect block
        for x in block.servers:
            if x.filter("Key", "listen")[0].value == "443 ssl":
                server = x
    if self.port != 443:
        # Moving off 443: drop any redirect-only (non-ssl, has "return") block
        for x in block.servers:
            if not "ssl" in x.filter("Key", "listen")[0].value \
            and x.filter("key", "return"):
                block.remove(x)
    elif self.port == 443:
        # Moving to 443: add a port-80 server that 301-redirects to HTTPS
        block.add(nginx.Server(
            nginx.Key("listen", "80"),
            nginx.Key("server_name", self.addr),
            nginx.Key("return", "301 https://%s$request_uri"%self.addr)
        ))
    # If the name was changed...
    if newname and self.id != newname:
        # rename the folder and files...
        if self.path.endswith("_site"):
            self.path = os.path.join(site_dir, newname, "_site")
        elif self.path.endswith("htdocs"):
            self.path = os.path.join(site_dir, newname, "htdocs")
        else:
            self.path = os.path.join(site_dir, newname)
        self.path = self.path.encode("utf-8")
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
        self.nginx_disable(reload=False)
        shutil.move(os.path.join(site_dir, self.id), self.path)
        os.unlink(os.path.join("/etc/nginx/sites-available", self.id))
        signals.emit("websites", "site_removed", self)
        self.id = newname
        # then update the site's arkOS metadata file with the new name
        meta = ConfigParser.SafeConfigParser()
        meta.read(os.path.join(self.path, ".arkos"))
        meta.set("website", "id", self.id)
        with open(os.path.join(self.path, ".arkos"), "w") as f:
            meta.write(f)
        self.nginx_enable(reload=False)
    # Pass any necessary updates to the nginx serverblock and save
    server.filter("Key", "listen")[0].value = str(self.port)+" ssl" if self.cert else str(self.port)
    server.filter("Key", "server_name")[0].value = self.addr
    server.filter("Key", "root")[0].value = self.path
    server.filter("Key", "index")[0].value = "index.php" if hasattr(self, "php") and self.php else "index.html"
    nginx.dumpf(block, os.path.join("/etc/nginx/sites-available", self.id))
    # Call the site's edited hook, if it has one, then reload nginx
    signals.emit("websites", "site_loaded", self)
    if hasattr(self, "site_edited"):
        self.site_edited()
    nginx_reload()
def _install(self, install_deps, load, force, cry, nthread):
    """
    Install this app and, optionally, its app-level dependencies first.

    :param bool install_deps: install dependent apps before this one?
    :param bool load: passed through to the installer (load after install)
    :param bool force: reinstall even when already installed?
    :param cry: passed through to the installer
    :param nthread: NotificationThread used for progress updates
    """
    if self.installed and not force:
        return
    signals.emit("apps", "pre_install", self)
    # Get all apps that this app depends on and install them first
    deps = get_dependent(self.id, "install")
    if install_deps and deps:
        for x in deps:
            msg = "Installing dependencies for {0}... ({1})"
            nthread.update(
                Notification("info", "Apps", msg.format(self.name, x))
            )
            # NOTE(review): this calls the module-level _install() helper,
            # not this method — confirm the name collision is intentional
            _install(x, load=load, cry=cry)
    # Install this app
    msg = "Installing {0}...".format(self.name)
    nthread.update(Notification("info", "Apps", msg))
    _install(self.id, load=load, cry=cry)
    # Open UPnP mappings for any ports with a default-on firewall policy
    ports = []
    for s in self.services:
        if s.get("default_policy", 0) and s["ports"]:
            ports.append(s["ports"])
    if ports and config.get("general", "enable_upnp"):
        tracked_services.open_all_upnp(ports)
    verify_app_dependencies()
    smsg = "{0} installed successfully.".format(self.name)
    nthread.complete(Notification("success", "Apps", smsg))
    signals.emit("apps", "post_install", self)
def mount(self, passwd=None):
    """
    Mount this disk partition under /media/<id>.

    :param str passwd: passphrase; required iff the partition is LUKS-encrypted
    :raises Exception: if already mounted, type unknown, decrypt/mount fails
    """
    if self.mountpoint and os.path.ismount(self.mountpoint):
        raise Exception("Disk partition already mounted")
    elif self.fstype == "Unknown":
        raise Exception("Cannot mount a partition of unknown type")
    signals.emit("filesystems", "pre_mount", self)
    mount_point = self.mountpoint if self.mountpoint else os.path.join("/media", self.id)
    if self.crypt and passwd:
        # Decrypt the disk first if it's an encrypted disk
        s = crypto.luks_open(self.path, self.id, passwd)
        if s != 0:
            raise Exception("Failed to decrypt %s with errno %s" % (self.id, str(s)))
        s = libc.mount(ctypes.c_char_p(os.path.join("/dev/mapper", self.id)),
                       ctypes.c_char_p(mount_point),
                       ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
        if s == -1:
            # Close the crypto mapping again when the mount itself failed
            crypto.luks_close(self.id)
            raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
    elif self.crypt and not passwd:
        raise Exception("Must provide password to decrypt encrypted disk")
    else:
        s = libc.mount(ctypes.c_char_p(self.path),
                       ctypes.c_char_p(mount_point),
                       ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
        if s == -1:
            raise Exception("Failed to mount %s: %s"%(self.id, os.strerror(ctypes.get_errno())))
    signals.emit("filesystems", "post_mount", self)
    self.mountpoint = mount_point
def ssl_disable(self, sid=""):
    """Disable TLS on this app, or on one of its services when sid is given."""
    signals.emit("apps", "pre_ssl_disable", self)
    call_args = (sid,) if sid else ()
    self.ssl.ssl_disable(*call_args)
    signals.emit("apps", "post_ssl_disable", self)
def install(self, extra_vars={}, enable=True, message=None):
    """
    Install a ReverseProxy website: write its nginx serverblock and arkOS
    metadata file, then register and enable the site.

    :param dict extra_vars: proxy settings (type, pass, lregex, xrip, xff)
    :param bool enable: unused in this body — presumably consumed by a
        caller or base class; TODO confirm
    :param message: unused in this body
    """
    # Set metadata values
    site_dir = config.get("websites", "site_dir")
    self.path = self.path.encode("utf-8") or os.path.join(site_dir, self.id).encode("utf-8")
    try:
        os.makedirs(self.path)
    except:
        # Directory may already exist; creation is best-effort
        pass
    # If extra data is passed in, set up the serverblock accordingly
    if extra_vars:
        if not extra_vars.get("type") or not extra_vars.get("pass"):
            raise Exception("Must enter ReverseProxy type and location to pass to")
        elif extra_vars.get("type") in ["fastcgi", "uwsgi"]:
            # fastcgi/uwsgi backends use their protocol-specific pass key
            self.block = [nginx.Location(extra_vars.get("lregex", "/"),
                nginx.Key("%s_pass"%extra_vars.get("type"), "%s"%extra_vars.get("pass")),
                nginx.Key("include", "%s_params"%extra_vars.get("type"))
            )]
        else:
            # Plain HTTP proxying
            self.block = [nginx.Location(extra_vars.get("lregex", "/"),
                nginx.Key("proxy_pass", "%s"%extra_vars.get("pass")),
                nginx.Key("proxy_redirect", "off"),
                nginx.Key("proxy_buffering", "off"),
                nginx.Key("proxy_set_header", "Host $host")
            )]
        # Optional forwarded-client-address headers
        if extra_vars.get("xrip"):
            self.block[0].add(nginx.Key("proxy_set_header", "X-Real-IP $remote_addr"))
        if extra_vars.get("xff") == "1":
            self.block[0].add(nginx.Key("proxy_set_header", "X-Forwarded-For $proxy_add_x_forwarded_for"))
    # Create the nginx serverblock and arkOS metadata files
    block = nginx.Conf()
    server = nginx.Server(
        nginx.Key("listen", self.port),
        nginx.Key("server_name", self.addr),
        nginx.Key("root", self.base_path or self.path),
    )
    server.add(*[x for x in self.block])
    block.add(server)
    nginx.dumpf(block, os.path.join("/etc/nginx/sites-available", self.id))
    meta = ConfigParser.SafeConfigParser()
    meta.add_section("website")
    meta.set("website", "id", self.id)
    meta.set("website", "name", self.name)
    meta.set("website", "type", "ReverseProxy")
    meta.set("website", "extra", self.type)
    meta.set("website", "version", "None")
    meta.set("website", "ssl", self.cert.id if hasattr(self, "cert") and self.cert else "None")
    with open(os.path.join(self.path, ".arkos"), "w") as f:
        meta.write(f)
    # Track port and reload daemon
    self.meta = None
    self.installed = True
    storage.sites.add("sites", self)
    signals.emit("websites", "site_installed", self)
    self.nginx_enable()
def remove(self):
    """Delete domain."""
    # A domain cannot be deleted while any user account references it
    if any(u.domain == self.name for u in users.get()):
        raise errors.InvalidConfigError("A user is still using this domain")
    signals.emit("domains", "pre_remove", self)
    conns.LDAP.delete_s(self.ldap_id)
    signals.emit("domains", "post_remove", self)
def disconnect(self):
    """Stop this netctl connection."""
    signals.emit("networks", "pre_disconnect", self)
    result = shell("netctl stop %s" % self.id)
    if result["code"] != 0:
        raise Exception("Network disconnection failed")
    self.connected = False
    signals.emit("networks", "post_disconnect", self)
def ssl_enable(self, cert, sid=""):
    """
    Enable TLS on this app, or on one of its services when sid is given.

    :param cert: Certificate object to enable
    :param str sid: optional service ID within the app
    :returns: whatever the app's ssl module returns for the assignment
    """
    signals.emit("apps", "pre_ssl_enable", self)
    if sid:
        d = self.ssl.ssl_enable(cert, sid)
    else:
        # BUG FIX: previously `d` was never assigned on this branch, so the
        # `return d` below raised NameError whenever sid was empty
        d = self.ssl.ssl_enable(cert)
    signals.emit("apps", "post_ssl_enable", self)
    return d
def disconnect(self):
    """Disconnect from network."""
    signals.emit("networks", "pre_disconnect", self)
    result = shell("netctl stop {0}".format(self.id))
    if result["code"] != 0:
        raise errors.OperationFailedError("Network disconnection failed")
    self.connected = False
    signals.emit("networks", "post_disconnect", self)
def uninstall(self, force=False, nthread=NotificationThread()):
    """
    Uninstall the arkOS application from the system.

    :param bool force: Uninstall the app even if others depend on it?
    :param NotificationThread nthread: notification thread to use
    """
    signals.emit("apps", "pre_remove", self)
    msg = "Uninstalling application..."
    nthread.update(Notification("info", "Apps", msg))
    # Core system packages that must never be removed as a side effect
    exclude = ["openssl", "openssh", "nginx", "python2", "git",
               "nodejs", "npm"]
    # Make sure this app can be successfully removed, and if so also remove
    # any system-level packages that *only* this app requires
    for x in get(installed=True):
        for item in x.dependencies:
            if item["type"] == "app" and item["package"] == self.id \
                    and not force:
                exc_str = "{0} depends on this application"
                raise errors.InvalidConfigError(exc_str.format(x.name))
            elif item["type"] == "system":
                # Another app needs this system package: keep it installed
                exclude.append(item["package"])
    # Stop any running services associated with this app
    for item in self.dependencies:
        if item["type"] == "system" and not item["package"] in exclude:
            if item.get("daemon"):
                try:
                    services.get(item["daemon"]).stop()
                    services.get(item["daemon"]).disable()
                except:
                    # NOTE(review): best-effort stop — swallows all errors
                    pass
            pacman.remove([item["package"]],
                          purge=config.get("apps", "purge"))
    # Remove the app's directory and cleanup the app object
    shutil.rmtree(os.path.join(config.get("apps", "app_dir"), self.id))
    self.loadable = False
    self.installed = False
    # Regenerate the firewall and re-block the abandoned ports
    regen_fw = False
    for x in self.services:
        if x["ports"]:
            regen_fw = True
    if regen_fw:
        tracked_services.deregister(self.id)
    # Close any UPnP mappings the app's default-on services had opened
    ports = []
    for s in self.services:
        if s.get("default_policy", 0) and s["ports"]:
            ports.append(s["ports"])
    if ports and config.get("general", "enable_upnp"):
        tracked_services.close_all_upnp(ports)
    smsg = "{0} uninstalled successfully".format(self.name)
    nthread.complete(Notification("success", "Apps", smsg))
    signals.emit("apps", "post_remove", self)
def set_timezone(region, zone=None):
    """
    Set the system timezone by re-pointing the /etc/localtime symlink.

    :param str region: zoneinfo region (e.g. "Europe"), or "GMT"/"UTC"
    :param str zone: zone within the region (e.g. "Paris")
    """
    # "zone not in" replaces the non-idiomatic "not zone in" (matches the
    # newer set_timezone implementation elsewhere in this codebase)
    if zone and zone not in ["GMT", "UTC"]:
        zonepath = os.path.join("/usr/share/zoneinfo", region, zone)
    else:
        zonepath = os.path.join("/usr/share/zoneinfo", region)
    if os.path.exists("/etc/localtime"):
        os.remove("/etc/localtime")
    os.symlink(zonepath, "/etc/localtime")
    signals.emit("config", "tz_changed", (region, zone))
def remove(self, message=None):
    """
    Remove this website: delete its files, then disable and delete its
    nginx configuration and untrack it.

    :param message: unused progress reporter (kept for interface parity)
    """
    shutil.rmtree(self.path)
    self.nginx_disable(reload=True)
    try:
        os.unlink(os.path.join("/etc/nginx/sites-available", self.id))
    except OSError:
        # FIX: narrowed from a bare except; removal stays best-effort when
        # the config file is already gone
        pass
    storage.sites.remove("sites", self)
    signals.emit("websites", "site_removed", self)
def set_timezone(region, zone=None):
    """Set system timezone."""
    # Build the zoneinfo path; GMT/UTC live at the region level
    path_parts = ["/usr/share/zoneinfo", region]
    if zone and zone not in ["GMT", "UTC"]:
        path_parts.append(zone)
    zonepath = os.path.join(*path_parts)
    if os.path.exists("/etc/localtime"):
        os.remove("/etc/localtime")
    os.symlink(zonepath, "/etc/localtime")
    signals.emit("config", "tz_changed", (region, zone))
def set_datetime(ut=0):
    """Set the system clock from a Unix timestamp (or current time via NTP)."""
    ut = int(ut) if ut else int(get_idatetime())
    librt = ctypes.CDLL(ctypes.util.find_library("rt"), use_errno=True)
    ts = timespec()
    ts.tv_sec = ut
    ts.tv_nsec = 0
    # clock_settime(CLOCK_REALTIME=0, ...) returns -1 on failure
    if librt.clock_settime(0, ctypes.byref(ts)) == -1:
        raise Exception("Could not set time: %s"
                        % os.strerror(ctypes.get_errno()))
    signals.emit("config", "time_changed", ut)
def remove(self):
    """Remove this certificate: unassign all users, delete files, untrack."""
    signals.emit("certificates", "pre_remove", self)
    # BUG FIX: unassign() removes entries from self.assigns, so iterating
    # the live list skipped every other assignment — iterate a copy
    for x in list(self.assigns):
        self.unassign(x)
    if os.path.exists(self.cert_path):
        os.unlink(self.cert_path)
    if os.path.exists(self.key_path):
        os.unlink(self.key_path)
    storage.certs.remove("certificates", self)
    signals.emit("certificates", "post_remove", self)
def connect(self):
    """Start this netctl connection, dropping others on the same interface."""
    signals.emit("networks", "pre_connect", self)
    # netctl allows one active profile per interface; stop the others first
    for conn in get_connections(iface=self.config.get("interface")):
        conn.disconnect()
    result = shell("netctl start %s" % self.id)
    if result["code"] != 0:
        raise Exception("Network connection failed")
    self.connected = True
    signals.emit("networks", "post_connect", self)
def backup(self, data=True, backup_location=""):
    """
    Create a gzipped tar backup of this app/site's config and (optionally)
    data files, plus a JSON .meta sidecar describing it.

    :param bool data: include the app's data files, not just config?
    :param str backup_location: destination directory; default from config
    :returns: dict of metadata describing the new backup
    """
    if not backup_location:
        backup_location = config.get("backups", "location", "/var/lib/arkos/backups")
    if self.ctype == "site":
        self.version = self.site.meta.version
    signals.emit("backups", "pre_backup", self)
    # Trigger the pre-backup hook for the app/site
    if self.ctype == "site":
        self.pre_backup(self.site)
    else:
        self.pre_backup()
    # Create backup directory in storage
    backup_dir = os.path.join(backup_location, self.id)
    try:
        os.makedirs(backup_dir)
    except:
        # Directory probably exists already; creation is best-effort
        pass
    # Gather config and data file paths to archive
    # NOTE: `data` is rebound here from a bool flag to the list of paths
    myconfig = self._get_config()
    data = self._get_data() if data else []
    timestamp = systemtime.get_serial_time()
    isotime = systemtime.get_iso_time(timestamp)
    path = os.path.join(backup_dir, "%s-%s.tar.gz" % (self.id,timestamp))
    # Zip up the gathered file paths
    with tarfile.open(path, "w:gz") as t:
        for f in myconfig+data:
            # Each entry may be a glob pattern; add every match
            for x in glob.glob(f):
                t.add(x)
        if self.ctype == "site" and self.site.db:
            # Append an in-memory SQL dump of the site's database
            dbsql = StringIO.StringIO(self.site.db.dump())
            dinfo = tarfile.TarInfo(name="/%s.sql"%self.site.id)
            dinfo.size = len(dbsql.buf)
            t.addfile(tarinfo=dinfo, fileobj=dbsql)
    # Create a metadata file to track information
    info = {"pid": self.id, "type": self.ctype, "icon": self.icon,
            "version": self.version, "time": isotime}
    if self.site:
        info["site_type"] = self.site.meta.id
    with open(os.path.join(backup_dir, "%s-%s.meta" % (self.id,timestamp)), "w") as f:
        f.write(json.dumps(info))
    # Trigger post-backup hook for the app/site
    if self.ctype == "site":
        self.post_backup(self.site)
    else:
        self.post_backup()
    signals.emit("backups", "post_backup", self)
    return {"id": self.id+"/"+timestamp, "pid": self.id, "path": path,
            "icon": self.icon, "type": self.ctype, "time": isotime,
            "version": self.version, "size": os.path.getsize(path),
            "site_type": self.site.meta.id if self.site else None,
            "is_ready": True}
def delete(self, delete_home=True):
    """Remove this user from LDAP, optionally deleting their home directory."""
    signals.emit("users", "pre_remove", self)
    # Strip privileges before removal so sudo/admin lists stay consistent
    self.admin = False
    self.sudo = False
    self.update_adminsudo()
    dn = "uid=%s,ou=users,%s" % (self.name, self.rootdn)
    if delete_home:
        entry = conns.LDAP.search_s(dn, ldap.SCOPE_SUBTREE,
                                    "(objectClass=*)", ["homeDirectory"])
        hdir = entry[0][1]["homeDirectory"][0]
        if os.path.exists(hdir):
            shutil.rmtree(hdir)
    conns.LDAP.delete_s(dn)
    signals.emit("users", "post_remove", self)
def add(self):
    """
    Add a database.

    Calls the function declared in the subclass to execute actions as per
    that application's needs.
    """
    signals.emit("databases", "pre_add", self)
    # Subclass hook performs the engine-specific creation
    self.add_db()
    # Register the new database in the global tracking map
    storage.databases[self.id] = self
    signals.emit("databases", "post_add", self)
def umount(self):
    """Unmount this disk and close its crypto mapping, if encrypted."""
    signals.emit("filesystems", "pre_umount", self)
    if not self.mountpoint:
        return
    rc = libc.umount2(ctypes.c_char_p(self.mountpoint), 0)
    if rc == -1:
        raise Exception("Failed to unmount %s: %s"%(self.id, os.strerror(ctypes.get_errno())))
    if self.crypt:
        crypto.luks_close(self.id)
    signals.emit("filesystems", "post_umount", self)
    self.mountpoint = None
def connect(self):
    """Connect to network."""
    signals.emit("networks", "pre_connect", self)
    # Only one profile may be active per interface; stop the others first
    for conn in get_connections(iface=self.config.get("interface")):
        conn.disconnect()
    result = shell("netctl start {0}".format(self.id))
    if result["code"] != 0:
        raise errors.OperationFailedError("Network connection failed")
    self.connected = True
    signals.emit("networks", "post_connect", self)
def add(self):
    """Add this virtual mail domain to LDAP."""
    dn = "virtualdomain=%s,ou=domains,%s" % (self.name, self.rootdn)
    # If the search succeeds the domain exists; only NO_SUCH_OBJECT is ok
    try:
        conns.LDAP.search_s(dn, ldap.SCOPE_SUBTREE, "(objectClass=*)", None)
        raise Exception("This domain is already present here")
    except ldap.NO_SUCH_OBJECT:
        pass
    entry = {"virtualdomain": self.name, "objectClass": ["mailDomain", "top"]}
    signals.emit("domains", "pre_add", self)
    conns.LDAP.add_s(dn, ldap.modlist.addModlist(entry))
    signals.emit("domains", "post_add", self)
def ssl_disable(self, sid=""):
    """
    Disable TLS on the selected application and service.

    :param str sid: ID for the associated app's service to disable TLS on.
    """
    signals.emit("apps", "pre_ssl_disable", self)
    extra = [sid] if sid else []
    self.ssl.ssl_disable(*extra)
    signals.emit("apps", "post_ssl_disable", self)
def remove(self):
    """
    Remove a database.

    Calls the function declared in the subclass to execute actions as per
    that application's needs.
    """
    signals.emit("databases", "pre_remove", self)
    self.remove_db()
    # Drop from tracking; no-op when it was never registered
    storage.databases.pop(self.id, None)
    signals.emit("databases", "post_remove", self)
def add(self, enable=True):
    """Register this program with Supervisor and optionally enable it."""
    signals.emit("services", "pre_add", self)
    section = "program:%s" % self.name
    ini = ConfigParser.RawConfigParser()
    ini.add_section(section)
    for key in self.cfg:
        ini.set(section, key, self.cfg[key])
    ini_path = os.path.join("/etc/supervisor.d", self.name + ".ini")
    with open(ini_path, "w") as f:
        ini.write(f)
    if enable:
        self.enable()
    signals.emit("services", "post_add", self)
def scan(verify=True):
    """
    Scan the app directory and repo server, rebuilding the tracked app list.

    :param bool verify: verify inter-app dependencies after scanning?
    :returns: list of App objects (installed and available)
    """
    signals.emit("apps", "pre_scan")
    app_dir = config.get("apps", "app_dir")
    apps = []
    if not os.path.exists(app_dir):
        os.makedirs(app_dir)
    # Get paths for installed apps, metadata for available ones
    installed_apps = [x for x in os.listdir(app_dir) if not x.startswith(".")]
    available_apps = api("https://%s/api/v1/apps"
                         % config.get("general", "repo_server"), crit=False)
    available_apps = available_apps["applications"] if available_apps else []
    # Create objects for installed apps with appropriate metadata
    for x in installed_apps:
        try:
            with open(os.path.join(app_dir, x, "manifest.json"), "r") as f:
                data = json.loads(f.read())
        except ValueError:
            logger.warn("Failed to load %s due to a JSON parsing error" % x)
            continue
        except IOError:
            logger.warn("Failed to load %s: manifest file inaccessible or not present" % x)
            continue
        logger.debug(" *** Loading %s" % data["id"])
        app = App(**data)
        app.installed = True
        # Match against repo metadata to detect upgrades and pull assets.
        # (Refactor: enumerate is unpacked and the duplicated id check from
        # the previous version is merged into one guarded branch.)
        for index, meta in enumerate(available_apps):
            if app.id != meta["id"]:
                continue
            if app.version != meta["version"]:
                app.upgradable = meta["version"]
            app.assets = meta["assets"]
            available_apps[index]["installed"] = True
        app.load()
        apps.append(app)
    # Convert available apps payload to objects
    for x in available_apps:
        if not x.get("installed"):
            app = App(**x)
            app.installed = False
            apps.append(app)
    storage.apps.set("applications", apps)
    if verify:
        verify_app_dependencies()
    signals.emit("apps", "post_scan")
    return storage.apps.get("applications")
def add(self, enable=True):
    """Add a new Supervisor service."""
    signals.emit("services", "pre_add", self)
    section = "program:{0}".format(self.name)
    ini = configparser.RawConfigParser()
    ini.add_section(section)
    for key in self.cfg:
        ini.set(section, key, self.cfg[key])
    ini_path = os.path.join("/etc/supervisor.d", self.sfname)
    with open(ini_path, "w") as f:
        ini.write(f)
    if enable:
        self.enable()
    signals.emit("services", "post_add", self)
def load(self, verify=True):
    """
    Import this app's Python package and register its declared modules
    (database, website, backup, api, ssl, ...) on the object.

    On any failure the app is marked unloadable rather than raising.

    :param bool verify: check dependency satisfaction before loading
    """
    try:
        signals.emit("apps", "pre_load", self)
        if verify:
            self.verify_dependencies()
        # Load the application module into Python
        imp.load_module(self.id, *imp.find_module(self.id, [os.path.join(config.get("apps", "app_dir"))]))
        # Get module and its important classes and track them on this object
        for module in self.modules:
            submod = imp.load_module("%s.%s" % (self.id, module),
                *imp.find_module(module, [os.path.join(config.get("apps", "app_dir"), self.id)]))
            classes = inspect.getmembers(submod, inspect.isclass)
            # Find the framework base class this module subclasses
            mgr = None
            for y in classes:
                if y[0] in ["DatabaseManager", "Site", "BackupController"]:
                    mgr = y[1]
                    break
            logger.debug(" *** Registering %s module on %s" % (module, self.id))
            if module == "database":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_database_mgr", y[1])
            elif module == "website":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_website", y[1])
            elif module == "backup":
                for y in classes:
                    if issubclass(y[1], mgr) and y[1] != mgr:
                        setattr(self, "_backup", y[1])
            elif module == "api":
                # Give the api module access to the app backend, if present
                if hasattr(self, "_backend"):
                    setattr(submod, self.id, self._backend)
                setattr(self, "_api", submod)
            elif module == "ssl":
                self.ssl = submod
            else:
                setattr(self, "_%s" % module, submod)
        # Set up tracking of ports associated with this app
        for s in self.services:
            if s["ports"]:
                tracked_services.register(self.id, s["binary"], s["name"],
                                          self.icon, s["ports"],
                                          default_policy=s.get("default_policy", 2),
                                          fw=False)
        signals.emit("apps", "post_load", self)
    except Exception as e:
        # FIX: py2-only "except Exception, e" replaced with the "as e" form,
        # which is valid on Python 2.6+ and required on Python 3
        self.loadable = False
        self.error = "Module error: %s" % str(e)
        logger.warn("Failed to load %s -- %s" % (self.name, str(e)))
def generate_certificate(
        id, domain, country, state="", locale="", email="",
        keytype="RSA", keylength=2048, message=DefaultMessage()):
    """
    Generate a self-signed TLS certificate for ``domain``, creating the
    local certificate authority and DH parameters first if missing.

    :param id: certificate ID (shadows the builtin; kept for compatibility)
    :param str domain: FQDN the certificate is issued for
    :param str country: two-letter country code for the subject
    :param str state: optional subject state/province
    :param str locale: optional subject locality
    :param str email: optional subject email address
    :param str keytype: "RSA" or "DSA"
    :param int keylength: private key size in bits
    :param message: progress reporter with an update() method
    """
    signals.emit("certificates", "pre_add", id)
    # Check to see that we have a CA ready; if not, generate one
    basehost = ".".join(domain.split(".")[-2:])
    ca = get_authorities(id=basehost)
    if not ca:
        message.update("info", "Generating certificate authority...")
        ca = generate_authority(basehost)
    with open(ca.cert_path, "r") as f:
        ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
    with open(ca.key_path, "r") as f:
        ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, f.read())
    # Check to see that we have DH params, if not then do that too
    if not os.path.exists("/etc/arkos/ssl/dh_params.pem"):
        message.update("info", "Generating Diffie-Hellman parameters...")
        s = shell("openssl dhparam 2048 -out /etc/arkos/ssl/dh_params.pem")
        if s["code"] != 0:
            raise Exception("Failed to generate Diffie-Hellman parameters")
        # NOTE(review): `gid` comes from enclosing module scope — confirm
        os.chown("/etc/arkos/ssl/dh_params.pem", -1, gid)
        # FIX: 0o750 replaces the py2-only 0750 literal (valid on 2.6+/3.x)
        os.chmod("/etc/arkos/ssl/dh_params.pem", 0o750)
    # Generate private key and create X509 certificate, then set options
    message.update("info", "Generating certificate...")
    kt = OpenSSL.crypto.TYPE_DSA if keytype == "DSA" else OpenSSL.crypto.TYPE_RSA
    try:
        key = OpenSSL.crypto.PKey()
        key.generate_key(kt, keylength)
        crt = OpenSSL.crypto.X509()
        crt.set_version(3)
        crt.get_subject().C = country
        crt.get_subject().CN = domain
        if state:
            crt.get_subject().ST = state
        if locale:
            crt.get_subject().L = locale
        if email:
            crt.get_subject().emailAddress = email
        crt.get_subject().O = "arkOS Servers"
        crt.set_serial_number(int(systemtime.get_serial_time()))
        crt.gmtime_adj_notBefore(0)
        # Valid for two years
        crt.gmtime_adj_notAfter(2*365*24*60*60)
        crt.set_issuer(ca_cert.get_subject())
        crt.set_pubkey(key)
        crt.sign(ca_key, "sha256")
    except Exception as e:
        # FIX: py2-only "except Exception, e" replaced with "as e"
        raise Exception("Error generating self-signed certificate: "+str(e))
def umount(self):
    """Unmount partition, closing its LUKS container if encrypted."""
    signals.emit("filesystems", "pre_umount", self)
    if not self.mountpoint:
        # Nothing is mounted; leave state untouched
        return
    rc = libc.umount2(ctypes.c_char_p(b(self.mountpoint)), 0)
    if rc == -1:
        err = os.strerror(ctypes.get_errno())
        raise errors.OperationFailedError(
            "Failed to unmount {0}: {1}".format(self.id, err))
    if self.crypt:
        crypto.luks_close(b(self.id))
    signals.emit("filesystems", "post_umount", self)
    self.mountpoint = None
def update(self):
    """Update a group object in LDAP. Change params on the object first."""
    try:
        entry = conns.LDAP.search_s(self.ldap_id, ldap.SCOPE_SUBTREE,
                                    "(objectClass=*)", None)
    except ldap.NO_SUCH_OBJECT:
        raise errors.InvalidConfigError("This group does not exist")
    # Build a modify list replacing the member UID attribute only
    members = {"memberUid": [b(u) for u in self.users]}
    changes = ldap.modlist.modifyModlist(entry[0][1], members,
                                         ignore_oldexistent=1)
    signals.emit("groups", "pre_update", self)
    conns.LDAP.modify_s(self.ldap_id, changes)
    signals.emit("groups", "post_update", self)
def add(self):
    """Add the domain to LDAP."""
    # A successful search means the entry is already there
    try:
        conns.LDAP.search_s(self.ldap_id, ldap.SCOPE_SUBTREE,
                            "(objectClass=*)", None)
    except ldap.NO_SUCH_OBJECT:
        pass
    else:
        raise errors.InvalidConfigError("This domain is already present here")
    attrs = {
        "virtualdomain": [b(self.name)],
        "objectClass": [b"mailDomain", b"top"]
    }
    signals.emit("domains", "pre_add", self)
    conns.LDAP.add_s(self.ldap_id, ldap.modlist.addModlist(attrs))
    signals.emit("domains", "post_add", self)
def create(self, mount=False, will_crypt=False,
           nthread=NotificationThread()):
    """
    Create virtual disk image.

    :param bool mount: Mount after creation?
    :param bool will_crypt: Will this disk be encrypted later?
    :param NotificationThread nthread: notification thread to use
    """
    nthread.title = "Creating virtual disk"
    vdisk_dir = config.get("filesystems", "vdisk_dir")
    if not os.path.exists(vdisk_dir):
        os.mkdir(vdisk_dir)
    self.path = str(os.path.join(vdisk_dir, self.id + ".img"))
    if os.path.exists(self.path):
        raise errors.InvalidConfigError("This virtual disk already exists")
    # Create an empty file matching disk size
    signals.emit("filesystems", "pre_add", self)
    msg = "Creating virtual disk..."
    nthread.update(Notification("info", "Filesystems", msg))
    with open(self.path, "wb") as f:
        written = 0
        with open("/dev/zero", "rb") as zero:
            while written < self.size:
                # Cap the final chunk so the image is exactly self.size
                # bytes even when size is not a multiple of 1 KiB (the
                # old loop overshot by up to 1023 bytes)
                chunk = min(1024, self.size - written)
                f.write(zero.read(chunk))
                written += chunk
    if not will_crypt:
        # Get a free loopback device and mount
        loop = losetup.find_unused_loop_device()
        loop.mount(str(self.path), offset=1048576)
        # Make a filesystem
        msg = "Writing filesystem..."
        nthread.update(Notification("info", "Filesystems", msg))
        s = shell("mkfs.ext4 {0}".format(loop.device))
        if s["code"] != 0:
            excmsg = "Failed to format loop device: {0}"
            raise errors.OperationFailedError(excmsg.format(s["stderr"]))
        loop.unmount()
    msg = "Virtual disk created successfully"
    nthread.complete(Notification("success", "Filesystems", msg))
    signals.emit("filesystems", "post_add", self)
    if mount:
        self.mount()
def mount(self, passwd=None):
    """
    Mount partition.

    :param str passwd: If disk is encrypted, use this passphrase to unlock
    """
    if self.mountpoint and os.path.ismount(self.mountpoint):
        raise errors.InvalidConfigError("Virtual disk already mounted")
    signals.emit("filesystems", "pre_mount", self)
    if not os.path.isdir(os.path.join("/media", self.id)):
        os.makedirs(os.path.join("/media", self.id))
    mount_point = self.mountpoint or os.path.join("/media", self.id)
    luks_point = os.path.join("/dev/mapper", self.id)
    # Find a free loopback device and mount
    loop = losetup.find_unused_loop_device()
    loop.mount(str(self.path), offset=1048576)
    if self.crypt and passwd:
        # If it's an encrypted virtual disk, decrypt first then mount
        s = crypto.luks_open(loop.device, self.id, passwd)
        if s != 0:
            loop.unmount()
            excmsg = "Failed to decrypt {0} with errno {1}"
            raise errors.OperationFailedError(
                excmsg.format(self.id, str(s)))
        s = libc.mount(ctypes.c_char_p(b(luks_point)),
                       ctypes.c_char_p(b(mount_point)),
                       ctypes.c_char_p(b(self.fstype)), 0,
                       ctypes.c_char_p(b""))
        if s == -1:
            # Roll back the decryption and loop mount before failing
            crypto.luks_close(self.id)
            loop.unmount()
            excmsg = "Failed to mount {0}: {1}"
            raise errors.OperationFailedError(
                excmsg.format(self.id, os.strerror(ctypes.get_errno())))
    elif self.crypt and not passwd:
        excstr = "Must provide password to decrypt encrypted container"
        raise errors.InvalidConfigError(excstr)
    else:
        s = libc.mount(ctypes.c_char_p(b(loop.device)),
                       ctypes.c_char_p(b(mount_point)),
                       ctypes.c_char_p(b(self.fstype)), 0,
                       ctypes.c_char_p(b""))
        if s == -1:
            loop.unmount()
            # Fixed: the old code formatted `excmsg`, a name never
            # assigned in this branch, raising NameError on failure
            excstr = "Failed to mount {0}: {1}"
            raise errors.OperationFailedError(
                excstr.format(self.id, os.strerror(ctypes.get_errno())))
    signals.emit("filesystems", "post_mount", self)
    self.mountpoint = mount_point
def remove(self, nthread=None):
    """
    Remove reverse proxy, including prep and app recipes.

    :param NotificationThread nthread: notification thread to use (unused)
    """
    shutil.rmtree(self.path)
    self.nginx_disable(reload=True)
    try:
        os.unlink(os.path.join("/etc/nginx/sites-available", self.id))
    except OSError:
        # Config file may already be gone; removal stays best-effort.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    if self.id in storage.websites:
        del storage.websites[self.id]
    signals.emit("websites", "site_removed", self)
def set_datetime(ut=0):
    """
    Set system time from provided Unix timestamp (or current time via NTP).

    :param int ut: Unix timestamp
    """
    timestamp = int(ut) if ut else int(get_idatetime())
    # clock_settime lives in librt; load it with errno tracking enabled
    librt = ctypes.CDLL(ctypes.util.find_library("rt"), use_errno=True)
    ts = timespec()
    ts.tv_sec = timestamp
    ts.tv_nsec = 0
    if librt.clock_settime(0, ctypes.byref(ts)) == -1:
        raise errors.OperationFailedError("Could not set time: {0}".format(
            os.strerror(ctypes.get_errno())))
    signals.emit("config", "time_changed", timestamp)
def remove(self):
    """Remove service, deleting supervisor config files if present."""
    signals.emit("services", "pre_remove", self)
    if self.stype == "supervisor":
        supervisor_ping()
        if self.state == "running":
            self.stop()
        # Unlink each config independently: with one shared try block,
        # a missing .ini file prevented .ini.disabled from ever being
        # removed (and the bare except hid real errors)
        for cfg in (self.name + ".ini", self.name + ".ini.disabled"):
            try:
                os.unlink(os.path.join("/etc/supervisor.d", cfg))
            except OSError:
                pass
        self.state = "stopped"
        self.enabled = False
        conns.Supervisor.restart()
    signals.emit("services", "post_remove", self)
def install_updates(nthread=NotificationThread()):
    """
    Install all available updates from arkOS repo server.

    Runs each update's tasks in step order; a failing task aborts that
    update and stops processing further updates entirely.

    :param NotificationThread nthread: notification thread to use
    :returns: list of IDs of successfully installed updates
    """
    nthread.title = "Installing updates"
    updates = storage.updates
    if not updates:
        # Nothing pending; note this returns None, not [] -- callers
        # apparently tolerate that (TODO confirm)
        return
    signals.emit("updates", "pre_install")
    amount = len(updates)
    responses, ids = [], []
    # z is (index, update_dict)
    for z in enumerate(updates.values()):
        msg = "{0} of {1}...".format(z[0] + 1, amount)
        nthread.update(Notification("info", "Updates", msg))
        # Execute this update's tasks in ascending step order; any
        # failure records (step, detail) and breaks out
        for x in sorted(z[1]["tasks"], key=lambda y: y["step"]):
            if x["unit"] == "shell":
                s = shell(x["order"], stdin=x.get("data", None))
                if s["code"] != 0:
                    responses.append((x["step"], s["stderr"]))
                    break
            elif x["unit"] == "fetch":
                try:
                    download(x["order"], x["data"], True)
                except Exception as e:
                    code = getattr(e, "code", 1)
                    responses.append((x["step"], str(code)))
                    break
        else:
            # All tasks succeeded: record the update and move on
            ids.append(z[1]["id"])
            config.set("updates", "current_update", z[1]["id"])
            config.save()
            continue
        # A task failed (inner loop broke): report and stop installing
        for x in responses:
            nthread.update(Notification("debug", "Updates", x))
        msg = "Installation of update {0} failed. See logs for details."
        msg = msg.format(z[1]["id"])
        nthread.complete(Notification("error", "Updates", msg))
        break
    else:
        # Every update installed without a failure
        signals.emit("updates", "post_install")
        for x in responses:
            nthread.update(Notification("debug", "Updates", x))
        msg = "Please restart your system for the updates to take effect."
        nthread.complete(Notification("success", "Updates", msg))
    return ids
def delete(self, delete_home=True):
    """
    Delete user.

    :param bool delete_home: Delete the user's home directory too?
    """
    signals.emit("users", "pre_remove", self)
    # Strip privileges before removal so sudo/admin lists stay clean
    self.admin = False
    self.sudo = False
    self.update_adminsudo()
    if delete_home:
        result = conns.LDAP.search_s(self.ldap_id, ldap.SCOPE_SUBTREE,
                                     "(objectClass=*)", ["homeDirectory"])
        home_path = result[0][1]["homeDirectory"][0]
        if os.path.exists(home_path):
            shutil.rmtree(home_path)
    conns.LDAP.delete_s(self.ldap_id)
    signals.emit("users", "post_remove", self)
def install(self, install_deps=True, load=True, force=False,
            message=DefaultMessage()):
    """Install this app, optionally installing its dependencies first."""
    if self.installed and not force:
        return
    signals.emit("apps", "pre_install", self)
    # Get all apps that this app depends on and install them first
    deps = get_dependent(self.id, "install")
    if install_deps and deps:
        for dep in deps:
            logger.debug("Installing %s (dependency for %s)"
                         % (dep, self.name))
            message.update("info",
                           "Installing dependencies for %s... (%s)"
                           % (self.name, dep))
            _install(dep, load=load)
    # Install this app
    logger.debug("Installing %s" % self.name)
    message.update("info", "Installing %s..." % self.name)
    _install(self.id, load=load)
    verify_app_dependencies()
    signals.emit("apps", "post_install", self)
def mount(self, passwd=None):
    """
    Mount partition.

    :param str passwd: If disk is encrypted, use this passphrase to unlock
    """
    if self.mountpoint and os.path.ismount(self.mountpoint):
        raise errors.InvalidConfigError("Disk partition already mounted")
    elif self.fstype == "Unknown":
        raise errors.InvalidConfigError(
            "Cannot mount a partition of unknown type")
    signals.emit("filesystems", "pre_mount", self)
    mount_point = self.mountpoint or os.path.join("/media", self.id)
    luks_point = os.path.join("/dev/mapper", self.id)
    if not os.path.isdir(mount_point):
        os.makedirs(mount_point)
    # Common libc.mount() arguments for either branch
    target = ctypes.c_char_p(b(mount_point))
    fstype = ctypes.c_char_p(b(self.fstype))
    if self.crypt and passwd:
        # Decrypt the disk first if it's an encrypted disk
        rc = crypto.luks_open(self.path, self.id, passwd)
        if rc != 0:
            raise errors.OperationFailedError(
                "Failed to decrypt {0} with errno {1}".format(
                    self.id, str(rc)))
        rc = libc.mount(ctypes.c_char_p(b(luks_point)), target, fstype,
                        0, ctypes.c_char_p(b""))
        if rc == -1:
            # Close the LUKS mapping again before surfacing the error
            crypto.luks_close(self.id)
            raise errors.OperationFailedError(
                "Failed to mount {0}: {1}".format(
                    self.id, os.strerror(ctypes.get_errno())))
    elif self.crypt and not passwd:
        raise errors.InvalidConfigError(
            "Must provide password to decrypt encrypted disk")
    else:
        rc = libc.mount(ctypes.c_char_p(b(self.path)), target, fstype,
                        0, ctypes.c_char_p(b""))
        if rc == -1:
            raise errors.OperationFailedError(
                "Failed to mount {0}: {1}".format(
                    self.id, os.strerror(ctypes.get_errno())))
    signals.emit("filesystems", "post_mount", self)
    self.mountpoint = mount_point
def initialize_fw():
    """Flush all iptables rules and set up a clean arkOS firewall chain."""
    signals.emit("security", "pre_fw_init")
    table = iptc.Table(iptc.Table.FILTER)
    chain = iptc.Chain(table, "INPUT")
    chain.flush()
    # Accept loopback
    rule = iptc.Rule()
    rule.in_interface = "lo"
    target = iptc.Target(rule, "ACCEPT")
    rule.target = target
    chain.append_rule(rule)
    # Accept designated apps
    app_chain = iptc.Chain(table, "arkos-apps")
    if not table.is_chain(app_chain):
        table.create_chain(app_chain)
    rule = iptc.Rule()
    target = iptc.Target(rule, "arkos-apps")
    rule.target = target
    chain.append_rule(rule)
    # Allow ICMP (ping)
    rule = iptc.Rule()
    rule.protocol = "icmp"
    target = iptc.Target(rule, "ACCEPT")
    rule.target = target
    match = iptc.Match(rule, "icmp")
    match.icmp_type = "echo-request"
    # Matches must be attached explicitly; without add_match() the rule
    # accepted ALL ICMP, not just echo-request
    rule.add_match(match)
    chain.append_rule(rule)
    # Accept established/related connections
    rule = iptc.Rule()
    target = iptc.Target(rule, "ACCEPT")
    rule.target = target
    match = iptc.Match(rule, "conntrack")
    match.ctstate = "ESTABLISHED,RELATED"
    # Same fix here: unattached conntrack match made this rule accept
    # every packet, defeating the DROP below
    rule.add_match(match)
    chain.append_rule(rule)
    # Reject all else by default
    rule = iptc.Rule()
    target = iptc.Target(rule, "DROP")
    rule.target = target
    chain.append_rule(rule)
    save_fw()
    signals.emit("security", "post_fw_init")
def restore(self, data): signals.emit("backups", "pre_restore", self) # Trigger pre-restore hook for the app/site self.pre_restore() # Extract all files in archive sitename = "" with tarfile.open(data["path"], "r:gz") as t: for x in t.getnames(): if x.startswith("etc/nginx/sites-available"): sitename = os.path.basename(x) t.extractall("/") # If it's a website that had a database, restore DB via SQL file too dbpasswd = "" if self.ctype == "site" and sitename: self.site = websites.get(sitename) if not self.site: websites.scan() self.site = websites.get(sitename) meta = ConfigParser.SafeConfigParser() meta.read(os.path.join(self.site.path, ".arkos")) if meta.get("website", "dbengine", None) and os.path.exists("/%s.sql"%sitename): dbmgr = databases.get_managers(meta.get("website", "dbengine")) if databases.get(sitename): databases.get(sitename).remove() db = dbmgr.add_db(sitename) with open("/%s.sql"%sitename, "r") as f: db.execute(f.read()) os.unlink("/%s.sql"%sitename) if dbmgr.meta.database_multiuser: dbpasswd = random_string()[0:16] if databases.get_user(sitename): databases.get_user(sitename).remove() db_user = dbmgr.add_user(sitename, dbpasswd) db_user.chperm("grant", db) # Trigger post-restore hook for the app/site if self.ctype == "site": self.post_restore(self.site, dbpasswd) self.site.nginx_enable() else: self.post_restore() signals.emit("backups", "post_restore", self) data["is_ready"] = True return data
def remove(self):
    """Remove supervisor service."""
    signals.emit("services", "pre_remove", self)
    if self.stype == "supervisor":
        supervisor_ping()
        if self.state == "running":
            self.stop()
        # Unlink each config independently: with one shared try block,
        # a missing active config prevented the .disabled file from
        # being removed (and the bare except hid real errors)
        disfsname = "{0}.disabled".format(self.sfname)
        for fname in (self.sfname, disfsname):
            try:
                os.unlink(os.path.join("/etc/supervisor.d", fname))
            except OSError:
                pass
        self.state = "stopped"
        self.enabled = False
        conns.Supervisor.restart()
    signals.emit("services", "post_remove", self)
def ssl_enable(self, cert, sid=""):
    """
    Enable TLS on the selected application and service.

    The accompanying service ID is forwarded to the app-specific code
    to act as an identifier for which internal service is being
    specified. Ex. the XMPP plugin uses the domain name
    (xmpp.example.com) as ``sid``.

    :param Certificate cert: Certificate object to enable TLS with.
    :param str sid: ID for the associated app's service to enable TLS on.
    :returns: whatever the app's ssl_enable hook returns
    """
    signals.emit("apps", "pre_ssl_enable", self)
    # Bind the result in BOTH branches: previously `d` was only set
    # when sid was truthy, so `return d` raised NameError otherwise
    if sid:
        d = self.ssl.ssl_enable(cert, sid)
    else:
        d = self.ssl.ssl_enable(cert)
    signals.emit("apps", "post_ssl_enable", self)
    return d
def _remove(self, nthread):
    """
    Remove the website: run hooks, delete files, DB and nginx config.

    :param NotificationThread nthread: notification thread to update
    """
    nthread.title = "Removing website"
    # Call site type's pre-removal hook
    msg = "Running pre-removal. This may take a few minutes..."
    nthread.update(Notification("info", "Webs", msg))
    self.pre_remove()
    # Remove source directories
    msg = "Cleaning directories..."
    nthread.update(Notification("info", "Webs", msg))
    if os.path.islink(self.path):
        os.unlink(self.path)
    else:
        shutil.rmtree(self.path)
    # If there's a database, get rid of that too
    if self.db:
        msg = "Removing database..."
        nthread.update(Notification("info", "Webs", msg))
        if self.db.manager.meta.database_multiuser:
            db_user = databases.get_users(self.db.id)
            if db_user:
                db_user.remove()
        self.db.remove()
    self.nginx_disable(reload=True)
    try:
        os.unlink(os.path.join("/etc/nginx/sites-available", self.id))
    except OSError:
        # Config file may already be gone; removal stays best-effort.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    # Call site type's post-removal hook
    msg = "Running post-removal..."
    nthread.update(Notification("info", "Webs", msg))
    self.post_remove()
    create_acme_dummy(self.domain)
    if self.id in storage.websites:
        del storage.websites[self.id]
    signals.emit("websites", "site_removed", self)
    msg = "{0} site removed successfully".format(self.app.name)
    nthread.complete(Notification("success", "Webs", msg))