def restartSquid(self):
    """
    Compile the blacklist database and reload squid.

    @return: output of the squid reload command, or the string
             "error reloading squid" if the reload command failed
    @rtype: str
    """
    # Record the restart in the audit trail
    r = AF().log(PLUGIN_NAME, AA.PROXY_RESTART_SQUID)
    # Compile the squidGuard blacklist database (-C)
    mmctools.shlaunch(self.config.sgBinary + ' -C ' + self.config.sgBlacklist)
    # NOTE(review): chown expects "owner.group" but squidGroup is passed
    # first — argument order looks reversed; confirm against the config
    # attribute naming before changing.
    mmctools.shlaunch("chown " + self.config.squidGroup + "." + self.config.squidUser + " " + self.config.sgBlacklist + "*")
    # Reload squid and capture its output; os.popen().close() returns a
    # non-None exit status when the command failed
    psout = os.popen(self.config.squidReload, 'r')
    read = psout.read()
    if psout.close():
        read = "error reloading squid"
    r.commit()
    return read
def log(self, service="", filter=""):
    """
    Return the last journald entries, optionally restricted to a
    systemd unit and/or filtered on a substring.

    @param service: systemd unit name (".service" suffix optional)
    @param filter: if set, only keep entries whose values contain it
    @return: list of journal entries (oldest first), each reduced to
             the whitelisted fields
    @rtype: list
    """
    service = service.replace(".service", "", 1)
    result = []
    service_filter = ""
    fields = ("PRIORITY", "_HOSTNAME", "TIMESTAMP", "_UID", "_GID",
              "_PID", "MESSAGE", "_SYSTEMD_UNIT")
    if service:
        service_filter += '_SYSTEMD_UNIT=%s.service' % service
    code, out, err = shlaunch(
        '%s -n 500 -o json %s' % (self.config.journalctl_path, service_filter))
    # journalctl -o json emits ONE JSON object PER LINE, not a JSON
    # array: joining the lines and calling json.loads() once breaks as
    # soon as there is more than one entry. Parse line by line instead.
    entries = []
    for line in out:
        try:
            entries.append(json.loads(line))
        except ValueError:
            # Non-JSON separator lines, e.g. "-- Reboot --"
            if "Reboot" in line:
                entries.append({"MESSAGE": "Reboot"})
    for message in entries:
        if "MESSAGE" in message and isinstance(message["MESSAGE"], basestring):
            if "_SOURCE_REALTIME_TIMESTAMP" in message:
                # journald timestamps are microseconds; convert to seconds
                message["TIMESTAMP"] = int(
                    int(message["_SOURCE_REALTIME_TIMESTAMP"]) / 1000000)
            else:
                message["TIMESTAMP"] = False
            # remove unneeded fields
            for key, value in message.copy().items():
                if key not in fields:
                    del message[key]
            if filter and any(filter in str(v) for k, v in message.items()):
                result.append(message)
            if not filter:
                result.append(message)
    return result[::-1]
def getSmbStatus(self):
    """ Return SAMBA shares connection status """
    code, output, err = shlaunch('/usr/bin/net status shares parseable')
    shares = {}
    for raw in output:
        raw = raw.strip()
        if not raw:
            continue
        # Backslash-separated record emitted by `net status shares parseable`
        fields = raw.split('\\', 7)
        entry = {'pid': fields[0],
                 # Create unix timestamp
                 'lastConnect': mktime(strptime(fields[6])),
                 'machine': fields[4]}
        if fields[2]:
            entry['useruid'] = fields[2]
            entry['ip'] = fields[5]
        else:
            entry['useruid'] = 'anonymous'
        key = "homes" if fields[0] == fields[2] else fields[0]
        shares.setdefault(key, []).append(entry)
    return shares
def serialize(self):
    """
    Serialize mounted partitions and their disk usage.

    Loop devices and --bind mounts are excluded from the listing.
    @return: dict with a 'partitions' list
    """
    # --bind mounts show up with a [source] suffix in findmnt output
    bind_mounts = []
    exitcode, stdout, stderr = shlaunch(
        "findmnt -nr | fgrep [ | cut -d' ' -f1")
    if exitcode == 0:
        bind_mounts = stdout
    partitions = []
    for part in psutil.disk_partitions():
        if 'loop' in part.device or part.mountpoint in bind_mounts:
            continue
        try:
            usage = psutil.disk_usage(part.mountpoint)
        except OSError:
            # Mountpoint may be unreadable (permissions, stale mount)
            continue
        partitions.append({
            'device': part.device,
            'mountpoint': part.mountpoint,
            'fstype': part.fstype,
            'opts': part.opts,
            'usage': {
                'total': size_format(usage.total),
                'used': size_format(usage.used),
                'free': size_format(usage.free),
                'percent': usage.percent
            }
        })
    return {
        'partitions': partitions,
    }
def getSmbStatus(self):
    """ Return SAMBA shares connection status """
    code, output, err = shlaunch('/usr/bin/net status shares parseable')
    status = {}
    # Keep only non-empty lines from the net(8) output
    lines = [l.strip() for l in output if l.strip()]
    for current in lines:
        tab = current.split('\\', 7)
        item = {}
        item['pid'] = tab[0]
        # Create unix timestamp
        item['lastConnect'] = mktime(strptime(tab[6]))
        item['machine'] = tab[4]
        if not tab[2]:
            item['useruid'] = 'anonymous'
        else:
            item['useruid'] = tab[2]
            item['ip'] = tab[5]
        if tab[0] == tab[2]:
            share = "homes"
        else:
            share = tab[0]
        if share not in status:
            status[share] = []
        status[share].append(item)
    return status
def serialize(self):
    """
    Serialize mounted partitions and their disk usage.

    Loop devices and --bind mounts are excluded from the listing.
    @return: dict with a 'partitions' list
    """
    parts = psutil.disk_partitions()
    partitions = []
    # get --bind mounts
    bind_mounts = []
    exitcode, stdout, stderr = shlaunch("findmnt -nr | fgrep [ | cut -d' ' -f1")
    if exitcode == 0:
        bind_mounts = stdout
    for part in parts:
        if 'loop' not in part.device and part.mountpoint not in bind_mounts:
            # disk_usage() raises OSError on unreadable/stale mountpoints;
            # skip those instead of failing the whole listing (matches the
            # sibling implementation of this serializer)
            try:
                usage = psutil.disk_usage(part.mountpoint)
            except OSError:
                continue
            partitions.append({
                'device': part.device,
                'mountpoint': part.mountpoint,
                'fstype': part.fstype,
                'opts': part.opts,
                'usage': {
                    'total': size_format(usage.total),
                    'used': size_format(usage.used),
                    'free': size_format(usage.free),
                    'percent': usage.percent
                }
            })
    return {
        'partitions': partitions,
    }
def log(self, service="", filter=""):
    """
    Return the last journald entries, optionally restricted to a
    systemd unit and/or filtered on a substring.

    @param service: systemd unit name (".service" suffix optional)
    @param filter: if set, only keep entries whose values contain it
    @return: list of journal entries (oldest first), each reduced to
             the whitelisted fields
    @rtype: list
    """
    service = service.replace(".service", "", 1)
    result = []
    service_filter = ""
    fields = ("PRIORITY", "_HOSTNAME", "TIMESTAMP", "_UID", "_GID",
              "_PID", "MESSAGE", "_SYSTEMD_UNIT")
    if service:
        service_filter += '_SYSTEMD_UNIT=%s.service' % service
    code, out, err = shlaunch('%s -n 500 -o json %s' % (self.config.journalctl_path, service_filter))
    logs = []
    for line in out:
        # journalctl -o json prints one JSON object per line; non-JSON
        # separator lines ("-- Reboot --") raise ValueError. Catch ONLY
        # ValueError: the previous bare `except:` also hid unrelated
        # errors (including KeyboardInterrupt).
        try:
            logs.append(json.loads(line))
        except ValueError:
            if "Reboot" in line:
                logs.append({"MESSAGE": "Reboot"})
    for message in logs:
        if "MESSAGE" in message and isinstance(message["MESSAGE"], basestring):
            if "_SOURCE_REALTIME_TIMESTAMP" in message:
                # journald timestamps are microseconds; convert to seconds
                message["TIMESTAMP"] = int(int(message["_SOURCE_REALTIME_TIMESTAMP"]) / 1000000)
            else:
                message["TIMESTAMP"] = False
            # remove unneeded fields
            for key, value in message.copy().items():
                if key not in fields:
                    del message[key]
            if filter and any(filter in str(v) for k, v in message.items()):
                result.append(message)
            if not filter:
                result.append(message)
    return result[::-1]
def serialize(self):
    """
    Return the names of running mmc/pulse related processes.

    @return: dict with a 'process' list (stdout lines on success,
             stderr lines on failure)
    """
    cmd = ("ps aux | grep 'pulse\|mmc' | grep -v 'defunct' | grep -v 'grep' "
           "| awk '{ if ($11 == \"/usr/bin/python\" || $11 == \"python\" || $11 == \"/bin/sh\") print $12; else print $11 }'")
    exitcode, stdout, stderr = shlaunch(cmd)
    # On failure, expose the error output instead of the process list
    lines = stdout if exitcode == 0 else stderr
    return {
        'process': lines,
    }
def check(self):
    """
    Check that the squidGuard binary and blacklist are available, and
    record the squidGuard version string in self.sgVersion.

    @raise ConfigException: if the binary or blacklist is missing, or
           if the binary cannot be run
    """
    if not os.path.exists(self.sgBinary):
        raise ConfigException("Can't find squidguard binary: %s" % self.sgBinary)
    # Try to get squidguard version string
    code, out, err = mmctools.shlaunch("%s -v" % self.sgBinary)
    if code != 0:
        raise ConfigException("Can't start %s -v: %s (%s)'" % (self.sgBinary, "\n".join(err), str(code)))
    # shlaunch returns stderr as a list of lines (see the join above):
    # calling .strip() on the list raised AttributeError — join first.
    self.sgVersion = "\n".join(err).strip()
    if not os.path.exists(self.sgBlacklist):
        raise ConfigException("Can't find squidguard blacklist: %s" % self.sgBlacklist)
def serialize(self):
    """
    Return the names of running mmc/pulse related processes.

    @return: dict with a 'process' list (stdout lines on success,
             stderr lines on failure)
    """
    exitcode, stdout, stderr = shlaunch(
        "ps aux | grep 'pulse\|mmc' | grep -v 'defunct' | grep -v 'grep' | awk '{ if ($11 == \"/usr/bin/python\" || $11 == \"python\" || $11 == \"/bin/sh\") print $12; else print $11 }'"
    )
    if exitcode != 0:
        # Command failed: report its error output instead
        return {
            'process': stderr,
        }
    return {
        'process': stdout,
    }
def _samba_tool(self, cmd):
    """
    Run a samba-tool sub-command and return its standard output lines.

    @param cmd: samba-tool arguments, appended to the binary path
    @raise SambaToolException: if the command exits with a non-zero code
    @return: stdout lines of the command
    """
    tool = os.path.join(self.smb_conf.prefix, "bin/samba-tool")
    full_cmd = tool + " " + cmd
    exit_code, std_out, std_err = shlaunch(full_cmd)
    if exit_code == 0:
        return std_out
    # Build a diagnostic from whatever output streams are non-empty
    error_msg = "Error processing `%s`:\n" % full_cmd
    for stream in (std_err, std_out):
        if stream:
            error_msg += "\n".join(stream)
    logger.error(error_msg)
    raise SambaToolException(error_msg)
def deleteQuotaOnFS(self):
    """
    Run the generated script that removes quotas from the filesystem,
    then delete the script on success.

    @return: True when a script was run, None if there was nothing to do
    """
    if not self.tempdelfilename:
        return
    cmd = "%s %s" % (self.configuserquota.runquotascript, self.tempdelfilename)
    logger.debug("Removing quotas: " + cmd)
    code, out, err = mmctools.shlaunch(cmd)
    if code != 0:
        # err is a list of stderr lines: the previous `"…" + err`
        # concatenation raised TypeError on the error path — join it.
        logger.error("Error while removing quotas: " + "\n".join(err))
        logger.error("See: " + self.tempdelfilename + " for details of the commands run")
    else:
        logger.debug("Quotas removed")
        os.remove(self.tempdelfilename)
        self.tempdelfilename = False
    return True
def applyQuotaToFS(self):
    """
    Run the generated script that applies quotas to the filesystem,
    then delete the script on success.

    @raise Exception: if the quota script exits with a non-zero code
    @return: True when a script was run, None if there was nothing to do
    """
    if not self.tempfilename:
        return
    cmd = "%s %s" % (self.configuserquota.runquotascript, self.tempfilename)
    logger.debug("Applying quotas: " + cmd)
    code, out, err = mmctools.shlaunch(cmd)
    if code != 0:
        # err is a list of stderr lines: the previous `"…" + err`
        # concatenation raised TypeError, masking the Exception below.
        errtext = "\n".join(err)
        logger.error("Error applying quotas: " + errtext)
        logger.error("See: " + self.tempfilename + " for details of the commands run")
        raise Exception("Error applying quotas: %s" % errtext)
    else:
        logger.debug("Quotas applied")
        os.remove(self.tempfilename)
        self.tempfilename = False
    return True
def deleteQuotaOnFS(self):
    """
    Run the generated script that removes quotas from the filesystem,
    then delete the script on success.

    @return: True when a script was run, None if there was nothing to do
    """
    if not self.tempdelfilename:
        return
    cmd = "%s %s" % (self.configuserquota.runquotascript, self.tempdelfilename)
    logger.debug("Removing quotas: " + cmd)
    code, out, err = mmctools.shlaunch(cmd)
    if code != 0:
        # err is a list of stderr lines: the previous `"…" + err`
        # concatenation raised TypeError on the error path — join it.
        logger.error("Error while removing quotas: " + "\n".join(err))
        logger.error("See: " + self.tempdelfilename + " for details of the commands run")
    else:
        logger.debug("Quotas removed")
        os.remove(self.tempdelfilename)
        self.tempdelfilename = False
    return True
def getConnected(self):
    """ Return all opened SAMBA sessions """
    code, output, err = shlaunch('/usr/bin/net status sessions parseable')
    # Each non-empty line looks like:
    #   7727\useruid\Domain Users\machine\192.168.0.17
    #   0    1       2            3       4
    result = []
    for line in filter(None, (l.strip() for l in output)):
        fields = line.split('\\', 5)
        result.append({'pid': fields[0],
                       'useruid': fields[1],
                       'machine': fields[3],
                       'ip': fields[4]})
    return result
def getConnected(self):
    """ Return all opened SAMBA sessions """
    code, output, err = shlaunch('/usr/bin/net status sessions parseable')
    sessions = []
    for entry in output:
        stripped = entry.strip()
        if not stripped:
            continue
        # Format: 7727\useruid\Domain Users\machine\192.168.0.17
        #         0    1       2            3       4
        parts = stripped.split('\\', 5)
        session = {}
        session['pid'] = parts[0]
        session['useruid'] = parts[1]
        session['machine'] = parts[3]
        session['ip'] = parts[4]
        sessions.append(session)
    return sessions
def applyQuotaToFS(self):
    """
    Run the generated script that applies quotas to the filesystem,
    then delete the script on success.

    @raise Exception: if the quota script exits with a non-zero code
    @return: True when a script was run, None if there was nothing to do
    """
    if not self.tempfilename:
        return
    cmd = "%s %s" % (self.configuserquota.runquotascript, self.tempfilename)
    logger.debug("Applying quotas: " + cmd)
    code, out, err = mmctools.shlaunch(cmd)
    if code != 0:
        # err is a list of stderr lines: the previous `"…" + err`
        # concatenation raised TypeError, masking the Exception below.
        errtext = "\n".join(err)
        logger.error("Error applying quotas: " + errtext)
        logger.error("See: " + self.tempfilename + " for details of the commands run")
        raise Exception("Error applying quotas: %s" % errtext)
    else:
        logger.debug("Quotas applied")
        os.remove(self.tempfilename)
        self.tempfilename = False
    return True
def activate():
    """
    Check that the userquota plugin can be activated: configuration not
    disabled, LDAP reachable, quota schema present, and (when using the
    default quota script) user quotas enabled on every mapped device.

    @return: True if the plugin can be activated
    @rtype: bool
    """
    config = UserQuotaConfig("userquota")
    if config.disabled:
        logger.warning("Plugin userquota: disabled by configuration.")
        return False
    try:
        ldapObj = ldapUserGroupControl()
    except ldap.INVALID_CREDENTIALS:
        logger.error("Can't bind to LDAP: invalid credentials.")
        return False
    # Test if the quota LDAP schema is available in the directory
    try:
        schema = ldapObj.getSchema("systemQuotas")
        if len(schema) <= 0:
            logger.error("Quota schema is not included in LDAP directory")
            return False
    except Exception:
        # Narrowed from a bare except (which also caught KeyboardInterrupt)
        logger.exception("Invalid schema")
        return False
    # Check local file systems
    if config.runquotascript == "/bin/sh":
        for device in getDevicemap():
            dev, blocksize, name = device.split(':')
            if not os.path.exists(dev):
                # Was missing the % argument, so the literal "%s" was logged
                logger.error("%s does not exist" % dev)
                return False
            code, out, err = mmctools.shlaunch(
                "quotaon -aup | grep '%s) is on'" % dev)
            if code != 0 or not len(out) == 1:
                logger.error("User quotas are not enabled on %s" % dev)
                return False
    return True
def activate():
    """
    Check that the userquota plugin can be activated: configuration not
    disabled, LDAP reachable, quota schema present, and (when using the
    default quota script) user quotas enabled on every mapped device.

    @return: True if the plugin can be activated
    @rtype: bool
    """
    config = UserQuotaConfig("userquota")
    if config.disabled:
        logger.warning("Plugin userquota: disabled by configuration.")
        return False
    try:
        ldapObj = ldapUserGroupControl()
    except ldap.INVALID_CREDENTIALS:
        logger.error("Can't bind to LDAP: invalid credentials.")
        return False
    # Test if the quota LDAP schema is available in the directory
    try:
        schema = ldapObj.getSchema("systemQuotas")
        if len(schema) <= 0:
            logger.error("Quota schema is not included in LDAP directory")
            return False
    except Exception:
        # Narrowed from a bare except (which also caught KeyboardInterrupt)
        logger.exception("Invalid schema")
        return False
    # Check local file systems
    if config.runquotascript == "/bin/sh":
        for device in getDevicemap():
            dev, blocksize, name = device.split(':')
            if not os.path.exists(dev):
                # Was missing the % argument, so the literal "%s" was logged
                logger.error("%s does not exist" % dev)
                return False
            code, out, err = mmctools.shlaunch("quotaon -aup | grep '%s) is on'" % dev)
            if code != 0 or not len(out) == 1:
                logger.error("User quotas are not enabled on %s" % dev)
                return False
    return True
if len(line) > 0: parts = line.split("=", 1) if len(parts) is 2: if not parts[0].strip() in self.supportedOptions: tmpInsert[parts[0].strip()] = parts[1].strip() else: raise Exception("invalid samba parameter format") tmpInsert['path'] = path tmpInsert['comment'] = comment if not browseable: tmpInsert['browseable'] = 'No' # flush ACLs shlaunch("setfacl -b %s" % path) def sanitize_name(name): if ' ' in name: name = '"' + name + '"' return name if 'rwx' in perms and '@all' in perms['rwx']: tmpInsert['public'] = 'yes' tmpInsert['writeable'] = 'yes' os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) else: tmpInsert['public'] = 'no' tmpInsert['writeable'] = 'no' os.chmod(path, stat.S_IRWXU | stat.S_IRWXG) acls = posix1e.ACL(file=path)
# below, with the values of specific fields. if customparameters is not None: for line in customparameters: if len(line) > 0: parts = line.split("=", 1) if len(parts) is 2: if not parts[0].strip() in self.supportedOptions: tmpInsert[parts[0].strip()] = parts[1].strip() else: raise Exception("invalid samba parameter format") tmpInsert['comment'] = comment if permAll: tmpInsert['public'] = 'yes' shlaunch("setfacl -b %s" % path) os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) else: tmpInsert['public'] = 'no' os.chmod(path, stat.S_IRWXU | stat.S_IRWXG) # flush ACLs shlaunch("setfacl -b %s" % path) acl1 = posix1e.ACL(file=path) # Add and set default mask to rwx # This is needed by the ACL system, else the ACLs won't be valid e = acl1.append() e.permset.add(posix1e.ACL_READ) e.permset.add(posix1e.ACL_WRITE) e.permset.add(posix1e.ACL_EXECUTE) e.tag_type = posix1e.ACL_MASK # For each specified group, we add rwx access
class SambaConf:

    """
    Handle smb.conf file for Samba 4
    """
    # [global] options exposed through getGlobalInfo()
    supportedGlobalOptions = ['realm', 'workgroup', 'netbios name',
                              'server role', 'logon path', 'logon drive',
                              'logon home', 'logon script',
                              'ldap passwd sync', 'wins support',
                              'dns forwarder']
    # Kerberos client configuration rendered by writeKrb5Config()
    KRB5_CONF_PATH = '/etc/krb5.conf'

    def __init__(self):
        """
        Read SAMBA paths from the samba4 plugin configuration and parse
        smb.conf with ConfigObj.
        """
        config = Samba4Config("samba4")
        self.smb_conf_path = config.conf_file
        self.default_shares_path = config.defaultSharesPath
        self.authorizedSharePaths = config.authorizedSharePaths
        self.prefix = config.samba_prefix
        self.db_dir = config.db_dir
        try:
            self.config = ConfigObj(self.smb_conf_path,
                                    interpolation=False,
                                    list_values=False,
                                    write_empty_values=True,
                                    encoding='utf8')
        except ParseError as e:
            # NOTE(review): on parse failure self.config stays unset; any
            # later use of it raises AttributeError — confirm intended.
            logger.error("Failed to parse %s : %s ", self.smb_conf_path, e)

    def private_dir(self):
        # Samba private database directory under the configured db_dir
        return os.path.join(self.db_dir, 'private')

    def validate(self, conf_file):
        """
        Validate SAMBA configuration file with testparm. Try also to
        parse the configuration with ConfigObj.

        @param conf_file: path of the smb.conf file to validate
        @return: Whether smb.conf has been validated or not
        @rtype: boolean
        """
        cmd = shLaunch("%s/bin/testparm -s %s" % (self.prefix,
                                                  shellquote(conf_file)))
        if cmd.exitCode:
            ret = False
        elif ("Unknown" in cmd.err
              or "ERROR:" in cmd.err
              or "Ignoring badly formed line" in cmd.err):
            # testparm exited 0 but complained on stderr
            ret = False
        else:
            ret = True
        try:
            ConfigObj(conf_file, interpolation=False, list_values=False)
        except ParseError:
            ret = False
        return ret

    def isValueTrue(self, string):
        """
        @param string: a string
        @type string: str

        @return: Return 1 if string is yes/true/1 (case insensitive),
                 return 0 if string is no/false/0 (case insensitive),
                 else return -1
        @rtype: int
        """
        string = str(string).lower()
        if string in ["yes", "true", "1", "on"]:
            return 1
        elif string in ["no", "false", "0"]:
            return 0
        else:
            return -1

    def getContent(self, section, option):
        # Return the option value, or False when section/option is missing
        try:
            return self.config[section][option]
        except KeyError:
            return False

    def setContent(self, section, option, value):
        # Create the section on demand, then retry the assignment
        try:
            self.config[section][option] = value
        except KeyError:
            self.config[section] = {}
            self.setContent(section, option, value)

    def getGlobalInfo(self):
        """ return main information about global section """
        resArray = {}
        for option in self.supportedGlobalOptions:
            resArray[option] = self.getContent('global', option)
        # Whether a [homes] share is configured
        resArray['hashomes'] = 'homes' in self.config
        return resArray

    def workgroupFromRealm(self, realm):
        # First DNS label, truncated to 15 chars (NetBIOS name limit)
        return realm.split('.')[0][:15].upper()

    def writeSambaConfig(self, mode, netbios_name, realm, description,
                         logon_path='', dns_forwarder=None, hashomes=True):
        """
        Write SAMBA configuration file (smb.conf) to disk.

        @return values used to write the smb.conf template
        @rtype: dict
        """
        openchange = False  # FIXME
        openchange_conf = self.prefix + 'etc/openchange.conf'
        workgroup = self.workgroupFromRealm(realm)
        netbios_name = netbios_name.lower()
        realm = realm.upper()
        domain = realm.lower()
        params = {'workgroup': workgroup,
                  'realm': realm,
                  'netbios_name': netbios_name,
                  'description': description,
                  'mode': mode,
                  'sysvol_path': os.path.join(self.db_dir, 'sysvol'),
                  'openchange': openchange,
                  'openchange_conf': openchange_conf,
                  'domain': domain,
                  'interfaces': get_internal_interfaces(),
                  'logon_path': logon_path,
                  'dns_forwarder': dns_forwarder,
                  'hashomes': hashomes}
        smb_conf_template = env.get_template("smb.conf")
        with open(self.smb_conf_path, 'w') as f:
            f.write(smb_conf_template.render(params))
        if openchange:
            openchange_conf_template = env.get_template("openchange.conf")
            with open(openchange_conf, 'w') as f:
                f.write(openchange_conf_template.render())
        return params

    def writeKrb5Config(self, realm):
        # Render /etc/krb5.conf from the krb5.conf template
        params = {'realm': realm.upper()}
        krb5_conf_template = env.get_template('krb5.conf')
        with open(self.KRB5_CONF_PATH, 'w') as f:
            f.write(krb5_conf_template.render(params))

    def getDetailedShares(self):
        """Return detailed list of shares"""
        return [self.getDetailedShare(section)
                for section in self._getSharesSectionList()]

    def getDetailedShare(self, section):
        # A share is guest-accessible when either 'public' or 'guest ok'
        # is set to a true value
        guest = (self.isValueTrue(self.getContent(section, 'public')) == 1
                 or self.isValueTrue(self.getContent(section,
                                                     'guest ok')) == 1)
        # Shares are browseable (enabled) unless explicitly disabled
        enabled = (not self.getContent(section, 'browseable')
                   or self.isValueTrue(self.getContent(section,
                                                       'browseable')) == 1)
        share_detail = {
            'shareName': section,
            'sharePath': self.getContent(section, 'path'),
            'shareEnable': enabled,
            'shareDescription': self.getContent(section, 'comment') or '',
            'shareGuest': guest
        }
        # return share_detail
        return [share_detail['shareName'],
                share_detail['sharePath'],
                share_detail['shareEnable'],
                share_detail['shareDescription'],
                share_detail['shareGuest']]

    def _getSharesSectionList(self):
        # Every smb.conf section except the reserved ones is a share
        return [k for k, _ in self.config.items()
                if k not in ("global", "printers", "print$")]

    def save(self):
        """
        Write SAMBA configuration file (smb.conf) to disk
        """
        _, tmpfname = tempfile.mkstemp("mmc")
        self.config.filename = tmpfname
        self.config.write()
        # Validate the temporary file before overwriting the live smb.conf
        if not self.validate(tmpfname):
            raise Exception("smb.conf file is not valid (%s)" % tmpfname)
        shutil.copy(tmpfname, self.smb_conf_path)
        os.remove(tmpfname)
        return True

    def delShare(self, name, remove):
        """
        Delete a share from SAMBA configuration, and maybe delete the
        share directory from disk.
        The save method must be called to update smb.conf.

        @param name: Name of the share
        @param remove: If true, we physically remove the directory
        """
        r = AF().log(PLUGIN_NAME, AA.SAMBA4_DEL_SHARE,
                     [(name, AT.SHARE)], remove)
        path = self.getContent(name, 'path')
        if not path:
            raise Exception('Share "%s" does not exist' % name)
        del self.config[name]
        if remove:
            if os.path.exists(path):
                shutil.rmtree(path)
            else:
                logger.error('The "%s" share path does not exist.'
                             % path)
        r.commit()

    def shareInfo(self, name):
        """
        Get information about a share
        """
        ret = {}
        ret['desc'] = self.getContent(name, 'comment')
        if not ret['desc']:
            ret['desc'] = ""
        ret['sharePath'] = self.getContent(name, 'path')
        # 'public' and 'guest ok' both mean the share is open to all
        if self.isValueTrue(self.getContent(name, 'public')) == 1:
            ret['permAll'] = 1
        elif self.isValueTrue(self.getContent(name, 'guest ok')) == 1:
            ret['permAll'] = 1
        else:
            ret['permAll'] = 0
        # If we cannot find it
        if not self.getContent(name, 'vfs objects'):
            ret['antivirus'] = 0
        else:
            ret['antivirus'] = 1
        # Shares are browseable unless explicitly disabled
        if not self.getContent(name, 'browseable'):
            ret["browseable"] = 1
        elif self.isValueTrue(self.getContent(name, 'browseable')):
            ret["browseable"] = 1
        else:
            ret["browseable"] = 0
        # Get the directory group owner
        if os.path.exists(str(ret['sharePath'])):
            stat_info = os.stat(ret['sharePath'])
            gid = stat_info.st_gid
            try:
                ret['group'] = grp.getgrgid(gid)[0]
            except:
                logger.error("Can't find the primary group of %s. "
                             "Check your libnss settings." % ret['sharePath'])
                return False
        return ret

    def addShare(self, name, path, comment, browseable, permAll,
                 usergroups, users, mod=False):
        """
        Add a share in smb.conf and create it physically

        @param name: share (section) name
        @param path: directory backing the share; defaults to
               default_shares_path/name when empty
        @param mod: if True, modify an existing share instead of adding
        """
        if mod:
            action = AA.SAMBA4_MOD_SHARE
            oldPath = self.config[name]['path']
        else:
            action = AA.SAMBA4_ADD_SHARE
        r = AF().log(PLUGIN_NAME, action, [(name, AT.SHARE)], path)
        if name in self.config and not mod:
            raise Exception('This share already exist')
        if not name in self.config and mod:
            raise Exception('This share does not exist')
        # If no path is given, create a default one
        if not path:
            path = os.path.join(self.default_shares_path, name)
        path = os.path.realpath(path)
        # Check that the path is authorized
        # FIXME: handle correctly archives in base plugin
        if not self.isAuthorizedSharePath(path) and "/home/archives" not in path:
            raise Exception("%s is not an authorized share path."
                            % path)
        # Create or move samba share directory, if it does not exist
        try:
            if mod:
                os.renames(oldPath, path)
            else:
                os.makedirs(path)
        except OSError, (errno, strerror):
            # Raise exception if error is not "File exists"
            if errno != 17:
                raise OSError(errno, strerror + ' ' + path)
            else:
                pass
        # Directory is owned by root
        os.chown(path, 0, 0)
        if mod:
            # Delete the old share
            del self.config[name]
        # create table and fix permission
        tmpInsert = {'comment': comment}
        if permAll:
            tmpInsert['public'] = 'yes'
            shlaunch("setfacl -b %s" % shellquote(path))
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        else:
            tmpInsert['public'] = 'no'
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG)
        # flush ACLs
        # NOTE(review): path is not shellquote()d here, unlike the
        # permAll branch above — confirm whether that is intentional
        shlaunch("setfacl -b %s" % path)
        acl1 = posix1e.ACL(file=path)
        # Add and set default mask to rwx
        # This is needed by the ACL system, else the ACLs won't be valid
        e = acl1.append()
        e.permset.add(posix1e.ACL_READ)
        e.permset.add(posix1e.ACL_WRITE)
        e.permset.add(posix1e.ACL_EXECUTE)
        e.tag_type = posix1e.ACL_MASK
        # For each specified group, we add rwx access
        for group in usergroups:
            e = acl1.append()
            e.permset.add(posix1e.ACL_READ)
            e.permset.add(posix1e.ACL_WRITE)
            e.permset.add(posix1e.ACL_EXECUTE)
            e.tag_type = posix1e.ACL_GROUP
            # Search the gid number corresponding to the given group
            ldapobj = ldapUserGroupControl()
            try:
                gidNumber = ldapobj.getDetailedGroup(group)['gidNumber'][0]
            except ldap.NO_SUCH_OBJECT:
                # Fall back to the local NSS group database
                gidNumber = grp.getgrnam(group).gr_gid
            e.qualifier = int(gidNumber)
            # FIXME howto use posix1e for this ?
            shlaunch("setfacl -d -m g:%s:rwx %s" % (str(gidNumber), path))
        for user in users:
            e = acl1.append()
            e.permset.add(posix1e.ACL_READ)
            e.permset.add(posix1e.ACL_WRITE)
            e.permset.add(posix1e.ACL_EXECUTE)
            e.tag_type = posix1e.ACL_USER
            # Search the gid number corresponding to the given group
            ldapobj = ldapUserGroupControl()
            try:
                uidNumber = ldapobj.getDetailedUser(user)['uidNumber'][0]
            except KeyError:
                # Fall back to the local NSS user database
                uidNumber = pwd.getpwnam(user).pw_uid
            e.qualifier = int(uidNumber)
            # FIXME howto use posix1e for this ?
            shlaunch("setfacl -d -m u:%s:rwx %s" % (str(uidNumber), path))
        # Test if our ACLs are valid
        if acl1.valid():
            acl1.applyto(path)
        else:
            logger.error("Cannot save ACL on folder " + path)
        tmpInsert['writeable'] = 'yes'
        if not browseable:
            tmpInsert['browseable'] = 'No'
        tmpInsert['path'] = path
        self.config[name] = tmpInsert
        info = self.shareInfo(name)
        # FIXME are this signals used?
        if mod and share_modified:
            share_modified.send(sender=self, share_name=name,
                                share_info=info)
        elif not mod and share_created:
            share_created.send(sender=self, share_name=name,
                               share_info=info)
        r.commit()