def check_kdump_config(response):
    """Report kdump (crash dump) configuration on Linux.

    Reads the reserved crashkernel memory from /proc/cmdline; when present,
    parses /etc/kdump.conf for dump targets and appends a summary table to
    response["kernel"].
    """
    if common.is_linux():
        kdump_cmdline = "/proc/cmdline"
        crashkernel = ""
        if os.path.exists(kdump_cmdline):
            data = file_op.cat(kdump_cmdline, "r")
            if data:
                # crashkernel=... on the kernel command line means dump memory is reserved.
                crashkernel, num = common.grep(data, r"crashkernel=(\S+)")
        #enable
        if crashkernel:
            kdump_conf = "/etc/kdump.conf"
            if os.path.exists(kdump_conf):
                data = file_op.cat(kdump_conf, "r")
                if data:
                    lines = data.split("\n")
                    values = [["Key", "Value"],
                              ["Reserved memory", crashkernel]]
                    # Display labels, index-aligned with `patterns` below.
                    keys = [
                        "Store the dump to a remote machine using the NFS protocol,",
                        "Local directory in which the core dump is to be saved",
                        "Write dump to storage devices",
                        "Write the dump directly to a device",
                        "Core collector to compress the data",
                        "Action to perform in case dumping fails",
                        "Store the dump to a remote machine using the SSH protocol",
                        "SSH Key"
                    ]
                    patterns = [
                        re.compile(r"nfs\s(\S+)"),
                        re.compile(r"path\s(\S+)"),
                        re.compile(r"ext4\s(\S+)"),
                        re.compile(r"raw\s(\S+)"),
                        re.compile(r"core_collector\s(\S+).*"),
                        re.compile(r"default\s(\S+)"),
                        re.compile(r"ssh\s(\S+)"),
                        re.compile(r"sshkey\s(\S+)")
                    ]
                    for line in lines:
                        for i in range(len(patterns)):
                            match = patterns[i].match(line)
                            if match and len(match.groups()):
                                values.append([keys[i], match.groups()[0]])
                    response["kernel"].append({
                        "name": "Kernel Dump Configuration",
                        "value": len(values) - 1,
                        "values": values
                    })
def check_need_reboot(response):
    """Report whether the host needs a reboot and which packages require it."""
    #linux-image-4.4.0-96-generic
    #linux-base
    marker_file = "/var/run/reboot-required"
    pkgs_file = "/var/run/reboot-required.pkgs"
    # Both marker files must be present before anything is reported.
    if not (os.path.exists(marker_file) and os.path.exists(pkgs_file)):
        return
    response["kernel"].append({"name": "Reboot Required", "value": "Yes"})
    rows = [["Packages"]]
    raw = file_op.cat("/var/run/reboot-required.pkgs", "r")
    if raw:
        rows.extend([entry] for entry in raw.split("\n") if entry)
    if len(rows) > 1:
        response["kernel"].append({
            "name": "Reboot Required Packages",
            "value": len(rows) - 1,
            "values": rows
        })
def enum_kernel_modules(response):
    """List loaded kernel modules from /proc/modules and classify the kernel."""
    table = [["MODULE", "SIZE", "USED", "BY", "DESCRIPTION"]]
    raw = file_op.cat("/proc/modules", "r")
    if raw:
        #xt_CHECKSUM 16384 1 - Live 0xffffffffc0c19000
        #video 40960 3 thinkpad_acpi,nouveau,i915, Live 0xffffffffc0296000
        for entry in filter(None, raw.split("\n")):
            name, size, refs, users, _ = entry.split(" ", 4)
            table.append([name, size, refs, users, get_module_description(name)])
    has_modules = len(table) > 1
    if has_modules:
        response["kernel"].append({
            "name": "Modules",
            "value": len(table) - 1,
            "values": table
        })
    response["kernel"].append({
        "name": "Kernel Type",
        "value": "Modular" if has_modules else "Monolithic"
    })
def kernel_default_limits(response):
    """Collect PID 1 resource limits from /proc/1/limits into a table."""
    rows = [["Limit", "Soft Limit", "Hard Limit", "Units"]]
    raw = file_op.cat("/proc/1/limits", "r")
    if raw:
        names = [
            "Max cpu time", "Max file size", "Max data size", "Max stack size",
            "Max core file size", "Max resident set", "Max processes",
            "Max open files", "Max locked memory", "Max address space",
            "Max file locks", "Max pending signals", "Max msgqueue size",
            "Max nice priority", "Max realtime priority", "Max realtime timeout"
        ]
        # One compiled pattern per limit name, hoisted out of the line loop.
        compiled = [
            re.compile(r"^({})\s*(\S*)\s*(\S*)\s*(\S*)".format(n))
            for n in names
        ]
        for entry in raw.split("\n"):
            if not entry:
                continue
            for name, pat in zip(names, compiled):
                m = pat.match(entry)
                if m:
                    _, soft, hard, units = m.groups()
                    rows.append([name, soft, hard, units])
    if len(rows) > 1:
        response["kernel"].append({
            "name": Klanguage().to_ts(1163),
            "values": rows
        })
def check_need_reboot(response):
    """Flag a pending reboot and list the unique packages that triggered it."""
    #linux-image-4.4.0-96-generic
    #linux-base
    marker = "/var/run/reboot-required"
    pkgs_path = "/var/run/reboot-required.pkgs"
    if not (os.path.exists(marker) and os.path.exists(pkgs_path)):
        return
    response["kernel"].append({
        "name": Klanguage().to_ts(1164),
        "value": "Yes"
    })
    rows = [["Packages"]]
    seen = set()
    raw = file_op.cat(pkgs_path, "r")
    if raw:
        for pkg in raw.split("\n"):
            # Keep the first occurrence only; the file may repeat package names.
            if pkg and pkg not in seen:
                seen.add(pkg)
                rows.append([pkg])
    if len(rows) > 1:
        response["kernel"].append({
            "name": Klanguage().to_ts(1158),
            "values": rows
        })
def get_useradd_list(response):
    """List accounts whose UID falls in the useradd range (uid_min..uid_max)."""
    global uid_min
    global uid_max
    #root:x:0:0:root:/root:/bin/bash
    table = [[
        Klanguage().to_ts(1135),
        Klanguage().to_ts(1136),
        Klanguage().to_ts(1137),
        Klanguage().to_ts(1138),
        Klanguage().to_ts(1139)
    ]]
    raw = file_op.cat("/etc/passwd", "r")
    if raw:
        lo, hi = int(uid_min), int(uid_max)
        for record in raw.split("\n"):
            if not record:
                continue
            username, _pw, uid, gid, _comment, home, shell = record.split(":")
            if lo <= int(uid) <= hi:
                table.append([username, uid, gid, home, shell])
    response["authentication"].append({
        "name": Klanguage().to_ts(1134),
        "values": table
    })
def get_description_by_name(service, kind):
    """Resolve a human-readable description for a service.

    kind 0 -> systemd unit files, kind 2 -> SysV init scripts.
    Returns "" when nothing matches.
    """
    if kind == 0:
        desc_re = re.compile(r"^Description=(.+)")
        for base in ["/lib/systemd/system/", "/etc/systemd/system"]:
            unit_path = os.path.join(base, service)
            if not os.path.exists(unit_path):
                continue
            raw = file_op.cat(unit_path, "r")
            if not raw:
                continue
            for entry in raw.split("\n"):
                if entry:
                    m = desc_re.match(entry)
                    if m and len(m.groups()):
                        return m.groups()[0]
    if kind == 2:
        long_re = re.compile(r"# Description:\s+(.+)")
        short_re = re.compile(r"# Short-Description:\s+(.+)")
        script_path = os.path.join("/etc/init.d", service)
        if os.path.exists(script_path):
            raw = file_op.cat(script_path, "r")
            if raw:
                for entry in raw.split("\n"):
                    if not entry:
                        continue
                    # Short-Description is tried first on every line, matching
                    # the original lookup order.
                    for pat in (short_re, long_re):
                        m = pat.match(entry)
                        if m and len(m.groups()):
                            return m.groups()[0]
    return ""
def check_ssh_configuration(response):
    """Report key sshd_config settings when an SSH daemon is running.

    Uncommented directives take precedence over commented-out defaults.
    """
    if not common.is_program_running("sshd"):
        return
    response["network"].append({
        "name": "Checking running SSH daemon",
        "value": "Found"
    })
    raw = file_op.cat("/etc/ssh/sshd_config", "r")
    if not raw:
        return
    matchers = {
        "PasswordAuthentication":
            re.compile(r"^(#|)PasswordAuthentication\s(yes|no)"),
        "PermitEmptyPasswords":
            re.compile(r"^(#|)PermitEmptyPasswords\s(yes|no)"),
        "PermitRootLogin":
            re.compile(r"^(#|)PermitRootLogin\s(yes|no)"),
        "Port":
            re.compile(r"^(#|)Port\s(\d+)")
    }
    active = {}
    commented = {}
    for entry in raw.split("\n"):
        for key, pat in matchers.items():
            m = pat.match(entry)
            if m and len(m.groups()) == 2:
                prefix, value = m.groups()
                (commented if prefix == "#" else active)[key] = value
    rows = [["Key", "Value"]]
    for key in matchers:
        if key in active:
            rows.append([key, active[key]])
        elif key in commented:
            rows.append([key, commented[key]])
    if len(rows) > 1:
        response["network"].append({
            "name": "SSH Configuration",
            "value": len(rows) - 1,
            "values": rows
        })
def check_ipv6_configuration(response):
    """Report IPv6 forwarding, global enable/disable, and ICMP-redirect state."""
    # BUG FIX: this path previously had a trailing space, so os.path.exists()
    # was always False and forwarding state was never reported.
    ipv6_forward = "/proc/sys/net/ipv6/conf/all/forwarding"
    ipv6_all_disable = "/proc/sys/net/ipv6/conf/all/disable_ipv6"
    ipv6_default_disable = "/proc/sys/net/ipv6/conf/default/disable_ipv6"
    ipv6_disable = False
    if os.path.exists(ipv6_forward):
        data_ipv6_ipforward = file_op.cat(ipv6_forward, "r")
        # BUG FIX: procfs values end with "\n" (elsewhere in this file the read
        # is stripped, e.g. cpu_cache), so the raw read never equalled "0".
        if data_ipv6_ipforward:
            data_ipv6_ipforward = data_ipv6_ipforward.strip()
        response["network"].append({
            "name": "IPv6 Packet Forwarding Support",
            "value": "Disable" if data_ipv6_ipforward == "0" else "Enabled"
        })
    if os.path.exists(ipv6_all_disable) and os.path.exists(
            ipv6_default_disable):
        data_all = file_op.cat(ipv6_all_disable, "r")
        data_default = file_op.cat(ipv6_default_disable, "r")
        if data_all:
            data_all = data_all.strip()
        if data_default:
            data_default = data_default.strip()
        # IPv6 counts as disabled only when both "all" and "default" say so.
        if data_all == "1" and data_default == "1":
            ipv6_disable = True
    response["network"].append({
        "name": "IPv6 Support",
        "value": "Disable" if ipv6_disable else "Enabled"
    })
    if not ipv6_disable:
        ipv6_icmp_redirect = "/proc/sys/net/ipv6/conf/all/accept_redirects"
        if os.path.exists(ipv6_icmp_redirect):
            data_redirect = file_op.cat(ipv6_icmp_redirect, "r")
            if data_redirect:
                data_redirect = data_redirect.strip()
            response["network"].append({
                "name": "IPv6 ICMP Redirect Support",
                "value": "Disable" if data_redirect == "0" else "Enabled"
            })
def cpu_cache():
    """Read cpu0 cache sizes (L1-L3) from sysfs; entries stay 0 when unknown."""
    base = "/sys/devices/system/cpu/cpu0/cache"
    result = {"L1Cache": 0, "L2Cache": 0, "L3Cache": 0}
    if not os.path.exists(base):
        return result
    # index0..index3 cover L1d, L1i, L2 and L3 on typical x86 topologies;
    # entries sharing a level overwrite each other (last one wins).
    for idx in range(4):
        index_dir = os.path.join(base, "index{}".format(idx))
        size = file_op.cat(os.path.join(index_dir, "size"), "r")
        level = file_op.cat(os.path.join(index_dir, "level"), "r")
        if not (size and level):
            continue
        key = "L{}Cache".format(level.strip("\n"))
        if key in result:
            result[key] = size.strip("\n")
    return result
def check_magickey_configuration(response):
    """Report whether the SysRq magic key is enabled (/proc/sys/kernel/sysrq)."""
    # BUG FIX: the path had a trailing space, so os.path.exists() was always
    # False and this check never ran.
    magickey = "/proc/sys/kernel/sysrq"
    if os.path.exists(magickey):
        data_magickey = file_op.cat(magickey, "r")
        # procfs values end with "\n"; strip before comparing (cf. cpu_cache).
        if data_magickey:
            data_magickey = data_magickey.strip()
        # BUG FIX: the response key was "Value" (capital V), inconsistent with
        # every other entry in this file, which uses "value".
        response["kernel"].append({
            "name": "Magic system request Key",
            "value": "Disable" if data_magickey == "0" else "Enabled"
        })
def check_magickey_configuration(response):
    """Report SysRq magic-key state using localized labels."""
    sysrq_path = "/proc/sys/kernel/sysrq"
    if not os.path.exists(sysrq_path):
        return
    state = file_op.cat(sysrq_path, "r")
    # 1121 = disabled label, 1120 = enabled label.
    label = Klanguage().to_ts(1121) if state == "0" else Klanguage().to_ts(1120)
    response["kernel"].append({
        "name": Klanguage().to_ts(1133),
        "value": label
    })
def get_cpuinfo(response):
    """Append the CPU model name from /proc/cpuinfo to the hardware section."""
    try:
        raw = file_op.cat("/proc/cpuinfo", "r")
        if raw:
            # Example line: "model name : Intel(R) Core(TM) ..."
            model_name, hits = common.grep(raw, r"model name\s*:\s(.*)")
            response["hardware"].append(
                {Klanguage().to_ts(1011): ["{}".format(model_name)]})
    except:
        # Best-effort: a missing or unparsable cpuinfo is silently ignored.
        pass
def default_io_scheduler(response):
    """Report the kernel's compiled-in default IO scheduler."""
    config_file = find_kernel_config()
    if not config_file:
        return
    raw = file_op.cat(config_file, "r")
    if not raw:
        return
    scheduler, hits = common.grep(raw, r"CONFIG_DEFAULT_IOSCHED=(\S*)")
    if hits:
        response["kernel"].append({
            "name": "IO Scheduler",
            "value": scheduler
        })
def check_coredump_config(response):
    """Report the suid_dumpable core-dump policy on Linux."""
    if common.is_linux():
        dumpable = "/proc/sys/fs/suid_dumpable"
        # sysctl fs.suid_dumpable: 0=Default, 1=Debug, 2=Protected ("suidsafe").
        config = {"0": "Default", "1": "Debug", "2": "Protected"}
        if os.path.exists(dumpable):
            data = file_op.cat(dumpable, "r")
            if data:
                # BUG FIX: procfs values end with "\n", so the raw string never
                # matched a config key; strip whitespace before the lookup.
                data = data.strip()
            if data and data in config:
                response["kernel"].append({
                    "name": "Core dump mode",
                    "value": config[data]
                })
def download_prepare(self, identity, path, items):
    """Stage a download for chunked delivery.

    A single regular file is served as-is; anything else (multiple items or
    a directory) is bundled into an in-memory zip. The base64-encoded payload
    is cached in self.download_cache[identity].
    """
    filename = None
    content = None
    is_ok = False
    # Single regular file: serve its raw content without zipping.
    if len(items) == 1:
        file_path = os.path.join(path, items[0])
        if os.path.isfile(file_path):
            content = file_op.cat(file_path)
            filename = items[0]
            is_ok = True
    if not is_ok:
        # Bundle everything into a zip built entirely in memory.
        filename = "{}.zip".format(time_op.localtime2string())
        f = BytesIO()
        zf = zipfile.ZipFile(f, mode='w', compression=zipfile.ZIP_DEFLATED)
        for i in items:
            i = common.path_translate(i)
            item = os.path.join(path, i)
            filelist = []
            file_op.enum_file_path(item, filelist)
            for file in filelist:
                try:
                    # Archive names are relative to `path`, with forward slashes.
                    zipname = file[len(path) + 1:]
                    zipname = zipname.replace('\\', '/')
                    zf.write(file, zipname)
                except:
                    # Best-effort: skip entries that cannot be read/added.
                    pass
        zf.close()
        content = f.getvalue()
        f.close()
    encoded = base64.b64encode(content).decode("ascii")
    self.download_cache[identity] = {
        "begin_time": time_op.now(),
        "last_time": time_op.now(),
        "content": encoded,
        "total": len(encoded),
        "sent_bytes": 0,
        "filename": filename,
    }
    # NOTE(review): leftover debug print — consider removing or routing to a
    # logger; it dumps the whole (potentially large) cache entry to stdout.
    print(self.download_cache[identity])
def find_useradd_users():
    """Return usernames whose UID falls inside the useradd range.

    The range comes from UID_MIN/UID_MAX in /etc/login.defs, defaulting to
    1000..60000 when the file is absent or silent.
    """
    uid_min = 1000
    uid_max = 60000
    defs = file_op.cat("/etc/login.defs", "r")
    if defs:
        for entry in defs.split("\n"):
            if not entry:
                continue
            value, hits = common.grep(entry, r"^UID_MIN\s*(\d+)")
            if hits:
                uid_min = value
                continue
            value, hits = common.grep(entry, r"^UID_MAX\s*(\d+)")
            if hits:
                uid_max = value
                continue
    usernames = []
    passwd = file_op.cat("/etc/passwd", "r")
    if passwd:
        for record in passwd.split("\n"):
            if not record:
                continue
            username, _pw, uid, _gid, _comment, _home, _shell = record.split(
                ":")
            if uid_min != None and uid_max != None:
                if int(uid_min) <= int(uid) <= int(uid_max):
                    usernames.append(username)
    return usernames
def check_ipv4_configuration(response):
    """Report IPv4 forwarding and ICMP-redirect acceptance state."""
    ipv4_ipforward = "/proc/sys/net/ipv4/ip_forward"
    if os.path.exists(ipv4_ipforward):
        data_ipv4_ipforward = file_op.cat(ipv4_ipforward, "r")
        # BUG FIX: procfs values end with "\n" (elsewhere in this file the read
        # is stripped, e.g. cpu_cache), so the raw read never equalled "0" and
        # forwarding was always reported as "Enabled".
        if data_ipv4_ipforward:
            data_ipv4_ipforward = data_ipv4_ipforward.strip()
        response["network"].append({
            "name": "IPv4 Packet Forwarding Support",
            "value": "Disable" if data_ipv4_ipforward == "0" else "Enabled"
        })
    ipv4_icmp_redirect = "/proc/sys/net/ipv4/conf/all/accept_redirects"
    if os.path.exists(ipv4_icmp_redirect):
        data_redirect = file_op.cat(ipv4_icmp_redirect, "r")
        if data_redirect:
            data_redirect = data_redirect.strip()
        response["network"].append({
            "name": "IPv4 ICMP Redirect Support",
            "value": "Disable" if data_redirect == "0" else "Enabled"
        })
def cpu_nx_support():
    """Check /proc/cpuinfo flags for the NX (no-execute) bit.

    Returns a (localized label, level, suggestion) triple: security level when
    NX is present, warning level otherwise.
    """
    suggest = Klanguage().to_ts(1185)
    try:
        raw = file_op.cat("/proc/cpuinfo", "r")
        if raw:
            flags, _hits = common.grep(raw, r"flags\s*:\s(.*)")
            nx, _hits = common.grep(flags, r"nx")
            if nx == "nx":
                return Klanguage().to_ts(1120), LEVEL_SECURITY, suggest
    except Exception:
        # Best-effort probe: fall through to the warning result.
        pass
    return Klanguage().to_ts(1121), LEVEL_WARNING, suggest
def do_upload_virus(self, pathname, oldname):
    """Upload a suspected-malware file (size-bounded) to the server.

    Payload layout: sha256 bytes | uint32 LE name length | name bytes | content.
    Returns the server response body, or None when nothing was sent.
    """
    if file_op.getsize(pathname) > constant.MALWARE_FILE_MAX_SIZE:
        return None
    digest = file_op.sha256_checksum(pathname)
    content = file_op.cat(pathname)
    if not (digest and content):
        return None
    payload = b"".join([
        struct.pack("{}s".format(len(digest)), digest.encode("ascii")),
        struct.pack("<I", len(oldname)),
        struct.pack("{}s".format(len(oldname)), oldname.encode("ascii")),
        content,
    ])
    status, data = net_op.create_http_request(constant.SERVER_URL, "POST",
                                              "/client/virus_upload", payload)
    return data
def kernel_default_limits(response):
    """Translate PID 1 resource limits into ulimit-style display names."""
    rows = [["Limit", "Soft Limit", "Hard Limit", "Units"]]
    raw = file_op.cat("/proc/1/limits", "r")
    if raw:
        # Maps /proc/1/limits labels to the names `ulimit -a` prints.
        display = {
            "Max cpu time": "CPU time",
            "Max file size": "file size",
            "Max data size": "data seg size",
            "Max stack size": "stack size",
            "Max core file size": "core file size",
            "Max resident set": "resident set size",
            "Max processes": "max user processes",
            "Max open files": "number of open files",
            "Max locked memory": "locked-in-memory address space",
            "Max address space": "address space limit",
            "Max file locks": "file locks",
            "Max pending signals": "pending signals",
            "Max msgqueue size": "POSIX message queues",
            "Max nice priority": "scheduling priority",
            "Max realtime priority": "realtime priority",
            "Max realtime timeout": "realtime timeout"
        }
        # Compile one pattern per label, hoisted out of the line loop.
        matchers = {
            label: re.compile(r"^({})\s*(\S*)\s*(\S*)\s*(\S*)".format(label))
            for label in display
        }
        for entry in raw.split("\n"):
            if not entry:
                continue
            for label, pat in matchers.items():
                m = pat.match(entry)
                if m:
                    item, soft, hard, units = m.groups()
                    rows.append([display[item], soft, hard, units])
    if len(rows) > 1:
        response["kernel"].append({
            "name": "Resource Limit",
            "value": len(rows) - 1,
            "values": rows
        })
def enum_kernel_modules(response):
    """Count loaded kernel modules and classify the kernel build."""
    count = 0
    #xt_CHECKSUM 16384 1 - Live 0xffffffffc0c19000
    raw = file_op.cat("/proc/modules", "r")
    if raw:
        # Each non-empty line of /proc/modules is one loaded module.
        count = sum(1 for entry in raw.split("\n") if entry)
    response["kernel"].append({
        "name": Klanguage().to_ts(1131),
        "value": count
    })
    # BUG FIX: any loaded module (count > 0) means the kernel is modular; the
    # previous "count > 1" misclassified a single-module kernel as Monolithic
    # (the other enum_kernel_modules variant already uses the > 0 semantics).
    response["kernel"].append({
        "name": Klanguage().to_ts(1132),
        "value": "Modular" if count > 0 else "Monolithic"
    })
def check_dns_server(response):
    """Extract nameserver IPs from /etc/resolv.conf and measure their latency."""
    resolv_conf = "/etc/resolv.conf"
    if not os.path.exists(resolv_conf):
        return
    raw = file_op.cat(resolv_conf, "r")
    if not raw:
        return
    # Dotted-quad matcher with per-octet range validation (0-255).
    ips = re.findall(
        r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b",
        raw)
    if not ips:
        return
    rows = [["IP Address", "Latency"]]
    rows.extend([ip, common.measuring_latency(ip)] for ip in ips)
    response["network"].append({
        "name": "DNS Configuration",
        "value": len(ips),
        "values": rows
    })
def check_selinux(response):
    """Report SELinux status from selinuxfs plus /etc/selinux/config.

    Appends one "SElinux Status" entry whose value is the configured mode and
    whose rows mirror `sestatus`-style output.
    """
    selinux_config = "/etc/selinux/config"
    # Candidate selinuxfs mount points: modern location first, then legacy.
    selinux_root = ["/sys/fs/selinux", "/selinux"]
    current_mode = "Disabled"
    config_mode = "Disabled"
    mls_status = "Disabled"
    deny_unknown = "Denied"
    policyvers = "Unknown"
    policy = "Unknown"
    mount = ""
    for i in selinux_root:
        if os.path.exists(i) and os.path.isdir(i):
            mount = i
            # Runtime state: enforce is 1 for enforcing, 0 for permissive.
            data = file_op.cat(os.path.join(i, "enforce"), "r")
            if data:
                if int(data) == 1:
                    current_mode = "enforcing"
                if int(data) == 0:
                    current_mode = "permissive"
            data = file_op.cat(os.path.join(i, "mls"), "r")
            if data:
                if int(data) == 1:
                    mls_status = "enabled"
            # deny_unknown == 0 means permissions unknown to the policy are allowed.
            data = file_op.cat(os.path.join(i, "deny_unknown"), "r")
            if data:
                if int(data) == 0:
                    deny_unknown = "allowed"
            data = file_op.cat(os.path.join(i, "policyvers"), "r")
            if data:
                policyvers = data
            # Only the first mounted selinuxfs is inspected.
            break
    if os.path.exists(selinux_config):
        with open(selinux_config, "r") as f:
            for line in f.readlines():
                if line:
                    if line.startswith("#"):
                        continue
                    if "=" in line:
                        # Values are classified by their domain rather than by
                        # key (SELINUX= modes vs SELINUXTYPE= policies).
                        key, value = line.split("=")
                        value = value.strip()
                        if value in ["enforcing", "permissive", "disabled"]:
                            config_mode = value
                        if value in ["targeted", "minimum", "mls"]:
                            policy = value
    response["kernel"].append({
        "name": "SElinux Status",
        "value": config_mode,
        "values": [[], ["SELinuxfs mount", mount],
                   ["Loaded policy name", policy],
                   ["Current mode", current_mode],
                   ["Policy MLS status", mls_status],
                   ["Policy deny_unknown status", deny_unknown],
                   ["Max kernel policy version", policyvers]]
    })
def find_runlevel(): global runlevel #runlevel #N 5 #who -r # run-level 5 2017-07-08 15:12 data, success, retcode = common.exec_command(['runlevel']) if success: runlevel_pattern = re.compile(r'^N\s(\d)') match = runlevel_pattern.match(data) if match: runlevel = match.groups()[0] return #/etc/systemd/system/default.target -> /lib/systemd/system/graphical.target default_target = "/etc/systemd/system/default.target" target = lib.readlink(default_target, 1) if target != "": for i in ["runlevel5.target", "graphical.target"]: tmp, num = common.grep(target, i) if num: runlevel = 5 return for i in ["runlevel3.target", "multi-user.target"]: tmp, num = common.grep(target, i) if num: runlevel = 3 return ## Default runlevel. The runlevels used are: # 0 - halt (Do NOT set initdefault to this) # 1 - Single user mode # 2 - Multiuser, without NFS (The same as 3, if you do not have networking) # 3 - Full multiuser mode # 4 - unused # 5 - X11 # 6 - reboot (Do NOT set initdefault to this) # #id:3:initdefault: data = file_op.cat("/etc/inittab", "r") if data: lines = data.split("\n") for line in lines: tmp, num = common.grep(line, r"^id:(\d)") if num: runlevel = tmp return # run-level 5 2017-09-17 13:35 data, success, retcode = common.exec_command(['who', '-r']) if success: tmp, num = common.grep(data, r"^\s*run-level\s(\d)") if num: runlevel = tmp return
def login_defs_policy(response):
    """Audit /etc/login.defs and append localized findings.

    Each matched directive becomes a row [name, value, level, suggestion];
    weak settings increment response["statistic"]["warning"]. UID_MIN/UID_MAX
    are also captured into module globals for later user enumeration.

    NOTE(review): the header row has 3 columns but data rows carry 4 — confirm
    the consumer tolerates the mismatch.
    """
    global uid_min
    global uid_max
    values = [[
        Klanguage().to_ts(1148),
        Klanguage().to_ts(1149),
        Klanguage().to_ts(1119)
    ]]
    data = file_op.cat("/etc/login.defs", "r")
    if data:
        lines = data.split("\n")
        for line in lines:
            if line:
                # The default PATH settings, for superuser.
                tmp, num = common.grep(line, r"^ENV_SUPATH\s*(\S+)")
                if num:
                    values.append(
                        [Klanguage().to_ts(1180), tmp, LEVEL_INVALID, ""])
                    continue
                # The default PATH settings, for normal users.
                tmp, num = common.grep(line, r"^ENV_PATH\s*(\S+)")
                if num:
                    values.append(
                        [Klanguage().to_ts(1179), tmp, LEVEL_INVALID, ""])
                    continue
                # UMASK is the default umask value for pam_umask and is used by
                # useradd and newusers to set the mode of the new home directories.
                tmp, num = common.grep(line, r"^UMASK\s*(\S+)")
                if num:
                    # Only restrictive umasks (077/027 variants) count as secure.
                    if tmp == "077" or tmp == "027" or tmp == "0077" or tmp == "0027":
                        level = LEVEL_SECURITY
                    else:
                        level = LEVEL_WARNING
                        response["statistic"]["warning"] += 1
                    values.append([
                        Klanguage().to_ts(1178), tmp, level,
                        Klanguage().to_ts(1177)
                    ])
                    continue
                # If defined, login failures will be logged here in a utmp format
                # last, when invoked as lastb, will read /var/log/btmp, so...
                tmp, num = common.grep(line, r"^FTMP_FILE\s*(\S+)")
                if num:
                    values.append(
                        [Klanguage().to_ts(1176), tmp, LEVEL_INVALID, ""])
                    continue
                # Algorithm will be used for encrypting password
                tmp, num = common.grep(line, r"^ENCRYPT_METHOD\s*(\S+)")
                if num:
                    if tmp == "SHA512":
                        level = LEVEL_SECURITY
                        suggest = Klanguage().to_ts(1173)
                    else:
                        level = LEVEL_WARNING
                        suggest = Klanguage().to_ts(1174)
                        response["statistic"]["warning"] += 1
                    values.append(
                        [Klanguage().to_ts(1175), tmp, level, suggest])
                    continue
                #Password aging controls
                #Maximum number of days a password may be used
                tmp, num = common.grep(line, r"^PASS_MAX_DAYS\s*(\d+)")
                if num:
                    # 99999 effectively disables password expiry.
                    if tmp == "99999":
                        level = LEVEL_WARNING
                        suggest = Klanguage().to_ts(1170)
                        response["statistic"]["warning"] += 1
                    else:
                        level = LEVEL_SECURITY
                        suggest = Klanguage().to_ts(1171)
                    values.append(
                        [Klanguage().to_ts(1172), tmp, level, suggest])
                    continue
                #Minimum number of days allowed between password changes.
                tmp, num = common.grep(line, r"^PASS_MIN_DAYS\s*(\d+)")
                if num:
                    if tmp == "0":
                        level = LEVEL_WARNING
                        suggest = Klanguage().to_ts(1167)
                        response["statistic"]["warning"] += 1
                    else:
                        level = LEVEL_SECURITY
                        suggest = Klanguage().to_ts(1168)
                    values.append(
                        [Klanguage().to_ts(1169), tmp, level, suggest])
                    continue
                #Number of days warning given before a password expires.
                tmp, num = common.grep(line, r"^PASS_WARN_AGE\s*(\d+)")
                if num:
                    level = LEVEL_SECURITY
                    suggest = Klanguage().to_ts(1165)
                    values.append(
                        [Klanguage().to_ts(1166), tmp, level, suggest])
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MIN\s*(\d+)")
                if num:
                    uid_min = tmp
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MAX\s*(\d+)")
                if num:
                    uid_max = tmp
                    continue
    response["authentication"].append({
        "name": Klanguage().to_ts(1147),
        "values": values
    })
def login_defs_policy(self, target_risk_id):
    """Re-scan /etc/login.defs and update baseline risk records.

    For each audited directive: a secure value marks the matching risk as
    VERIFIED when `target_risk_id` selects it; an insecure value refreshes an
    existing risk (last_ts, stage UNRESOLVED) or creates a new record.
    UID_MIN/UID_MAX are captured onto the instance for later use.

    NOTE(review): the PASS_MAX_DAYS and PASS_MIN_DAYS risk records reuse
    Klanguage().to_ts(1175) as their name, while the reporting variant uses
    1172/1169 — looks like a copy-paste; confirm the intended string IDs.
    """
    baseline = Kdatabase().get_obj('baseline')
    data = file_op.cat("/etc/login.defs", "r")
    if data:
        lines = data.split("\n")
        for line in lines:
            if line:
                # UMASK is the default umask value for pam_umask and is used by
                # useradd and newusers to set the mode of the new home directories.
                tmp, num = common.grep(line, r"^UMASK\s*(\S+)")
                if num:
                    # Restrictive umask -> verify the risk if it was targeted.
                    if tmp == "077" or tmp == "027" or tmp == "0077" or tmp == "0027":
                        if target_risk_id == macro.BASELINE_ITEM["AU_UMASK"]:
                            if macro.BASELINE_ITEM["AU_UMASK"] in baseline[
                                    "risks"]:
                                baseline["risks"][
                                    macro.BASELINE_ITEM["AU_UMASK"]][
                                        "handle_ts"] = time_op.now()
                                baseline["risks"][
                                    macro.BASELINE_ITEM["AU_UMASK"]][
                                        "stage"] = macro.BASELINE_STAGE[
                                            "VERIFIED"]
                    else:
                        # Weak umask -> refresh or create the risk record.
                        if macro.BASELINE_ITEM["AU_UMASK"] in baseline[
                                "risks"]:
                            baseline["risks"][macro.BASELINE_ITEM[
                                "AU_UMASK"]]["last_ts"] = time_op.now()
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_UMASK"]][
                                    "stage"] = macro.BASELINE_STAGE[
                                        "UNRESOLVED"]
                        else:
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_UMASK"]] = {
                                    "name": Klanguage().to_ts(1178),
                                    "level": macro.BASELINE_LEVEL["MEDIUM"],
                                    "stage": macro.BASELINE_STAGE["UNRESOLVED"],
                                    "kind": macro.BASELINE_KIND["AUTHENTICATION"],
                                    "extra": tmp,
                                    "ts": time_op.now(),
                                    "last_ts": time_op.now(),
                                    "handle_ts": None,
                                    "solution": Klanguage().to_ts(1177)
                                }
                    continue
                # Algorithm will be used for encrypting password
                tmp, num = common.grep(line, r"^ENCRYPT_METHOD\s*(\S+)")
                if num:
                    # SHA512 is the expected secure setting.
                    if tmp == "SHA512":
                        if target_risk_id == macro.BASELINE_ITEM[
                                "AU_ENCRYPT_METHOD"]:
                            if macro.BASELINE_ITEM[
                                    "AU_ENCRYPT_METHOD"] in baseline[
                                        "risks"]:
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_ENCRYPT_METHOD"]][
                                        "handle_ts"] = time_op.now()
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_ENCRYPT_METHOD"]][
                                        "stage"] = macro.BASELINE_STAGE[
                                            "VERIFIED"]
                    else:
                        if macro.BASELINE_ITEM[
                                "AU_ENCRYPT_METHOD"] in baseline["risks"]:
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_ENCRYPT_METHOD"]][
                                    "last_ts"] = time_op.now()
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_ENCRYPT_METHOD"]][
                                    "stage"] = macro.BASELINE_STAGE[
                                        "UNRESOLVED"]
                        else:
                            baseline["risks"][macro.BASELINE_ITEM[
                                "AU_ENCRYPT_METHOD"]] = {
                                    "name": Klanguage().to_ts(1175),
                                    "level": macro.BASELINE_LEVEL["LOW"],
                                    "stage": macro.BASELINE_STAGE["UNRESOLVED"],
                                    "kind": macro.BASELINE_KIND["AUTHENTICATION"],
                                    "extra": tmp,
                                    "ts": time_op.now(),
                                    "last_ts": time_op.now(),
                                    "handle_ts": None,
                                    "solution": Klanguage().to_ts(1174)
                                }
                    continue
                #Password aging controls
                #Maximum number of days a password may be used
                tmp, num = common.grep(line, r"^PASS_MAX_DAYS\s*(\d+)")
                if num:
                    # 99999 effectively disables password expiry -> a risk.
                    if tmp == "99999":
                        if macro.BASELINE_ITEM[
                                "AU_PASS_MAX_DAYS"] in baseline["risks"]:
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_PASS_MAX_DAYS"]][
                                    "last_ts"] = time_op.now()
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_PASS_MAX_DAYS"]][
                                    "stage"] = macro.BASELINE_STAGE[
                                        "UNRESOLVED"]
                        else:
                            baseline["risks"][macro.BASELINE_ITEM[
                                "AU_PASS_MAX_DAYS"]] = {
                                    "name": Klanguage().to_ts(1175),
                                    "level": macro.BASELINE_LEVEL["LOW"],
                                    "stage": macro.BASELINE_STAGE["UNRESOLVED"],
                                    "kind": macro.BASELINE_KIND["AUTHENTICATION"],
                                    "extra": tmp,
                                    "ts": time_op.now(),
                                    "last_ts": time_op.now(),
                                    "handle_ts": None,
                                    "solution": Klanguage().to_ts(1170)
                                }
                    else:
                        if target_risk_id == macro.BASELINE_ITEM[
                                "AU_PASS_MAX_DAYS"]:
                            if macro.BASELINE_ITEM[
                                    "AU_PASS_MAX_DAYS"] in baseline[
                                        "risks"]:
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_PASS_MAX_DAYS"]][
                                        "handle_ts"] = time_op.now()
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_PASS_MAX_DAYS"]][
                                        "stage"] = macro.BASELINE_STAGE[
                                            "VERIFIED"]
                    continue
                #Minimum number of days allowed between password changes.
                tmp, num = common.grep(line, r"^PASS_MIN_DAYS\s*(\d+)")
                if num:
                    # 0 allows immediate repeated password changes -> a risk.
                    if tmp == "0":
                        if macro.BASELINE_ITEM[
                                "AU_PASS_MIN_DAYS"] in baseline["risks"]:
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_PASS_MIN_DAYS"]][
                                    "last_ts"] = time_op.now()
                            baseline["risks"][
                                macro.BASELINE_ITEM["AU_PASS_MIN_DAYS"]][
                                    "stage"] = macro.BASELINE_STAGE[
                                        "UNRESOLVED"]
                        else:
                            baseline["risks"][macro.BASELINE_ITEM[
                                "AU_PASS_MIN_DAYS"]] = {
                                    "name": Klanguage().to_ts(1175),
                                    "level": macro.BASELINE_LEVEL["LOW"],
                                    "stage": macro.BASELINE_STAGE["UNRESOLVED"],
                                    "kind": macro.BASELINE_KIND["AUTHENTICATION"],
                                    "extra": tmp,
                                    "ts": time_op.now(),
                                    "last_ts": time_op.now(),
                                    "handle_ts": None,
                                    "solution": Klanguage().to_ts(1167)
                                }
                    else:
                        if target_risk_id == macro.BASELINE_ITEM[
                                "AU_PASS_MIN_DAYS"]:
                            if macro.BASELINE_ITEM[
                                    "AU_PASS_MIN_DAYS"] in baseline[
                                        "risks"]:
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_PASS_MIN_DAYS"]][
                                        "handle_ts"] = time_op.now()
                                baseline["risks"][macro.BASELINE_ITEM[
                                    "AU_PASS_MIN_DAYS"]][
                                        "stage"] = macro.BASELINE_STAGE[
                                            "VERIFIED"]
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MIN\s*(\d+)")
                if num:
                    self.uid_min = tmp
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MAX\s*(\d+)")
                if num:
                    self.uid_max = tmp
                    continue
def detect_distribution():
    """Best-effort detection of the Linux distribution.

    Tries, in order: /etc/os-release, distro release files, /etc/issue,
    `lsb_release -ir`, /etc/lsb-release, then the platform module fallback.
    Returns a (distro, distro_release) tuple; fields may be "" when unknown.
    """
    distro = ""
    distro_release = ""
    #os-release
    #https://www.freedesktop.org/software/systemd/man/os-release.html
    #NAME="Ubuntu"
    #VERSION="16.04.2 LTS (Xenial Xerus)"
    items = {"NAME": None, "VERSION": None}
    data = file_op.cat('/etc/os-release', 'r')
    if data:
        for line in data.split("\n"):
            # Guard + maxsplit=1: values may legally contain "=".
            if line and "=" in line:
                k, v = line.split("=", 1)
                if k in items:
                    items[k] = v.lstrip('"').rstrip('"')
        distro = items["NAME"]
        distro_release = items["VERSION"]
        if distro and distro_release:
            return distro, distro_release
    #fedora, oracle, centos, amazon
    #Fedora release 26 (Twenty Six)
    #CentOS Linux release 7.3.1611 (Core)
    #Amazon Linux AMI release 2017.03
    identification = [
        "/etc/fedora-release", "/etc/oracle-release", "/etc/redhat-release",
        "/etc/system-release"
    ]
    release_pattern = re.compile(r'(.*) release (\d[\d.]*)')
    for path in identification:
        if os.path.exists(path):
            data = file_op.cat(path, 'r')
            if data:
                match = release_pattern.match(data)
                # BUG FIX: match can be None; guard before calling groups().
                if match and len(match.groups()) == 2:
                    distro = match.groups()[0]
                    distro_release = match.groups()[1]
                    if distro and distro_release:
                        return distro, distro_release
    #raspbian
    # BUG FIX: this branch previously tested an undefined `success` flag
    # (NameError) and returned a bare True instead of the (distro, release)
    # tuple every other path returns.
    data = file_op.cat('/etc/issue', 'r')
    if data:
        result = data.split()
        if len(result) > 2 and result[0] == "Raspbian":
            distro = "raspbian"
            return distro, distro_release
    #Distributor ID: Ubuntu
    #Release: 16.04
    data, success, retcode = common.exec_command(['lsb_release', '-ir'])
    if success:
        pattern = re.compile(
            r'(?s)^Distributor ID:\s*(.+?)\n*Release:\s*(.+?)$')
        match = pattern.match(data)
        # BUG FIX: guard against a non-matching pattern.
        if match and len(match.groups()) == 2:
            distro = match.groups()[0]
            distro_release = match.groups()[1]
            if distro and distro_release:
                return distro, distro_release
    #DISTRIB_ID=Ubuntu
    #DISTRIB_RELEASE=16.04
    #DISTRIB_CODENAME=xenial
    #DISTRIB_DESCRIPTION="Ubuntu 16.04.2 LTS"
    data = file_op.cat('/etc/lsb-release', 'r')
    # BUG FIX: this branch tested the stale `success` flag left over from the
    # lsb_release exec_command instead of the result of the file read.
    if data:
        pattern = re.compile(
            r'(?s)^DISTRIB_ID=(.+?)\n*DISTRIB_RELEASE=(.+?)\n.*$')
        match = pattern.match(data)
        if match and len(match.groups()) == 2:
            distro = match.groups()[0]
            distro_release = match.groups()[1]
            if distro and distro_release:
                return distro, distro_release
    # platform.linux_distribution() was removed in Python 3.8; keep it only as
    # a guarded last resort for older interpreters.
    if hasattr(platform, "linux_distribution"):
        distro = platform.linux_distribution()[0]
        distro_release = platform.linux_distribution()[1]
    return distro, distro_release
def get_useradd_list(response):
    """Report useradd-range accounts (with optional connect hours) and groups."""
    global uid_min
    global uid_max
    user_connect_time = {}
    # `ac -p` prints per-user total connect time when the acct tools exist.
    if lib.check_programs_installed("ac"):
        output, ok, _ = common.exec_command(["ac", "-p"])
        if ok:
            for entry in output.split("\n"):
                fields = entry.split()
                if len(fields) == 2:
                    user_connect_time[fields[0]] = fields[1]
    uid_values = [["USERNAME", "UID", "GID", "HOME", "SHELL"]]
    if user_connect_time:
        uid_values[0].append("TOTAL CONNECT HOURS")
    gid_values = [["GID", "GROUP NAME", "MEMBERS"]]
    #root:x:0:0:root:/root:/bin/bash
    #bin:x:1:1:bin:/bin:/sbin/nologin
    passwd = file_op.cat("/etc/passwd", "r")
    if passwd:
        for record in passwd.split("\n"):
            if not record:
                continue
            username, _pw, uid, gid, _comment, home, shell = record.split(":")
            if int(uid_min) <= int(uid) <= int(uid_max):
                row = [username, uid, gid, home, shell]
                if user_connect_time:
                    row.append(user_connect_time.get(username, "Unknown"))
                uid_values.append(row)
    #root:x:0:
    #bin:x:1:
    group = file_op.cat("/etc/group", "r")
    if group:
        for record in group.split("\n"):
            if not record:
                continue
            group_name, _pw, gid, members = record.split(":")
            gid_values.append([gid, group_name, members])
    response["authentication"].append({
        "name": "Users create with useradd",
        "value": len(uid_values) - 1,
        "values": uid_values
    })
    response["authentication"].append({
        "name": "User groups",
        "value": len(gid_values) - 1,
        "values": gid_values
    })
def login_defs_policy(response):
    """Summarize /etc/login.defs configuration as plain ITEM/VALUE rows.

    Also captures UID_MIN/UID_MAX into module globals for user enumeration.
    """
    global uid_min
    global uid_max
    values = [["ITEM", "VALUE"]]
    data = file_op.cat("/etc/login.defs", "r")
    if data:
        lines = data.split("\n")
        for line in lines:
            if line:
                # The default PATH settings, for superuser.
                tmp, num = common.grep(line, r"^ENV_SUPATH\s*(\S+)")
                if num:
                    values.append(["Default PATH settings for superuser", tmp])
                    continue
                # The default PATH settings, for normal users.
                tmp, num = common.grep(line, r"^ENV_PATH\s*(\S+)")
                if num:
                    values.append(
                        ["Default PATH settings for normal users", tmp])
                    continue
                # UMASK is the default umask value for pam_umask and is used by
                # useradd and newusers to set the mode of the new home directories.
                tmp, num = common.grep(line, r"^UMASK\s*(\S+)")
                if num:
                    values.append(["UMASK", tmp])
                    continue
                # If defined, login failures will be logged here in a utmp format
                # last, when invoked as lastb, will read /var/log/btmp, so...
                tmp, num = common.grep(line, r"^FTMP_FILE\s*(\S+)")
                if num:
                    values.append(["Login failures will be logged in", tmp])
                    continue
                # Algorithm will be used for encrypting password
                tmp, num = common.grep(line, r"^ENCRYPT_METHOD\s*(\S+)")
                if num:
                    values.append([
                        "Algorithm will be used for encrypting password", tmp
                    ])
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MIN\s*(\d+)")
                if num:
                    values.append([
                        "Min values for automatic uid selection in useradd",
                        tmp
                    ])
                    uid_min = tmp
                    continue
                #Min/max values for automatic uid selection in useradd
                tmp, num = common.grep(line, r"^UID_MAX\s*(\d+)")
                if num:
                    values.append([
                        "Max values for automatic uid selection in useradd",
                        tmp
                    ])
                    uid_max = tmp
                    continue
                #Min/max values for automatic gid selection in groupadd
                tmp, num = common.grep(line, r"^GID_MIN\s*(\d+)")
                if num:
                    values.append([
                        "Min values for automatic gid selection in groupadd",
                        tmp
                    ])
                    continue
                #Min/max values for automatic gid selection in groupadd
                tmp, num = common.grep(line, r"^GID_MAX\s*(\d+)")
                if num:
                    values.append([
                        "Max values for automatic gid selection in groupadd",
                        tmp
                    ])
                    continue
                #Password aging controls
                #Maximum number of days a password may be used
                tmp, num = common.grep(line, r"^PASS_MAX_DAYS\s*(\d+)")
                if num:
                    values.append(
                        ["Maximum number of days a password may be used", tmp])
                    continue
                #Minimum number of days allowed between password changes.
                tmp, num = common.grep(line, r"^PASS_MIN_DAYS\s*(\d+)")
                if num:
                    values.append([
                        "Minimum number of days allowed between password changes.",
                        tmp
                    ])
                    continue
                #Number of days warning given before a password expires.
                tmp, num = common.grep(line, r"^PASS_WARN_AGE\s*(\d+)")
                if num:
                    values.append([
                        "Number of days warning given before a password expires.",
                        tmp
                    ])
                    continue
    response["authentication"].append({
        "name": "Configuration control definitions for the login package",
        "value": len(values) - 1,
        "values": values
    })