def print_process_tree(p: Process):
    """Render the execution tree for this process to stdout."""

    def _render_node(node, depth):
        # Mark children whose events were suppressed by the server.
        prefix = "(suppressed) " if node.suppressed_process else ""
        indent = ' ' * (depth + 1)
        print(f" {indent}{prefix}{node.cmdline} | {node.id}")

    print("------ Process Execution Tree ------")
    print()
    # Root process first, then each child indented by depth.
    print(f" {p.cmdline} | {p.id}")
    p.walk_children(_render_node)
    print()
def print_ancestry(p: Process):
    """Walk and print the chain of parent processes for this process."""

    def _render_parent(node, depth):
        # A single-space marker keeps column alignment for the common case.
        marker = " (suppressed) " if node.suppressed_process else " "
        started = as_configured_timezone(node.start) or '<unknown>'
        indent = ' ' * (depth + 1)
        print(f"{indent}{started}: {node.cmdline}{marker} | {node.id}")

    print("------ Process Ancestry ------")
    print()
    p.walk_parents(_render_parent)
    print()
def print_filemods(p: Process, current_segment_only: bool = False, raw_print=False, **kwargs):
    """Print the file modification events recorded for this process.

    Args:
        p: the Process whose filemods are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """

    def _emit(events):
        for event in events:
            assert isinstance(event, models.CbFileModEvent)
            if raw_print:
                print(event)
            else:
                parts = [f" @{as_configured_timezone(event.timestamp)}: {event.type} {event.path}"]
                # Only show the file type when the server actually classified it.
                if event.filetype != "Unknown":
                    parts.append(f" - type:{event.filetype}")
                if event.md5:
                    parts.append(f" - md5:{event.md5}")
                print("".join(parts))
        print()

    print("------ FILEMODS ------")
    source = p.filemods if current_segment_only else p.all_filemods()
    _emit(source)
def print_process_info(proc: Process, return_string: bool = False, raw_print=False, header=True):
    """Analyst friendly custom process data format.

    Args:
        proc: CbR Process
        return_string: return string if True, else print it to stdout.
        raw_print: render the raw Process object instead of the summary.
        header: prepend a "------ INFO ------" section header.

    Returns:
        string or None
    """
    if not proc._info and raw_print:
        # raw_print renders the full object, so make sure it is populated.
        LOGGER.debug("retrieving process info.")
        proc.refresh()

    txt = ""
    if header:
        txt += "------ INFO ------\n"
    if raw_print:
        # BUG FIX: was `txt = str(proc)`, which silently discarded the
        # header appended above; append instead so header=True is honored.
        txt += str(proc)
    else:
        status = "Terminated" if proc.terminated else "Running"
        txt += f"  Process GUID: {proc.id}\n"
        txt += f"  Process Name: {proc.process_name}\n"
        txt += f"  Process PID: {proc.process_pid}\n"
        txt += f"  Process MD5: {proc.process_md5}\n"
        txt += f"  Process Path: {proc.path}\n"
        txt += f"  Process Status: {status}\n"
        txt += f"  Command Line: {proc.cmdline}\n"
        txt += f"  Parent Name: {proc.parent_name}\n"
        txt += f"  Parent GUID: {proc.parent_id}\n"
        txt += f"  Hostname: {proc.hostname}\n"
        txt += f"  Username: {proc.username}\n"
        txt += f"  Start Time: {as_configured_timezone(proc.start)}\n"
        try:
            txt += f"  Last Update Time: {as_configured_timezone(proc.last_update)}\n"
        except TypeError:  # should be handled by cbapi
            txt += f"  Last Update Time: None\n"
        txt += f"  Sensor ID: {proc.sensor_id}\n"
        txt += f"  Comms IP: {proc.comms_ip}\n"
        txt += f"  Interface IP: {proc.interface_ip}\n"
        txt += f"  GUI Link: {proc.webui_link}\n"
    if return_string:
        return txt
    txt += "\n"
    print(txt)
def process_to_dict(p: Process, max_segments=None) -> Dict:
    """Serialize this process, its ancestry/tree text, and its event segments.

    Args:
        p: the Process to export.
        max_segments: cap on the number of segments to capture; defaults to all.

    Returns:
        dict built from the process' original document plus captured data.
    """
    segments = p.get_segments()
    if max_segments is None:
        max_segments = len(segments)

    p.refresh()
    results = p.original_document
    results["captured_segments"] = {}
    results["all_segments"] = segments

    def _capture_stdout(printer):
        # The print_* helpers write to stdout; capture that text as a string.
        buf = StringIO()
        with redirect_stdout(buf):
            printer(p)
        return buf.getvalue()

    results["process_ancestry"] = _capture_stdout(print_ancestry)
    results["process_tree"] = _capture_stdout(print_process_tree)

    captured = 0
    if p.current_segment:
        # if current_segment is set, something specifically targeted this
        # segment and we will ensure it gets captured here
        results["captured_segments"][p.current_segment] = segment_events_to_dict(p)
        captured += 1

    for segment in segments:
        p.current_segment = segment
        if segment in results["captured_segments"]:
            continue
        if captured >= max_segments:
            LOGGER.info(f"hit maximum segment limit exporting process to json for {p.id}")
            break
        results["captured_segments"][segment] = segment_events_to_dict(p)
        captured += 1

    return results
def print_regmods(p: Process, current_segment_only: bool = False, raw_print=False):
    """Print the registry modification events recorded for this process.

    Args:
        p: the Process whose regmods are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """

    def _emit(events):
        for event in events:
            assert isinstance(event, models.CbRegModEvent)
            if raw_print:
                print(event)
            else:
                print(f" @{as_configured_timezone(event.timestamp)}: {event.type} {event.path}")
        print()

    print("------ REGMODS ------")
    source = p.regmods if current_segment_only else p.all_regmods()
    _emit(source)
def print_crossprocs(p: Process, current_segment_only: bool = False, raw_print=False):
    """Print cross-process activity recorded for this process.

    Args:
        p: the Process whose crossproc events are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """

    def _emit(events):
        for event in events:
            assert isinstance(event, models.CbCrossProcEvent)
            if raw_print:
                print(event)
            else:
                print(f" @{as_configured_timezone(event.timestamp)}: {event.type} | {event.source_path} -> {event.target_path} | {event.source_proc.id} -> {event.target_proc.id}")
        print()

    print("------ CROSSPROCS ------")
    source = p.crossprocs if current_segment_only else p.all_crossprocs()
    _emit(source)
def print_modloads(p: Process, current_segment_only: bool = False, raw_print=False):
    """Print the module/library load events recorded for this process.

    Args:
        p: the Process whose modloads are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """

    def _emit(events):
        for event in events:
            assert isinstance(event, models.CbModLoadEvent)
            if raw_print:
                print(event)
            else:
                signed = "signed" if event.is_signed else "unsigned"
                print(f" @{as_configured_timezone(event.timestamp)}: ({signed}) {event.path} , md5:{event.md5}")
        print()

    print("------ MODLOADS ------")
    source = p.modloads if current_segment_only else p.all_modloads()
    _emit(source)
def print_netconns(p: Process, current_segment_only: bool = False, raw_print=False):
    """Print the network connection events recorded for this process.

    Args:
        p: the Process whose netconns are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """

    def _emit(events):
        for event in events:
            assert isinstance(event, models.CbNetConnEvent)
            if raw_print:
                print(event)
            else:
                line = f" @{as_configured_timezone(event.timestamp)}: ({event.direction}) local_ip_port={event.local_ip}:{event.local_port}"
                # Only include the proxy when one was actually in play.
                if event.proxy_ip != "0.0.0.0":
                    line += f" proxy_ip_port={event.proxy_ip}:{event.proxy_port}"
                line += f" remote_ip_port={event.remote_ip}:{event.remote_port} domain={event.domain}"
                print(line)
        print()

    print("------ NETCONNS ------")
    source = p.netconns if current_segment_only else p.all_netconns()
    _emit(source)
def execute_response_arguments(cb: CbResponseAPI, args: argparse.Namespace) -> bool:
    """The logic to execute response specific command line arguments.

    Args:
        cb: CbResponseAPI
        args: parsed argparse namespace

    Returns:
        True or None on success, False on failure.
    """
    if not isinstance(cb, CbResponseAPI):
        LOGGER.critical(f"expected CbResponseAPI but got '{type(cb)}'")
        return False

    # Sensor Querying #
    if args.command and (args.command == "sensor-query" or args.command == "sq"):
        LOGGER.info(f"searching {args.environment} environment for sensor query: {args.sensor_query}...")
        sensors = make_sensor_query(cb, args.sensor_query)
        if not sensors:
            return None

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(sensors) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(sensors) > 0 and print_results:
            print("\n------------------------- SENSOR RESULTS -------------------------")
            for sensor in sensors:
                if args.all_details:
                    print()
                    print(sensor)
                else:
                    print(sensor_info(sensor))
            print()
        return True

    # Watchlists #
    if args.command and (args.command == "response_watchlist" or args.command == "rwl"):
        # NOTE(review): both names are bound to the SAME list object here;
        # harmless because each branch below rebinds, but worth confirming.
        watchlists = watchlist_names = []
        if args.query_watchlists:
            watchlists = query_watchlists(cb, args.query_watchlists)
        elif args.list_watchlists:
            watchlists = get_all_watchlists(cb)
        if args.watchlist_names_from_stdin:
            watchlist_names = [line.strip() for line in sys.stdin]

        if args.watchlists_to_json:
            if watchlists:
                print(json.dumps(these_watchlists_to_list_dict(cb, [wl.name for wl in watchlists])))
            if watchlist_names:
                print(json.dumps(these_watchlists_to_list_dict(cb, watchlist_names)))
            return
        elif len(watchlists) > 0:
            print("\n------------------------- WATCHLISTS -------------------------")
            for wl in watchlists:
                print(wl)

    # Process Querying #
    if args.command and (args.command.startswith("q") or args.command == "pq"):
        LOGGER.info(f"searching {args.environment} environment..")
        # Parse optional time bounds; left untouched (falsy) when not supplied.
        args.start_time = (datetime.datetime.strptime(args.start_time, "%Y-%m-%d %H:%M:%S") if args.start_time else args.start_time)
        args.last_time = (datetime.datetime.strptime(args.last_time, "%Y-%m-%d %H:%M:%S") if args.last_time else args.last_time)
        processes = make_process_query(cb, args.query, start_time=args.start_time, last_time=args.last_time, raise_exceptions=False)

        if args.facets:
            LOGGER.info("getting facet data...")
            print_facet_histogram(processes.facets())

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(processes) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(processes) > 0 and print_results:
            print("\n------------------------- QUERY RESULTS -------------------------")
            for proc in processes:
                print(" -------------------------")
                if args.all_details:
                    print(proc)
                else:
                    print_process_info(proc, raw_print=args.all_details, header=False)

        return True

    # Enumerations #
    if args.command and args.command == "enumerate":
        if args.logon_history:
            logon_history(cb, args.logon_history)
            return

    # Process Inspection #
    if args.command and (args.command.lower() == "inspect" or args.command.lower().startswith("proc")):
        # Accept "guid" or "guid/segment"; segment must be a 13-digit id.
        process_id = args.process_guid_options
        process_segment = None
        if "/" in args.process_guid_options:
            if not args.process_guid_options.count("/") == 1:
                LOGGER.error(f"process guid/segement format error: {args.process_guid_options}")
                return False
            process_id, process_segment = args.process_guid_options.split("/")
            if not re.match("[0-9]{13}", process_segment):
                LOGGER.error(f"{process_segment} is not in the form of a process segment.")
                return False
            process_segment = int(process_segment)
        if not is_uuid(process_id):
            LOGGER.error(f"{process_id} is not in the form of a globally unique process id (GUID/UUID).")
            return False

        try:
            proc = Process(cb, process_id, force_init=True)
            if process_segment and process_segment not in proc.get_segments():
                LOGGER.warning(f"segment '{process_segment}' does not exist. Setting to first segment.")
                process_segment = None
            proc.current_segment = process_segment
        except ObjectNotFoundError:
            LOGGER.warning(f"ObjectNotFoundError - process data does not exist.")
            return False
        except Exception as e:
            LOGGER.error(f"problem finding process: {e}")
            return False

        # If the analyst picked no specific inspect_* flag, turn them all on.
        all_inspection_args = [iarg for iarg in vars(args).keys() if iarg.startswith("inspect_")]
        set_inspection_args = [iarg for iarg, value in vars(args).items() if iarg.startswith("inspect_") and value is True]
        if not set_inspection_args:
            LOGGER.debug(f"seting all inspection arguments.")
            for iarg in all_inspection_args:
                args.__setattr__(iarg, True)

        if args.json:
            print(json.dumps(process_to_dict(proc, max_segments=args.segment_limit), default=str))
            return

        if args.walk_and_inspect_tree:
            inspect_process_tree(
                proc,
                info=args.inspect_proc_info,
                filemods=args.inspect_filemods,
                netconns=args.inspect_netconns,
                regmods=args.inspect_regmods,
                modloads=args.inspect_modloads,
                crossprocs=args.inspect_crossprocs,
                children=args.inspect_children,
                raw_print=args.raw_print_events,
            )
            return True
        # else
        if args.inspect_process_ancestry:
            print_ancestry(proc)
        if args.inspect_process_tree:
            print_process_tree(proc)
        if args.inspect_proc_info:
            print_process_info(proc, raw_print=args.raw_print_events)
        if args.inspect_filemods:
            print_filemods(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)
        if args.inspect_netconns:
            print_netconns(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)
        if args.inspect_regmods:
            print_regmods(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)
        if args.inspect_modloads:
            print_modloads(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)
        if args.inspect_crossprocs:
            print_crossprocs(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)
        if args.inspect_children:
            print_childprocs(proc, current_segment_only=bool(process_segment), raw_print=args.raw_print_events)

    # Live Response Actions #
    if args.command and (args.command.lower() == "lr" or args.command.lower().startswith("live")):
        # create a LR session manager
        session_manager = CustomLiveResponseSessionManager(cb, custom_session_keepalive=True)
        # store a list of commands to execute on this sensor
        commands = []

        try:
            sensor = Sensor(cb, args.name_or_id, force_init=True)
        except ObjectNotFoundError:
            LOGGER.info(f"searching for sensor...")
            sensor = find_sensor_by_hostname(cb, args.name_or_id)
        if not sensor:
            LOGGER.info(f"could not find a sensor.")
            return None

        if args.execute_command:
            # XXX expand this for more flexibiliy by making an execute parser
            # that can accept more arugments to pass to ExecuteCommand
            cmd = ExecuteCommand(args.execute_command)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        if args.sensor_isolation_toggle:
            result = None
            state = "isolated" if sensor.is_isolating else "unisolated"
            desired_state = "unisolated" if sensor.is_isolating else "isolated"
            LOGGER.info(f"sensor {sensor.id}:{sensor.hostname} is currently {state}. Changing state to: {desired_state}")
            if sensor.is_isolating:
                result = sensor.unisolate()
            else:
                result = sensor.isolate()
            if result:
                state = "isolated" if sensor.is_isolating else "unisolated"
                LOGGER.info(f"successfully {state} sensor {sensor.id}:{sensor.hostname}")
            else:
                state = "unisolate" if sensor.is_isolating else "isolate"
                LOGGER.error(f"failed to {state} sensor {sensor.id}:{sensor.hostname}")

        # Put File #
        if args.live_response_command and args.live_response_command.lower() == "put":
            cmd = PutFile(args.local_filepath, args.sensor_write_filepath)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        if args.create_regkey:
            cmd = CreateRegKey(args.create_regkey)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")
            # NOTE(review): setting a value is nested under key creation since
            # SetRegKeyValue reuses args.create_regkey as the key path — confirm.
            if args.set_regkey_value:
                cmd = SetRegKeyValue(args.create_regkey, args.set_regkey_value)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Collection #
        if args.live_response_command and args.live_response_command.lower() == "collect":
            if args.sensor_info:
                print(sensor_info(sensor))

            if args.process_list:
                cmd = ProcessListing()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.list_directory:
                cmd = ListDirectory(args.list_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.walk_directory:
                cmd = WalkDirectory(args.walk_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.file:
                cmd = GetFile(args.file)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeypath:
                cmd = ListRegKeyValues(args.regkeypath)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeyvalue:
                cmd = RegKeyValue(args.regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.drives:
                cmd = LogicalDrives()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.memdump:
                cmd = GetSystemMemoryDump()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Remediation #
        if args.live_response_command and args.live_response_command == "remediate":
            if args.delete_file_path:
                cmd = DeleteFile(args.delete_file_path)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.kill_process_name:
                cmd = KillProcessByName(args.kill_process_name)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_regkeyvalue:
                cmd = DeleteRegistryKeyValue(args.delete_regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_entire_regkey:
                cmd = DeleteRegistryKey(args.delete_entire_regkey)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.remediation_script:
                remediation_commands = build_remediation_commands(args.remediation_script)
                LOGGER.info(f"created {len(remediation_commands)} remediation commands from {args.remediation_script}")
                commands.extend(remediation_commands)

        # Playbook execution #
        if args.live_response_command and (args.live_response_command.startswith("play") or args.live_response_command == "pb"):
            if args.playbook_configpath:
                playbook_commands = build_playbook_commands(args.playbook_configpath)
                commands.extend(playbook_commands)
                LOGGER.info(f"loaded {len(playbook_commands)} playbook commands.")
            if args.playbook_name:
                playbook_data = get_playbook_map()[args.playbook_name]
                playbook_path = playbook_data["path"]
                playbook_commands = build_playbook_commands(playbook_path)
                commands.extend(playbook_commands)
                LOGGER.info(f"loaded {len(playbook_commands)} playbook commands.")

        # Handle LR commands #
        if commands:
            timeout = 1200  # default 20 minutes (same used by Cb)
            if not is_sensor_online(sensor):
                # Decision point: if the sensor is NOT online, give the analyst an option to wait
                LOGGER.warning(f"{sensor.id}:{sensor.hostname} is offline.")
                prompt = "Would you like to wait for the host to come online? (y/n) [y] "
                wait = input_with_timeout(prompt, default="y")
                wait = True if wait.lower() == "y" else False
                if not wait:
                    return None
                prompt = "How many days do you want to wait? [Default is 7 days] "
                timeout = input_with_timeout(prompt, default=7)
                if isinstance(timeout, str):
                    timeout = int(timeout)
                if timeout > 30:
                    LOGGER.warning(f"{timeout} days is a long time. Restricting to max of 30 days.")
                    timeout = 30
                # 86400 seconds in a day
                timeout = timeout * 86400

            if not session_manager.wait_for_active_session(sensor, timeout=timeout):
                LOGGER.error(f"reached timeout waiting for active session.")
                return False

            # we have an active session, issue the commands.
            for command in commands:
                session_manager.submit_command(command, sensor)

        if session_manager.commands:
            # Wait for issued commands to complete and process any results.
            session_manager.process_completed_commands()

    # Direct Session Interaction #
    if args.command and args.command.startswith("sess"):
        if args.list_sensor_sessions:
            print(json.dumps(sensor_live_response_sessions_by_sensor_id(cb, args.list_sensor_sessions), indent=2, sort_keys=True))

        if args.get_session_command_list:
            print(json.dumps(get_session_commands(cb, args.get_session_command_list), indent=2, sort_keys=True))

        if args.list_all_sessions:
            print(json.dumps(all_live_response_sessions(cb), indent=2, sort_keys=True))

        if args.get_session:
            print(json.dumps(get_session_by_id(cb, args.get_session), indent=2, sort_keys=True))

        if args.close_session:
            session_manager = CustomLiveResponseSessionManager(cb)
            session_manager._close_session(args.close_session)
            print(json.dumps(get_session_by_id(cb, args.close_session), indent=2, sort_keys=True))

        if args.get_command_result:
            session_id, command_id = args.get_command_result.split(":", 1)
            print(json.dumps(get_command_result(cb, session_id, command_id), indent=2, sort_keys=True))

        if args.get_file_content:
            session_id, file_id = args.get_file_content.split(":", 1)
            get_file_content(cb, session_id, file_id)

        return True
def get_process(monkeypatch):
    """Build an offline Process backed by canned JSON test data.

    Monkeypatches network-touching Process methods (segment listing, refresh,
    event accessors, tree walks) so event iteration works entirely from the
    data loaded under test_data/, and returns the patched Process.
    """
    from cbapi.response.models import CbFileModEvent
    from cbapi.response.models import CbNetConnEvent
    from cbapi.response.models import CbRegModEvent
    from cbapi.response.models import CbModLoadEvent
    from cbapi.response.models import CbCrossProcEvent
    from cbapi.response.models import CbChildProcEvent

    def _get_segments():
        # Segments are exactly the ones captured in the JSON fixture.
        proc._segments = initial_data["captured_segments"].keys()
        return proc._segments

    def _refresh():
        # No server to refresh against; pretend it succeeded.
        return True

    def _require_events():
        # Events are preloaded below; mark the process fully initialized.
        proc._full_init = True
        proc._events_loaded = True
        return

    def _retrieve_cb_info():
        return cb.server_info

    def _all_filemods():
        # Yield filemod events reconstructed from the fixture data.
        for segment_id in proc.get_segments():
            for fm in proc._events[segment_id]["filemods"]:
                timestamp = fm["timestamp"]
                seq = fm["sequence"]
                yield CbFileModEvent(proc, timestamp, seq, fm)

    def _all_netconns():
        for segment_id in proc.get_segments():
            for nc in proc._events[segment_id]["netconns"]:
                timestamp = nc["timestamp"]
                seq = nc["sequence"]
                yield CbNetConnEvent(proc, timestamp, seq, nc)

    def _all_regmods():
        for segment_id in proc.get_segments():
            for rm in proc._events[segment_id]["regmods"]:
                timestamp = rm["timestamp"]
                seq = rm["sequence"]
                yield CbRegModEvent(proc, timestamp, seq, rm)

    def _all_modloads():
        def _is_signed(self):
            return self["is_signed"]

        modloads = []
        for segment_id in proc.get_segments():
            for ml in proc._events[segment_id]["modloads"]:
                timestamp = ml["timestamp"]
                seq = ml["sequence"]
                # is_signed is normally server-derived; read it from the raw dict.
                monkeypatch.setattr(CbModLoadEvent, "is_signed", _is_signed)
                yield CbModLoadEvent(proc, timestamp, seq, ml)

    def _all_crossprocs():
        for segment_id in proc.get_segments():
            for cp in proc._events[segment_id]["crossprocs"]:
                timestamp = cp["timestamp"]
                seq = cp["sequence"]
                yield CbCrossProcEvent(proc, timestamp, seq, cp)

    def _all_childprocs():
        for segment_id in proc.get_segments():
            for cp in proc._events[segment_id]["children"]:
                timestamp = cp["timestamp"]
                seq = cp["sequence"]
                is_suppressed = cp["is_suppressed"]
                proc_data = cp["proc_data"]
                yield CbChildProcEvent(proc, timestamp, seq, cp, is_suppressed=is_suppressed, proc_data=proc_data)

    def _walk_children(callback, max_depth=1, depth=0):
        # Offline stand-in for Process.walk_children over the fixture process.
        if max_depth and depth > max_depth:
            return
        if not proc.terminated:
            try:
                callback(proc, depth=depth)
            except ObjectNotFoundError:
                pass
            else:
                proc.walk_children(callback, max_depth=max_depth, depth=depth + 1)

    def _walk_parents(callback, max_depth=1, depth=0):
        # Offline stand-in for Process.walk_parents.
        if max_depth and depth > max_depth:
            return
        try:
            parent_proc = proc
            if parent_proc and parent_proc.get("process_pid", -1) != -1:
                callback(parent_proc, depth=depth)
            else:
                return
        except ObjectNotFoundError:
            return
        else:
            parent_proc.walk_parents(callback, max_depth=max_depth, depth=depth + 1)

    # set default timezone to GMT
    set_timezone("GMT")

    initial_data = {}
    cb = fake_cb_response_api(monkeypatch)
    with open(f"{HOME_PATH}/test_data/00007c6f-0000-0a28-01d6-ffde20451832.json", "r") as fp:
        initial_data = json.load(fp)
    proc = Process(cb, "00007c6f-0000-0a28-01d6-ffde20451832", initial_data=initial_data)

    # load events
    for segment_id in initial_data["captured_segments"].keys():
        if segment_id not in proc._events:
            proc._events[segment_id] = {}
        for event_type in ["filemods", "netconns", "regmods", "modloads", "crossprocs", "children"]:
            proc._events[segment_id][event_type] = initial_data["captured_segments"][segment_id][event_type]

    # Swap the network-backed methods for the offline implementations above.
    monkeypatch.setattr(proc, "get_segments", _get_segments)
    monkeypatch.setattr(proc, "refresh", _refresh)
    monkeypatch.setattr(proc, "require_events", _require_events)
    monkeypatch.setattr(proc, "_retrieve_cb_info", _retrieve_cb_info)
    monkeypatch.setattr(proc, "all_filemods", _all_filemods)
    monkeypatch.setattr(proc, "all_netconns", _all_netconns)
    monkeypatch.setattr(proc, "all_regmods", _all_regmods)
    monkeypatch.setattr(proc, "all_modloads", _all_modloads)
    monkeypatch.setattr(proc, "all_crossprocs", _all_crossprocs)
    monkeypatch.setattr(proc, "all_childprocs", _all_childprocs)
    monkeypatch.setattr(proc, "walk_children", _walk_children)
    monkeypatch.setattr(proc, "walk_parents", _walk_parents)
    return proc
def print_childprocs(p: Process, current_segment_only: bool = False, raw_print=False):
    """Print child process spawn/terminate events for this process.

    Args:
        p: the Process whose child processes are printed.
        current_segment_only: only print events from p's current segment.
        raw_print: print raw event objects instead of summary lines.
    """
    if p.current_segment is None:
        # avoids server error calling /api/v4/process/{guid}/{segment}/event
        p.current_segment = p.get_segments()[0]

    def _print_childproc_events(childprocs):
        if raw_print:
            for cp in childprocs:
                print(cp)
            return

        # group start/end childproc events together by child process guid
        # (procguid carries a trailing segment suffix that we strip)
        organized_childprocs = {}
        for cp in childprocs:
            guid = cp.procguid[:cp.procguid.rfind("-")]
            organized_childprocs.setdefault(guid, []).append(cp)

        for cp_guid, cp_events in organized_childprocs.items():
            # there should only be two events, a spawn and terminate event
            # however, don't make assumptions
            spawn = cp_events[0]
            terminate_cp = None
            if len(cp_events) > 1:
                for cp in cp_events:
                    if cp.timestamp < spawn.timestamp:
                        spawn = cp
                    else:
                        terminate_cp = cp

            status = "unknown"
            try:
                if spawn.is_suppressed:
                    # BUG FIX: previously this assignment was unconditionally
                    # overwritten on the next line; keep "suppressed" status.
                    status = "suppressed"
                else:
                    status = "terminated" if spawn.process.terminated else "running"
                    if spawn.process.terminated and terminate_cp is None:
                        LOGGER.debug(f"notice: no termination event found. process must have terminated elsewhere?")
            except ObjectNotFoundError:
                # child process data not on the server; status stays as set above
                LOGGER.debug(f"child process not found. ")

            print(f" @{as_configured_timezone(spawn.timestamp)}: ({status}) {spawn.path} md5={spawn.md5} pid={spawn.pid} - {cp_guid}")
        print()

    print("------ CHILDPROCS ------")
    childprocs = p.childprocs if current_segment_only else p.all_childprocs()
    try:
        _print_childproc_events(childprocs)
    except Exception as e:
        # best effort: keep the tool usable even if event enumeration blows up
        LOGGER.error(f"unhandled exception when enumerating childproc events: {e}")
    return