Example #1
def convert_response_watchlists_to_single_psc_edr_watchlist(
    cb: CbThreatHunterAPI,
    response_watchlists: List[Dict],
    watchlist_name: str = None,
    watchlist_description="Consolidated Cb Respone Watchlists. Each report in this watchlist is based on a Cb Response Watchlist",
) -> List[Dict]:
    """Convert a list of Response Watchlists to PSC EDR watchlists.

    This is a many-to-one Watchlist migration.

    Args:
      cb: Cb PSC object
      response_watchlists: List of Response Watchlists in dictionary form.
      watchlist_name: The name to give the resulting Response consolidated PSC EDR Watchlist.
      watchlist_description: The description to give the resulting Watchlist.
    Returns:
      PSC Watchlist containing all Response Watchlists as intel Reports.
    """
    from cbinterface.helpers import input_with_timeout

    if watchlist_name is None:
        watchlist_name = input_with_timeout("Enter a name for the resulting PSC EDR Watchlist: ", stderr=False)
        watchlist_description = (
            input_with_timeout(
                f"Enter a description for the Watchlist [default description: {watchlist_description}] : ", stderr=False
            )
            or watchlist_description
        )

    reports = list(yield_reports_created_from_response_watchlists(cb, response_watchlists))
    if not reports:
        return None

    return create_watchlist_from_report_list(cb, watchlist_name, watchlist_description, reports)
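
A minimal usage sketch (not part of the library source): load previously exported Response watchlists from JSON and consolidate them into one PSC EDR watchlist. The cbapi import path, profile name, and JSON export file are assumptions.

import json
from cbapi.psc.threathunter import CbThreatHunterAPI

cb = CbThreatHunterAPI(profile="default")  # assumed profile name
with open("response_watchlists.json", "r") as fp:  # hypothetical export of Response watchlists
    response_watchlists = json.load(fp)

# passing watchlist_name skips the interactive name/description prompts
watchlist = convert_response_watchlists_to_single_psc_edr_watchlist(
    cb, response_watchlists, watchlist_name="Consolidated Response Watchlists"
)
if watchlist:
    print(f"created '{watchlist['name']}' with {len(watchlist['report_ids'])} reports")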
Example #2
def interactively_update_alert_state(
    cb: CbThreatHunterAPI,
    alert_id,
    state: Union["DISMISSED", "OPEN"] = None,
    remediation_state: str = None,
    comment: str = None,
) -> Dict:
    """Update alert remediation state by ID."""
    from cbinterface.helpers import input_with_timeout

    if not state:
        state = input_with_timeout("Alert state to set, DISMISSED or OPEN? [DISMISSED]: ") or "DISMISSED"
        if state not in ["DISMISSED", "OPEN"]:
            LOGGER.error(f"state must be one of [DISMISSED, OPEN], not {state}")
            return False
    if not remediation_state:
        remediation_state = input_with_timeout("State of Remediation: ") or ""
    if not comment:
        comment = input_with_timeout("Comment: ") or ""
    return update_alert_state(cb, alert_id, state, remediation_state, comment)
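
A short sketch of how this might be called, assuming a configured cbapi profile and a known alert ID; passing all three keyword arguments skips the interactive prompts.

from cbapi.psc.threathunter import CbThreatHunterAPI

cb = CbThreatHunterAPI(profile="default")  # assumed profile name
result = interactively_update_alert_state(
    cb,
    "example-alert-id",  # hypothetical alert ID
    state="DISMISSED",
    remediation_state="FIXED",
    comment="remediated by analyst",
)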
Example #3
def interactively_update_report_ioc_query(cb: CbThreatHunterAPI, report_id, ioc_id) -> Dict:
    """Prompt user for new query and update the report IOC query."""
    from cbinterface.helpers import input_with_timeout

    report = get_report(cb, report_id)
    if not report:
        return None

    ioc = [ioc for ioc in report["iocs_v2"] if ioc_id == ioc["id"]][0]
    if ioc["match_type"] != "query":
        LOGGER.warning(f"IOC={ioc_id} is not a query based IOC: {ioc}")

    print(f"Current IOC query: {ioc['values'][0]}")
    new_ioc_query = input_with_timeout("Enter new query: ", timeout=90)
    return update_report_ioc_query(cb, report_id, ioc_id, new_ioc_query)
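
A hedged usage sketch: the report and IOC IDs below are placeholders; the function prompts for the new query string (90-second timeout) and returns the updated report.

from cbapi.psc.threathunter import CbThreatHunterAPI

cb = CbThreatHunterAPI(profile="default")  # assumed profile name
updated_report = interactively_update_report_ioc_query(
    cb, "example-report-id", "example-ioc-id"  # hypothetical IDs
)
if updated_report:
    print("IOC query updated")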
Example #4
def enforce_argument_placeholders(playbook: ConfigParser,
                                  placeholders: dict = {}):
    """Some playbooks require arguments to execute. These arguments are in the form of string format placeholder keys.
    Make sure we have all of the arguments for this playbook. Prompt the user for those arguments if we don't.

    Args:
      playbook: A loaded LR playbook (ConfigParser)
      placeholders: A dictionary of placeholders we already have (if any)
    Return:
      completed placeholders dict
    """
    LOGGER.debug("Enforcing argument placeholders")
    args_needed = []
    required_args = (playbook["overview"]["required_arguments"].split(",")
                     if playbook.has_option("overview", "required_arguments")
                     else None)
    if required_args:
        args_needed = [
            arg for arg in required_args if arg not in placeholders.keys()
        ]
    for arg in args_needed:
        prompt = f"playbook requires argument {arg}: "
        placeholders[arg] = input_with_timeout(prompt)
    return placeholders
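
A minimal sketch of calling this helper with a playbook that declares required_arguments; the playbook path and placeholder key are made up for illustration.

from configparser import ConfigParser

playbook = ConfigParser()
playbook.read("playbooks/example_playbook.ini")  # hypothetical playbook path
# any argument named in [overview] required_arguments but missing here is prompted for
placeholders = enforce_argument_placeholders(playbook, {"username": "jdoe"})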
Example #5
def execute_response_arguments(cb: CbResponseAPI,
                               args: argparse.Namespace) -> bool:
    """The logic to execute response specific command line arguments.

    Args:
        cb: CbResponseAPI
        args: parsed argparse namespace
    Returns:
        True or None on success, False on failure.
    """

    if not isinstance(cb, CbResponseAPI):
        LOGGER.critical(f"expected CbResponseAPI but got '{type(cb)}'")
        return False

    # Sensor Querying #
    if args.command and (args.command == "sensor-query"
                         or args.command == "sq"):
        LOGGER.info(
            f"searching {args.environment} environment for sensor query: {args.sensor_query}..."
        )

        sensors = make_sensor_query(cb, args.sensor_query)
        if not sensors:
            return None

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(sensors) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(sensors) > 0 and print_results:
            print(
                "\n------------------------- SENSOR RESULTS -------------------------"
            )
            for sensor in sensors:
                if args.all_details:
                    print()
                    print(sensor)
                else:
                    print(sensor_info(sensor))
            print()
        return True

    # Watchlists #
    if args.command and (args.command == "response_watchlist"
                         or args.command == "rwl"):
        watchlists = watchlist_names = []
        if args.query_watchlists:
            watchlists = query_watchlists(cb, args.query_watchlists)
        elif args.list_watchlists:
            watchlists = get_all_watchlists(cb)

        if args.watchlist_names_from_stdin:
            watchlist_names = [line.strip() for line in sys.stdin]

        if args.watchlists_to_json:
            if watchlists:
                print(
                    json.dumps(
                        these_watchlists_to_list_dict(
                            cb, [wl.name for wl in watchlists])))
            if watchlist_names:
                print(
                    json.dumps(
                        these_watchlists_to_list_dict(cb, watchlist_names)))
            return
        elif len(watchlists) > 0:
            print(
                "\n------------------------- WATCHLISTS -------------------------"
            )
            for wl in watchlists:
                print(wl)

    # Process Querying #
    if args.command and (args.command.startswith("q") or args.command == "pq"):
        LOGGER.info(f"searching {args.environment} environment..")
        args.start_time = (datetime.datetime.strptime(args.start_time,
                                                      "%Y-%m-%d %H:%M:%S")
                           if args.start_time else args.start_time)
        args.last_time = (datetime.datetime.strptime(args.last_time,
                                                     "%Y-%m-%d %H:%M:%S")
                          if args.last_time else args.last_time)
        processes = make_process_query(cb,
                                       args.query,
                                       start_time=args.start_time,
                                       last_time=args.last_time,
                                       raise_exceptions=False)

        if args.facets:
            LOGGER.info("getting facet data...")
            print_facet_histogram(processes.facets())

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(processes) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(processes) > 0 and print_results:
            print(
                "\n------------------------- QUERY RESULTS -------------------------"
            )
            for proc in processes:
                print("  -------------------------")
                if args.all_details:
                    print(proc)
                else:
                    print_process_info(proc,
                                       raw_print=args.all_details,
                                       header=False)

        return True

    # Enumerations #
    if args.command and args.command == "enumerate":
        if args.logon_history:
            logon_history(cb, args.logon_history)
            return

    # Process Inspection #
    if args.command and (args.command.lower() == "inspect"
                         or args.command.lower().startswith("proc")):
        process_id = args.process_guid_options
        process_segment = None
        if "/" in args.process_guid_options:
            if not args.process_guid_options.count("/") == 1:
                LOGGER.error(
                    f"process guid/segement format error: {args.process_guid_options}"
                )
                return False
            process_id, process_segment = args.process_guid_options.split("/")
            if not re.match("[0-9]{13}", process_segment):
                LOGGER.error(
                    f"{process_segment} is not in the form of a process segment."
                )
                return False
            process_segment = int(process_segment)
        if not is_uuid(process_id):
            LOGGER.error(
                f"{process_id} is not in the form of a globally unique process id (GUID/UUID)."
            )
            return False

        try:
            proc = Process(cb, process_id, force_init=True)
            if process_segment and process_segment not in proc.get_segments():
                LOGGER.warning(
                    f"segment '{process_segment}' does not exist. Setting to first segment."
                )
                process_segment = None
            proc.current_segment = process_segment
        except ObjectNotFoundError:
            LOGGER.warning(
                f"ObjectNotFoundError - process data does not exist.")
            return False
        except Exception as e:
            LOGGER.error(f"problem finding process: {e}")
            return False

        all_inspection_args = [
            iarg for iarg in vars(args).keys() if iarg.startswith("inspect_")
        ]
        set_inspection_args = [
            iarg for iarg, value in vars(args).items()
            if iarg.startswith("inspect_") and value is True
        ]
        if not set_inspection_args:
            LOGGER.debug(f"seting all inspection arguments.")
            for iarg in all_inspection_args:
                args.__setattr__(iarg, True)

        if args.json:
            print(
                json.dumps(process_to_dict(proc,
                                           max_segments=args.segment_limit),
                           default=str))
            return

        if args.walk_and_inspect_tree:
            inspect_process_tree(
                proc,
                info=args.inspect_proc_info,
                filemods=args.inspect_filemods,
                netconns=args.inspect_netconns,
                regmods=args.inspect_regmods,
                modloads=args.inspect_modloads,
                crossprocs=args.inspect_crossprocs,
                children=args.inspect_children,
                raw_print=args.raw_print_events,
            )
            return True
        # else
        if args.inspect_process_ancestry:
            print_ancestry(proc)
        if args.inspect_process_tree:
            print_process_tree(proc)
        if args.inspect_proc_info:
            print_process_info(proc, raw_print=args.raw_print_events)
        if args.inspect_filemods:
            print_filemods(proc,
                           current_segment_only=bool(process_segment),
                           raw_print=args.raw_print_events)
        if args.inspect_netconns:
            print_netconns(proc,
                           current_segment_only=bool(process_segment),
                           raw_print=args.raw_print_events)
        if args.inspect_regmods:
            print_regmods(proc,
                          current_segment_only=bool(process_segment),
                          raw_print=args.raw_print_events)
        if args.inspect_modloads:
            print_modloads(proc,
                           current_segment_only=bool(process_segment),
                           raw_print=args.raw_print_events)
        if args.inspect_crossprocs:
            print_crossprocs(proc,
                             current_segment_only=bool(process_segment),
                             raw_print=args.raw_print_events)
        if args.inspect_children:
            print_childprocs(proc,
                             current_segment_only=bool(process_segment),
                             raw_print=args.raw_print_events)

    # Live Response Actions #
    if args.command and (args.command.lower() == "lr"
                         or args.command.lower().startswith("live")):
        # create a LR session manager
        session_manager = CustomLiveResponseSessionManager(
            cb, custom_session_keepalive=True)
        # store a list of commands to execute on this sensor
        commands = []

        try:
            sensor = Sensor(cb, args.name_or_id, force_init=True)
        except ObjectNotFoundError:
            LOGGER.info(f"searching for sensor...")
            sensor = find_sensor_by_hostname(cb, args.name_or_id)

        if not sensor:
            LOGGER.info(f"could not find a sensor.")
            return None

        if args.execute_command:
            # XXX expand this for more flexibility by making an execute parser
            # that can accept more arguments to pass to ExecuteCommand
            cmd = ExecuteCommand(args.execute_command)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        if args.sensor_isolation_toggle:
            result = None
            state = "isolated" if sensor.is_isolating else "unisolated"
            desired_state = "unisolated" if sensor.is_isolating else "isolated"
            LOGGER.info(
                f"sensor {sensor.id}:{sensor.hostname} is currently {state}. Changing state to: {desired_state}"
            )
            if sensor.is_isolating:
                result = sensor.unisolate()
            else:
                result = sensor.isolate()
            if result:
                state = "isolated" if sensor.is_isolating else "unisolated"
                LOGGER.info(
                    f"successfully {state} sensor {sensor.id}:{sensor.hostname}"
                )
            else:
                state = "unisolate" if sensor.is_isolating else "isolate"
                LOGGER.error(
                    f"failed to {state} sensor {sensor.id}:{sensor.hostname}")

        # Put File #
        if args.live_response_command and args.live_response_command.lower(
        ) == "put":
            cmd = PutFile(args.local_filepath, args.sensor_write_filepath)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        if args.create_regkey:
            cmd = CreateRegKey(args.create_regkey)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")
            if args.set_regkey_value:
                cmd = SetRegKeyValue(args.create_regkey, args.set_regkey_value)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Collection #
        if args.live_response_command and args.live_response_command.lower(
        ) == "collect":
            if args.sensor_info:
                print(sensor_info(sensor))

            if args.process_list:
                cmd = ProcessListing()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.list_directory:
                cmd = ListDirectory(args.list_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.walk_directory:
                cmd = WalkDirectory(args.walk_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.file:
                cmd = GetFile(args.file)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeypath:
                cmd = ListRegKeyValues(args.regkeypath)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeyvalue:
                cmd = RegKeyValue(args.regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.drives:
                cmd = LogicalDrives()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.memdump:
                cmd = GetSystemMemoryDump()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Remediation #
        if args.live_response_command and args.live_response_command == "remediate":
            if args.delete_file_path:
                cmd = DeleteFile(args.delete_file_path)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.kill_process_name:
                cmd = KillProcessByName(args.kill_process_name)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_regkeyvalue:
                cmd = DeleteRegistryKeyValue(args.delete_regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_entire_regkey:
                cmd = DeleteRegistryKey(args.delete_entire_regkey)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.remediation_script:
                remediation_commands = build_remediation_commands(
                    args.remediation_script)
                LOGGER.info(
                    f"created {len(remediation_commands)} remediation commands from {args.remediation_script}"
                )
                commands.extend(remediation_commands)

        # Playbook execution #
        if args.live_response_command and (
                args.live_response_command.startswith("play")
                or args.live_response_command == "pb"):
            if args.playbook_configpath:
                playbook_commands = build_playbook_commands(
                    args.playbook_configpath)
                commands.extend(playbook_commands)
                LOGGER.info(
                    f"loaded {len(playbook_commands)} playbook commands.")
            if args.playbook_name:
                playbook_data = get_playbook_map()[args.playbook_name]
                playbook_path = playbook_data["path"]
                playbook_commands = build_playbook_commands(playbook_path)
                commands.extend(playbook_commands)
                LOGGER.info(
                    f"loaded {len(playbook_commands)} playbook commands.")

        # Handle LR commands #
        if commands:
            timeout = 1200  # default 20 minutes (same used by Cb)
            if not is_sensor_online(sensor):
                # Decision point: if the sensor is NOT online, give the analyst an option to wait
                LOGGER.warning(f"{sensor.id}:{sensor.hostname} is offline.")
                prompt = "Would you like to wait for the host to come online? (y/n) [y] "
                wait = input_with_timeout(prompt, default="y")
                wait = True if wait.lower() == "y" else False
                if not wait:
                    return None
                prompt = "How many days do you want to wait? [Default is 7 days] "
                timeout = input_with_timeout(prompt, default=7)
                if isinstance(timeout, str):
                    timeout = int(timeout)
                if timeout > 30:
                    LOGGER.warning(
                        f"{timeout} days is a long time. Restricting to max of 30 days."
                    )
                    timeout = 30

                # 86400 seconds in a day
                timeout = timeout * 86400

            if not session_manager.wait_for_active_session(sensor,
                                                           timeout=timeout):
                LOGGER.error(f"reached timeout waiting for active session.")
                return False

            # we have an active session, issue the commands.
            for command in commands:
                session_manager.submit_command(command, sensor)

        if session_manager.commands:
            # Wait for issued commands to complete and process any results.
            session_manager.process_completed_commands()

    # Direct Session Interaction #
    if args.command and args.command.startswith("sess"):
        if args.list_sensor_sessions:
            print(
                json.dumps(sensor_live_response_sessions_by_sensor_id(
                    cb, args.list_sensor_sessions),
                           indent=2,
                           sort_keys=True))

        if args.get_session_command_list:
            print(
                json.dumps(get_session_commands(cb,
                                                args.get_session_command_list),
                           indent=2,
                           sort_keys=True))

        if args.list_all_sessions:
            print(
                json.dumps(all_live_response_sessions(cb),
                           indent=2,
                           sort_keys=True))

        if args.get_session:
            print(
                json.dumps(get_session_by_id(cb, args.get_session),
                           indent=2,
                           sort_keys=True))

        if args.close_session:
            session_manager = CustomLiveResponseSessionManager(cb)
            session_manager._close_session(args.close_session)
            print(
                json.dumps(get_session_by_id(cb, args.close_session),
                           indent=2,
                           sort_keys=True))

        if args.get_command_result:
            session_id, command_id = args.get_command_result.split(":", 1)
            print(
                json.dumps(get_command_result(cb, session_id, command_id),
                           indent=2,
                           sort_keys=True))

        if args.get_file_content:
            session_id, file_id = args.get_file_content.split(":", 1)
            get_file_content(cb, session_id, file_id)

    return True
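
A sketch of driving this dispatcher directly instead of via the cbinterface CLI: build a CbResponseAPI connection and a namespace carrying only the attributes the sensor-query branch reads. The profile name and query are assumptions; in practice the namespace comes from cbinterface's argument parser.

import argparse
from cbapi.response import CbResponseAPI

cb = CbResponseAPI(profile="default")  # assumed profile name
# only the attributes used by the sensor-query branch are faked here
args = argparse.Namespace(
    command="sensor-query",
    environment="default",
    sensor_query="hostname:EXAMPLE-HOST",  # hypothetical sensor query
    no_warnings=False,
    all_details=False,
)
execute_response_arguments(cb, args)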
Example #6
def execute_threathunter_arguments(cb: CbThreatHunterAPI,
                                   args: argparse.Namespace) -> bool:
    """The logic to execute psc ThreatHunter specific command line arguments.

    Args:
        cb: CbThreatHunterAPI
        args: parsed argparse namespace
    Returns:
        True or None on success, False on failure.
    """
    if not isinstance(cb, CbThreatHunterAPI):
        LOGGER.critical(f"Requires Cb PSC based API. Got '{product}' API.")
        return False

    # UBS #
    if args.command == "ubs":
        if args.from_stdin:
            args.sha256hashes.extend([line.strip() for line in sys.stdin])

        if args.sha256hashes:

            set_ubs_args = [
                arg for arg, value in vars(args).items()
                if arg.startswith("ubs_") and value is True
            ]
            if not set_ubs_args:
                LOGGER.debug(f"seting ubs metadata argument as default.")
                args.ubs_get_metadata = True

            if args.ubs_get_file:
                request_and_get_files(cb, sha256hashes=args.sha256hashes)
            if args.ubs_get_device_summary:
                summary = get_device_summary(cb, args.sha256hashes)
                if summary:
                    print(json.dumps(summary, indent=2))
            if args.ubs_get_signature_summary:
                summary = get_signature_summary(cb, args.sha256hashes)
                if summary:
                    print(json.dumps(summary, indent=2))
            if args.ubs_get_file_path_summary:
                summary = get_file_path_summary(cb, args.sha256hashes)
                if summary:
                    print(json.dumps(summary, indent=2))
            if args.ubs_get_metadata:
                # this is default if no arguments are specified with the sha256(s)
                file_metadata = get_file_metadata(
                    cb, sha256hashes=args.sha256hashes)
                if file_metadata:
                    print(json.dumps(file_metadata, indent=2))
            if args.ubs_combined_info:
                results = consolidate_metadata_and_summaries(
                    cb, args.sha256hashes)
                if results:
                    print(json.dumps(results, indent=2))
        else:
            LOGGER.error(
                f"You must specify at least one sha256 with the `--sha256` argument."
            )
            return False

        return True

    # Intel #
    if args.command == "intel":
        if args.intel_command == "alerts":

            if args.intel_alerts_command == "search":  #'device_name': ['XW7R17'],
                # NOTE TODO: implement start and end time argparse options
                # criteria = {'last_event_time': {'start': '2021-04-13T19:39:30.054855+00:00', 'end': '2021-04-14T19:39:30.054855+00:00'}, 'workflow': ['OPEN']}
                criteria = {}  # criteria dict is built here but not yet wired into get_all_alerts (see TODO above)
                if args.create_time_range:
                    criteria["create_time"] = {
                        "range": f"-{args.create_time_range}"
                    }
                results = get_all_alerts(cb,
                                         query=args.alert_query,
                                         workflow_state=args.alert_states,
                                         max_results=args.max_alerts_result)
                if results:
                    print(json.dumps(results, indent=2))

                return True

            if args.from_stdin:
                args.alert_ids.extend(
                    [line.strip().strip('"') for line in sys.stdin])

            if not args.alert_ids:
                LOGGER.error(f"You have to supply at least one alert ID.")
                return False

            if args.get_alert:
                alerts = [
                    get_alert(cb, alert_id) for alert_id in args.alert_ids
                ]
                if alerts:
                    print(json.dumps(alerts, indent=2))

            if args.open_alert:
                results = [
                    update_alert_state(
                        cb,
                        alert_id,
                        state="OPEN",
                        remediation_state=args.remediation_state,
                        comment=args.comment) for alert_id in args.alert_ids
                ]
                if results:
                    print(json.dumps(results, indent=2))

            if args.dismiss_alert:
                results = [
                    update_alert_state(
                        cb,
                        alert_id,
                        state="DISMISSED",
                        remediation_state=args.remediation_state,
                        comment=args.comment,
                    ) for alert_id in args.alert_ids
                ]
                if results:
                    print(json.dumps(results, indent=2))

            if args.interactively_update_alert:
                results = [
                    interactively_update_alert_state(cb, alert_id)
                    for alert_id in args.alert_ids
                ]
                if results:
                    print(json.dumps(results, indent=2))

        if args.intel_command == "migrate":
            response_watchlists = None
            with open(args.response_watchlist_json_data_path, "r") as fp:
                response_watchlists = json.load(fp)

            if args.one_for_one:
                results = convert_response_watchlists_to_psc_edr_watchlists(
                    cb, response_watchlists)
                LOGGER.info(
                    f"created {len(results)} PSC watchlists from {len(response_watchlists)} Response watchlists."
                )
                print("Created watchlists:")
                for wl in results:
                    print(f" + ID={wl['id']} - Title={wl['name']}")

            if args.many_to_one:
                watchlist = convert_response_watchlists_to_single_psc_edr_watchlist(
                    cb, response_watchlists)
                if not watchlist:
                    return False
                LOGGER.info(
                    f"Created \"{watchlist['name']}\" containing {len(watchlist['report_ids'])} intel reports based on {len(response_watchlists)} Response watchlists."
                )

        if args.intel_command == "watchlists":
            if args.intel_watchlist_command == "new":
                report_data = {}
                if not os.path.exists(args.report_path):
                    LOGGER.error(f"{args.report_path} does not exist.")
                    return False
                with open(args.report_path, "r") as fp:
                    report_data = json.load(fp)
                if not report_data:
                    LOGGER.error(f"failed to load report data")
                    return False
                watchlist_data = create_new_report_and_append_to_watchlist(
                    cb, args.watchlist_id, report_data)
                if watchlist_data:
                    LOGGER.info(
                        f"successfully appended new threat report to watchlist."
                    )
                return True

            if args.write_basic_threat_report_template:
                result = write_basic_report_template()
                if result:
                    LOGGER.info(f"wrote: {result}")
                return result

            if args.list_watchlists:
                watchlists = get_all_watchlists(cb)
                if args.json:
                    print(json.dumps(watchlists, indent=2))
                else:
                    for wl in watchlists:
                        print(Watchlist(cb, initial_data=wl))
                        print()

            if args.get_watchlist:
                watchlist = get_watchlist(cb, args.get_watchlist)
                if watchlist:
                    print(json.dumps(watchlist, indent=2))

            if args.watchlist_name_search:
                watchlists = get_watchlists_like_name(
                    cb, args.watchlist_name_search)
                if watchlists:
                    if args.json:
                        print(json.dumps(watchlists, indent=2))
                    else:
                        for wl in watchlists:
                            print(Watchlist(cb, initial_data=wl))
                            print()

            if args.get_watchlist_report:
                if args.json:
                    print(
                        json.dumps(get_report_with_IOC_status(
                            cb, args.get_watchlist_report),
                                   indent=2))
                else:
                    report = get_report_with_IOC_status(
                        cb, args.get_watchlist_report)
                    if report:
                        print_report(
                            report
                        )  # specifically helpful with query based IOCs

            if args.delete_watchlist_report:
                result = delete_report(cb, args.delete_watchlist_report)
                if result.status_code == 204:
                    LOGGER.info(f"deleted watchlist report")

            if args.update_ioc_query:
                report_id, ioc_id = args.update_ioc_query.split("/", 1)
                updated_report = interactively_update_report_ioc_query(
                    cb, report_id, ioc_id)
                if updated_report:
                    LOGGER.info(
                        f"Query IOC ID={ioc_id} of report ID={report_id} successfully updated."
                    )

            if args.get_ioc_status:
                report_id, ioc_id = args.get_ioc_status.split("/", 1)
                status = is_ioc_ignored(cb,
                                        report_id,
                                        ioc_id,
                                        check_existence=True)
                status = "IGNORED" if status else "ACTIVE"
                print(f"IOC ID={ioc_id} in Report ID={report_id} is {status}")

            if args.ignore_ioc:
                report_id, ioc_id = args.ignore_ioc.split("/", 1)
                status = ignore_ioc(cb, report_id, ioc_id)
                status = "IGNORED" if status else "ACTIVE"
                print(f"IOC ID={ioc_id} in Report ID={report_id} is {status}")

            if args.activate_ioc:
                report_id, ioc_id = args.activate_ioc.split("/", 1)
                status = activate_ioc(cb, report_id, ioc_id)
                status = "ACTIVE" if status else "IGNORED"
                print(f"IOC ID={ioc_id} in Report ID={report_id} is {status}")

        if args.intel_command == "feeds":
            if args.list_feeds:
                feeds = get_all_feeds(cb)
                if not feeds:
                    return None
                if args.json:
                    print(json.dumps(feeds, indent=2))
                else:
                    for f in feeds:
                        print(Feed(cb, initial_data=f))
                        print()

            if args.get_feed:
                feed = get_feed(cb, args.get_feed)
                if not feed:
                    return None
                if args.json:
                    print(json.dumps(feed, indent=2))
                else:
                    print(Feed(cb, initial_data=feed))

            if args.search_for_feed:
                feeds = search_feed_names(cb, args.search_for_feed)
                if not feeds:
                    return None
                if args.json:
                    print(json.dumps(feeds, indent=2))
                else:
                    for f in feeds:
                        print(Feed(cb, initial_data=f))
                        print()

            if args.get_feed_report:
                try:
                    feed_id, report_id = args.get_feed_report.split("/", 1)
                except ValueError:
                    feed_id, report_id = args.get_feed_report.split("-", 1)
                report = get_feed_report(cb, feed_id, report_id)
                print(json.dumps(report, indent=2))

        return True

    # Device Querying #
    if args.command and args.command.startswith("d"):
        LOGGER.info(
            f"searching {args.environment} environment for device query: {args.device_query}..."
        )
        if args.device_query.upper() == "FIELDS":
            device_meta_file = os.path.join(
                os.path.dirname(cbapi_file_path),
                "psc/defense/models/deviceInfo.yaml")
            model_data = {}
            with open(device_meta_file, "r") as fp:
                model_data = yaml.safe_load(fp.read())
            possibly_searchable_props = list(model_data["properties"].keys())
            print("Device model fields:")
            for field_name in possibly_searchable_props:
                print(f"\t{field_name}")
            return True

        if args.quarantine and args.un_quarantine:
            LOGGER.error("quarantine AND un-quarantine? ­Ъце Won't do it.")
            return False

        devices = make_device_query(cb, args.device_query)
        if not devices:
            return None

        # Quarantine?
        if args.quarantine:
            toggle_device_quarantine(cb, devices, True)
        elif args.un_quarantine:
            toggle_device_quarantine(cb, devices, False)

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(devices) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(devices) > 0 and print_results:
            print(
                "\n------------------------- PSC DEVICE RESULTS -------------------------"
            )
            for device in devices:
                if args.all_details:
                    print()
                    print(device)
                else:
                    print(device_info(device))
            print()
        return True

    # Process Querying #
    if args.command and (args.command.startswith("q") or args.command == "pq"):
        LOGGER.info(f"searching {args.environment} environment..")
        args.start_time = (datetime.datetime.strptime(args.start_time,
                                                      "%Y-%m-%d %H:%M:%S")
                           if args.start_time else args.start_time)
        args.last_time = (datetime.datetime.strptime(args.last_time,
                                                     "%Y-%m-%d %H:%M:%S")
                          if args.last_time else args.last_time)
        processes = make_process_query(
            cb,
            args.query,
            start_time=args.start_time,
            last_time=args.last_time,
            raise_exceptions=False,
            validate_query=True,
        )

        if args.facets:
            LOGGER.info("getting facet data...")
            print_facet_histogram(processes)
            # NOTE TODO - pick this v2 back up and see if it's more efficient to use
            # knowing we have to remember the childproc_name facet data we like.
            # from cbinterface.psc.query import print_facet_histogram_v2
            # print_facet_histogram_v2(cb, args.query)

        # don't display large results by default
        print_results = True
        if not args.no_warnings and len(processes) > 10:
            prompt = "Print all results? (y/n) [y] "
            print_results = input_with_timeout(prompt, default="y")
            print_results = True if print_results.lower() == "y" else False

        if len(processes) > 0 and print_results:
            print(
                "\n------------------------- QUERY RESULTS -------------------------"
            )
            for proc in processes:
                print("  -------------------------")
                if args.all_details:
                    print(proc)
                else:
                    print_process_info(proc,
                                       raw_print=args.all_details,
                                       header=False)

        return True

    # Enumerations #
    if args.command and args.command == "enumerate":
        if args.logon_history:
            logon_history(cb, args.logon_history)
            return

    # Process Inspection #
    if args.command and (args.command == "proc"
                         or args.command.startswith("i")):
        process_id = args.process_guid_options
        if not is_psc_guid(process_id):
            # check to see if the analyst passed a local file path, which we assume is a local process json file
            # if os.path.exists(args.process_guid_options):
            # XXX NOTE: create functionality sourced from process json file?
            LOGGER.error(
                f"{process_id} is not in the form of a CbThreathunter process guid."
            )
            return False

        try:
            # proc = Process(cb, process_id)
            proc = select_process(cb, process_id)
            if not proc:
                LOGGER.warning(
                    f"Process data does not exist for GUID={process_id}")
                return False
        except Exception as e:
            LOGGER.error(f"unexpected problem finding process: {e}")
            return False

        # format datetimes as needed
        args.start_time = (datetime.datetime.strptime(
            args.start_time, "%Y-%m-%d %H:%M:%S").replace(
                tzinfo=tz.gettz("GMT"))
                           if args.start_time else args.start_time)
        args.end_time = (datetime.datetime.strptime(
            args.end_time, "%Y-%m-%d %H:%M:%S").replace(
                tzinfo=tz.gettz("GMT")) if args.end_time else args.end_time)

        if args.event_search:
            for event in yield_events(proc,
                                      query=args.event_search,
                                      start_time=args.start_time,
                                      end_time=args.end_time):
                if args.raw_print_events:
                    print(
                        json.dumps(event,
                                   default=str,
                                   indent=2,
                                   sort_keys=True))
                else:
                    print(format_event_data(event))
            return True

        if args.json:
            print(
                json.dumps(
                    process_to_dict(proc,
                                    start_time=args.start_time,
                                    end_time=args.end_time,
                                    event_rows=2000),
                    default=str,
                ))
            return

        all_inspection_args = [
            iarg for iarg in vars(args).keys() if iarg.startswith("inspect_")
        ]
        set_inspection_args = [
            iarg for iarg, value in vars(args).items()
            if iarg.startswith("inspect_") and value is True
        ]
        if not set_inspection_args:
            LOGGER.debug(f"seting all inspection arguments.")
            for iarg in all_inspection_args:
                args.__setattr__(iarg, True)

        if args.walk_and_inspect_tree:
            inspect_process_tree(
                proc,
                info=args.inspect_proc_info,
                filemods=args.inspect_filemods,
                netconns=args.inspect_netconns,
                regmods=args.inspect_regmods,
                modloads=args.inspect_modloads,
                crossprocs=args.inspect_crossprocs,
                children=args.inspect_children,
                scriptloads=args.inspect_scriptloads,
                raw_print=args.raw_print_events,
                start_time=args.start_time,
                end_time=args.end_time,
            )
            return True

        if args.inspect_process_ancestry:
            print_ancestry(proc)
            print()
        if args.inspect_process_tree:
            print_process_tree(proc)
            print()
        if args.inspect_proc_info:
            print_process_info(proc, raw_print=args.raw_print_events)
        if args.inspect_filemods:
            print_filemods(proc,
                           raw_print=args.raw_print_events,
                           start_time=args.start_time,
                           end_time=args.end_time)
        if args.inspect_netconns:
            print_netconns(proc,
                           raw_print=args.raw_print_events,
                           start_time=args.start_time,
                           end_time=args.end_time)
        if args.inspect_regmods:
            print_regmods(proc,
                          raw_print=args.raw_print_events,
                          start_time=args.start_time,
                          end_time=args.end_time)
        if args.inspect_modloads:
            print_modloads(proc,
                           raw_print=args.raw_print_events,
                           start_time=args.start_time,
                           end_time=args.end_time)
        if args.inspect_crossprocs:
            print_crossprocs(proc,
                             raw_print=args.raw_print_events,
                             start_time=args.start_time,
                             end_time=args.end_time)
        if args.inspect_children:
            print_childprocs(proc,
                             raw_print=args.raw_print_events,
                             start_time=args.start_time,
                             end_time=args.end_time)
        if args.inspect_scriptloads:
            print_scriptloads(proc,
                              raw_print=args.raw_print_events,
                              start_time=args.start_time,
                              end_time=args.end_time)

        return True

    # Live Response Actions #
    if args.command and (args.command.lower() == "lr"
                         or args.command.lower().startswith("live")):
        # create a LR session manager
        session_manager = CustomLiveResponseSessionManager(
            cb, custom_session_keepalive=True)
        # store a list of commands to execute on this device
        commands = []

        LOGGER.info(f"searching for device...")
        device = None
        try:  # if device.id
            device = Device(cb, args.name_or_id)
        except ClientError:
            device = find_device_by_hostname(cb, args.name_or_id)

        if not device:
            LOGGER.info(f"could not find a device.")
            return None

        if args.execute_command:
            # XXX expand this for more flexibility by making an execute parser
            # that can accept more arguments to pass to ExecuteCommand
            cmd = ExecuteCommand(args.execute_command)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        # Quarantine?
        if args.quarantine:
            if toggle_device_quarantine(cb, [device], True):
                LOGGER.info(
                    f"Device {device.id}:{device.name} is set to quarantine.")
        elif args.un_quarantine:
            if toggle_device_quarantine(cb, [device], False):
                LOGGER.info(
                    f"Device {device.id}:{device.name} is set to NOT quarantine."
                )

        # Put File #
        if args.live_response_command and args.live_response_command.lower(
        ) == "put":
            cmd = PutFile(args.local_filepath, args.sensor_write_filepath)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")

        if args.create_regkey:
            cmd = CreateRegKey(args.create_regkey)
            commands.append(cmd)
            LOGGER.info(f"recorded command: {cmd}")
            if args.set_regkey_value:
                cmd = SetRegKeyValue(args.create_regkey, args.set_regkey_value)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Collection #
        if args.live_response_command and args.live_response_command.lower(
        ) == "collect":
            if args.sensor_info:
                # `sensor` is not defined in this PSC function; use the device helper instead
                print(device_info(device))

            if args.process_list:
                cmd = ProcessListing()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.list_directory:
                cmd = ListDirectory(args.list_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.walk_directory:
                cmd = WalkDirectory(args.walk_directory)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.file:
                cmd = GetFile(args.file)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeypath:
                cmd = ListRegKeyValues(args.regkeypath)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.regkeyvalue:
                cmd = RegKeyValue(args.regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.drives:
                cmd = LogicalDrives()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.memdump:
                cmd = GetSystemMemoryDump()
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

        # Sensor Remediation #
        if args.live_response_command and args.live_response_command == "remediate":
            if args.delete_file_path:
                cmd = DeleteFile(args.delete_file_path)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.kill_process_name:
                cmd = KillProcessByName(args.kill_process_name)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_regkeyvalue:
                cmd = DeleteRegistryKeyValue(args.delete_regkeyvalue)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.delete_entire_regkey:
                cmd = DeleteRegistryKey(args.delete_entire_regkey)
                commands.append(cmd)
                LOGGER.info(f"recorded command: {cmd}")

            if args.remediation_script:
                remediation_commands = build_remediation_commands(
                    args.remediation_script)
                LOGGER.info(
                    f"created {len(remediation_commands)} remediation commands from {args.remediation_script}"
                )
                commands.extend(remediation_commands)

        # Playbook execution #
        if args.live_response_command and (
                args.live_response_command.startswith("play")
                or args.live_response_command == "pb"):
            if args.playbook_configpath:
                playbook_commands = build_playbook_commands(
                    args.playbook_configpath)
                commands.extend(playbook_commands)
                LOGGER.info(
                    f"loaded {len(playbook_commands)} playbook commands.")
            if args.playbook_name:
                playbook_data = get_playbook_map()[args.playbook_name]
                playbook_path = playbook_data["path"]
                playbook_commands = build_playbook_commands(playbook_path)
                commands.extend(playbook_commands)
                LOGGER.info(
                    f"loaded {len(playbook_commands)} playbook commands.")

        # Handle LR commands #
        if commands:
            timeout = 1200  # default 20 minutes (same used by Cb)
            if not is_device_online(device):
                # Decision point: if the device is NOT online, give the analyst an option to wait
                LOGGER.warning(f"{device.id}:{device.name} is offline.")
                prompt = "Would you like to wait for the host to come online? (y/n) [y] "
                wait = input_with_timeout(prompt, default="y")
                wait = True if wait.lower() == "y" else False
                if not wait:
                    return None
                prompt = "How many days do you want to wait? [Default is 7 days] "
                timeout = input_with_timeout(prompt, default=7)
                if isinstance(timeout, str):
                    timeout = int(timeout)
                if timeout > 30:
                    LOGGER.warning(
                        f"{timeout} days is a long time. Restricting to max of 30 days."
                    )
                    timeout = 30

                # 86400 seconds in a day
                timeout = timeout * 86400

            if not session_manager.wait_for_active_session(device,
                                                           timeout=timeout):
                LOGGER.error(f"reached timeout waiting for active session.")
                return False

            # we have an active session, issue the commands.
            for command in commands:
                session_manager.submit_command(command, device)

        if session_manager.commands:
            # Wait for issued commands to complete and process any results.
            session_manager.process_completed_commands()

    # Direct Session Interaction #
    if args.command and args.command.startswith("sess"):
        cblr = CbThreatHunterAPI(url=cb.credentials.url,
                                 token=cb.credentials.lr_token,
                                 org_key=cb.credentials.org_key)

        # if args.list_all_sessions:
        # Not implemented with PSC
        # if args.list_sensor_sessions:
        # Not implemented with PSC

        if args.get_session_command_list:
            print(
                json.dumps(get_session_commands(cblr,
                                                args.get_session_command_list),
                           indent=2,
                           sort_keys=True))

        if args.get_session:
            print(
                json.dumps(get_session_by_id(cblr, args.get_session),
                           indent=2,
                           sort_keys=True))

        if args.close_session:
            print(
                json.dumps(close_session_by_id(cblr, args.close_session),
                           indent=2,
                           sort_keys=True))

        if args.get_command_result:
            session_id, device_id, command_id = args.get_command_result.split(
                ":", 2)
            session_id = f"{session_id}:{device_id}"
            print(
                json.dumps(get_command_result(cblr, session_id, command_id),
                           indent=2,
                           sort_keys=True))

        if args.get_file_content:
            session_id, device_id, file_id = args.get_file_content.split(
                ":", 2)
            session_id = f"{session_id}:{device_id}"
            get_file_content(cblr, session_id, file_id)
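
Similarly, a hedged sketch for the UBS branch of this dispatcher: fetch binary metadata for one sha256. The hash is a placeholder and the namespace only carries the attributes that branch reads.

import argparse
from cbapi.psc.threathunter import CbThreatHunterAPI

cb = CbThreatHunterAPI(profile="default")  # assumed profile name
args = argparse.Namespace(
    command="ubs",
    from_stdin=False,
    sha256hashes=["<sha256 placeholder>"],
    ubs_get_file=False,
    ubs_get_device_summary=False,
    ubs_get_signature_summary=False,
    ubs_get_file_path_summary=False,
    ubs_get_metadata=True,  # default behavior when no other ubs_ flag is set
    ubs_combined_info=False,
)
execute_threathunter_arguments(cb, args)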
Example #7
def deploy_lerc(device_or_sensor: Union[Device, Sensor],
                install_command: str,
                lerc_installer_path: str,
                interactive: bool = False) -> lerc_api.Client:
    """Deploy LERC to a Carbon Black Cloud Device.
    
    Args:
        device: A Carbon Black Cloud Device
        install_command: the command that installs LERC.
        lerc_installer_path: path to a LERC installer package (MSI).
    Returns:
        An instance of the installed lerc_api.Client, if successful.
    """

    hostname = device = sensor = None
    if isinstance(device_or_sensor, Device):
        from cbinterface.psc.sessions import CustomLiveResponseSessionManager
        device = device_or_sensor
        hostname = device.name[device.name.rfind('\\') +
                               1:] if '\\' in device.name else device.name
    elif isinstance(device_or_sensor, Sensor):
        from cbinterface.response.sessions import CustomLiveResponseSessionManager
        sensor = device_or_sensor
        hostname = sensor.computer_name

    # create lerc session
    ls = lerc_api.lerc_session()
    # check and see if the client's already installed
    client = None
    try:
        # NOTE: will remove proxy var from env
        client = ls.get_host(hostname)
    except:
        logger.warning("Can't reach the lerc control server")

    previously_installed = proceed_with_force = None
    if client:
        if client.status != 'UNINSTALLED':
            logger.warning(
                f"lerc server reports the client is already installed on a system with this hostname:\n{client}"
            )
            proceed_with_force = input_with_timeout(
                "Proceed with fresh install? (y/n) [n] ", default='n')
            proceed_with_force = True if proceed_with_force == 'y' else False
            if not proceed_with_force:
                return None
        else:
            previously_installed = True
            logger.info(
                "A client was previously uninstalled on this host: {}".format(
                    client))

    cb = device_or_sensor._cb

    offline = False
    timeout = 1200  # default 20 minutes (same used by Cb)
    if device and not is_device_online(device):
        # Decision point: if the device is NOT online, give the analyst an option to wait
        logger.warning(f"{device.id}:{device.name} is offline.")
        offline = True
    elif sensor and not is_sensor_online(sensor):
        # Decision point: if the sensor is NOT online, give the analyst an option to wait
        logger.warning(f"{sensor.id}:{sensor.hostname} is offline.")
        offline = True

    if offline:
        wait = "y"
        if interactive:
            prompt = "Would you like to wait for the host to come online? (y/n) [y] "
            wait = input_with_timeout(prompt, default="y")
        wait = True if wait.lower() == "y" else False
        if not wait:
            return None
        timeout = 7
        if interactive:
            prompt = "How many days do you want to wait? [Default is 7 days] "
            timeout = input_with_timeout(prompt, default=7)
            if isinstance(timeout, str):
                timeout = int(timeout)
        if timeout > 30:
            logger.warning(
                f"{timeout} days is a long time. Restricting to max of 30 days."
            )
            timeout = 30

        # 86400 seconds in a day
        timeout = timeout * 86400

    logger.info(f"waiting for active session on device ...")
    session_manager = CustomLiveResponseSessionManager(
        cb, custom_session_keepalive=True)
    if not session_manager.wait_for_active_session(device_or_sensor,
                                                   timeout=timeout):
        logger.error(f"reached timeout waiting for active session.")
        return False

    download = PutFile(lerc_installer_path, 'lercSetup.msi')
    execute = ExecuteCommand(install_command,
                             wait_for_output=False,
                             wait_timeout=60,
                             wait_for_completion=True)

    logger.info(f"submitting commands to download and install lerc.")
    if previously_installed:
        # delete any old msi package, just in-case
        session_manager.submit_command(DeleteFile('lercSetup.msi'),
                                       device_or_sensor)
    session_manager.submit_command(download, device_or_sensor)
    session_manager.submit_command(execute, device_or_sensor)
    session_manager.process_completed_commands()  # wait

    # wait and anticipate the client check-in
    wait = 5  #seconds
    attempts = 6
    logger.info(
        "~ Giving client up to {} seconds to check in with the lerc control server.."
        .format(attempts * wait))

    for i in range(attempts):
        try:
            client = ls.get_host(hostname)
        except:
            logger.warning("Can't reach the lerc control server")
            break
        if client:
            if client.status != 'UNINSTALLED':
                break
        logger.info(
            "~ giving the client {} more seconds".format(attempts * wait -
                                                         wait * i))
        time.sleep(wait)

    if not client or client.status == 'UNINSTALLED':
        logger.warning("failed to auto-confirm install with lerc server.")
        if previously_installed:
            logger.warning("client never checked in.")
        logging.info("getting install log...")
        upload_log = GetFile('lerc_install.log',
                             f"{hostname}_lerc_install.log")
        session_manager.submit_command(upload_log, device_or_sensor)
        session_manager.process_completed_commands()
        return False

    logger.info(
        "Client installed on {} at '{}' - status={} - last check-in='{}'".
        format(hostname, client.install_date, client.status,
               client.last_activity))
    return client
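
Finally, a usage sketch under stated assumptions: the Device import path, device ID, installer path, and msiexec command are illustrative, not taken from the source.

from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.models import Device  # assumed import path for the PSC Device model

cb = CbThreatHunterAPI(profile="default")  # assumed profile name
device = Device(cb, 12345)  # hypothetical device ID, mirrors the Device(cb, id) construction above
client = deploy_lerc(
    device,
    install_command="msiexec /quiet /i lercSetup.msi",  # hypothetical install command
    lerc_installer_path="/path/to/lercSetup.msi",       # hypothetical installer path
    interactive=True,
)
if client:
    print(f"lerc client status: {client.status}")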