예제 #1
0
def main(args: list[str]):
    """Top-level entry point for a detection test run.

    Parses the command-line arguments, validates Docker/Splunkbase
    prerequisites, checks out the security content repository, stages the
    local apps (building ESCU if required), and then either generates mock
    manifests or runs the detection tests in Splunk docker containers.

    Always terminates via sys.exit(): 0 on success, 1 on any error.
    The non-zero/zero exit code reflects whether testing RAN to completion,
    not whether every individual detection passed.
    """
    # Disable insecure warnings.  We make a number of HTTPS requests to Splunk
    # docker containers that we've set up.  Without this line, we get an
    # insecure warning every time due to invalid cert.
    requests.packages.urllib3.disable_warnings()

    action, settings = modules.new_arguments2.parse(args)
    if action == "configure":
        # Done, nothing else to do
        print("Configuration complete!")
        sys.exit(0)
    elif action != "run":
        print("Unsupported action: [%s]" % (action), file=sys.stderr)
        sys.exit(1)

    if settings['mock'] is False:
        # If this is a real run, then make sure Docker is installed and running and usable.
        # If this is a mock, then that is not required.  By only checking on a non-mock
        # run, we save ourselves the need to install docker in the CI for the manifest
        # generation step.
        try:
            docker.client.from_env()
        except Exception as e:
            print(
                "Error, failed to get docker client.  Is Docker Installed and Running?\n\t%s"
                % (str(e)))
            sys.exit(1)

        # Splunkbase credentials are only required when there are Splunkbase
        # apps to download.
        credentials_needed = False
        credential_error = False
        if len(settings['splunkbase_apps']) > 0:
            credentials_needed = True

        if settings['splunkbase_username'] is None and credentials_needed:
            print("Error - you have listed apps to download from Splunkbase but have "\
                  "not provided --splunkbase_username via the command line or config file.",file=sys.stderr)
            credential_error = True

        if settings['splunkbase_password'] is None and credentials_needed:
            print("Error - you have listed apps to download from Splunkbase but have "\
                    "not provided --splunkbase_password via the command line or config file.",file=sys.stderr)
            credential_error = True

        if credential_error:
            print(
                "Please supply the required credentials to continue.\n\tQuitting...",
                file=sys.stderr)
            sys.exit(1)

    FULL_DOCKER_HUB_CONTAINER_NAME = "splunk/splunk:%s" % settings[
        'container_tag']

    if settings['num_containers'] > MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING:
        print(
            "You requested to run with [%d] containers which may use a very large amount of resources "
            "as they all run in parallel.  The maximum suggested number of parallel containers is "
            "[%d].  We will do what you asked, but be warned!" %
            (settings['num_containers'],
             MAX_RECOMMENDED_CONTAINERS_BEFORE_WARNING))

    # Check out security content if required
    try:
        # Make sure we fix up the persist_security_content argument if it is
        # passed in error (we say it exists but it doesn't).
        github_service, settings[
            'persist_security_content'] = ensure_security_content(
                settings['branch'], settings['commit_hash'],
                settings['pr_number'], settings['persist_security_content'])
        settings['commit_hash'] = github_service.commit_hash
    except Exception as e:
        print("\nFailure checking out git repository: [%s]"\
              "\n\tCommit Hash: [%s]"\
              "\n\tBranch     : [%s]"\
              "\n\tPR         : [%s]\n\tQuitting..."%
              (str(e),settings['commit_hash'],settings['branch'],settings['pr_number']),file=sys.stderr)
        sys.exit(1)

    # Make a backup of this config containing the hash and stripped credentials.
    # This makes the test perfectly reproducible.
    reproduce_test_config, _ = validate_args.validate_and_write(
        settings, output_file=None, strip_credentials=True)
    if reproduce_test_config is None:
        print("Error - there was an error writing out the file to reproduce the test.  This should not happen, as all "\
              "settings should have been validated by this point.\n\tQuitting...",file=sys.stderr)
        sys.exit(1)

    try:
        all_test_files = github_service.get_test_files(
            settings['mode'], settings['folders'], settings['types'],
            settings['detections_list'], settings['detections_file'])

        # We randomly shuffle this because there are likely patterns in searches.  For example,
        # cloud/endpoint/network likely have different impacts on the system.  By shuffling,
        # we spread out this load on a single computer, but also spread it in case
        # we are running on GitHub Actions against multiple machines.  Hopefully, this
        # will reduce the chance that some machines run and complete quickly while
        # others take a long time.
        random.shuffle(all_test_files)

    except Exception as e:
        print("Error getting test files:\n%s" % (str(e)), file=sys.stderr)
        print("\tQuitting...", file=sys.stderr)
        sys.exit(1)

    print("***This run will test [%d] detections!***" % (len(all_test_files)))

    # Set up the directory that will be used to store the local apps/apps we build
    local_volume_absolute_path = os.path.abspath(
        os.path.join(os.getcwd(), "apps"))
    try:
        # remove the directory first
        shutil.rmtree(local_volume_absolute_path, ignore_errors=True)
        os.mkdir(local_volume_absolute_path)
    except FileExistsError:
        # Directory already exists, do nothing
        pass
    except Exception as e:
        print("Error creating the apps folder [%s]: [%s]\n\tQuitting..." %
              (local_volume_absolute_path, str(e)),
              file=sys.stderr)
        sys.exit(1)
    # Add the info about the mount
    mounts = [{
        "local_path": local_volume_absolute_path,
        "container_path": "/tmp/apps",
        "type": "bind",
        "read_only": True
    }]

    # Check to see if we want to install ESCU and whether it was previously generated and we should use that file
    if 'SPLUNK_ES_CONTENT_UPDATE' in settings['local_apps'] and settings[
            'local_apps']['SPLUNK_ES_CONTENT_UPDATE']['local_path'] is not None:
        # Using a pregenerated ESCU, no need to build it
        pass
    elif 'SPLUNK_ES_CONTENT_UPDATE' not in settings['local_apps']:
        print(
            "%s was not found in %s.  We assume this is an error and shut down.\n\t"
            "Quitting..." %
            ('SPLUNK_ES_CONTENT_UPDATE', "settings['local_apps']"),
            file=sys.stderr)
        sys.exit(1)
    else:
        # Generate the ESCU package from this branch.
        source_path = generate_escu_app(settings['persist_security_content'])
        settings['local_apps']['SPLUNK_ES_CONTENT_UPDATE'][
            'local_path'] = source_path

    # Copy all the apps, to include ESCU (whether pregenerated or just generated)
    copy_local_apps_to_directory(settings['local_apps'],
                                 local_volume_absolute_path)

    # If this is a mock run, finish it now
    if settings['mock']:
        # finish_mock splits the run into per-container config manifests
        # instead of actually executing the tests.
        if finish_mock(settings, all_test_files):
            # mock was successful!
            print("Mock successful!  Manifests generated!")
            sys.exit(0)
        else:
            print(
                "There was an unrecoverable error during the mock.\n\tQuitting...",
                file=sys.stderr)
            sys.exit(1)

    # Add some files that always need to be copied to the container to set up indexes and datamodels.
    files_to_copy_to_container = OrderedDict()
    files_to_copy_to_container["INDEXES"] = {
        "local_file_path": index_file_local_path,
        "container_file_path": index_file_container_path
    }
    files_to_copy_to_container["DATAMODELS"] = {
        "local_file_path": datamodel_file_local_path,
        "container_file_path": datamodel_file_container_path
    }
    files_to_copy_to_container["AUTHORIZATIONS"] = {
        "local_file_path": authorizations_file_local_path,
        "container_file_path": authorizations_file_container_path
    }

    try:
        cm = container_manager.ContainerManager(
            all_test_files,
            FULL_DOCKER_HUB_CONTAINER_NAME,
            settings['local_base_container_name'],
            settings['num_containers'],
            settings['local_apps'],
            settings['splunkbase_apps'],
            settings['branch'],
            settings['commit_hash'],
            reproduce_test_config,
            files_to_copy_to_container=files_to_copy_to_container,
            web_port_start=8000,
            management_port_start=8089,
            mounts=mounts,
            show_container_password=settings['show_splunk_app_password'],
            container_password=settings['splunk_app_password'],
            splunkbase_username=settings['splunkbase_username'],
            splunkbase_password=settings['splunkbase_password'],
            reuse_image=settings['reuse_image'],
            interactive_failure=not settings['no_interactive_failure'],
            interactive=settings['interactive'])
    except Exception as e:
        print(
            "Error - unrecoverable error trying to set up the containers: [%s].\n\tQuitting..."
            % (str(e)),
            file=sys.stderr)
        sys.exit(1)

    try:
        result = cm.run_test()
    except Exception as e:
        print(
            "Error - there was an error running the tests: [%s]\n\tQuitting..."
            % (str(e)),
            file=sys.stderr)
        sys.exit(1)

    # Return code indicates whether testing succeeded and all tests were run.
    # It does NOT indicate that all tests passed!
    if result is True:
        print("Test Execution Successful")
        sys.exit(0)
    else:
        print("Test Execution Failed - review the logs for more details")
        sys.exit(1)
예제 #2
0
def finish_mock(
        settings: dict,
        detections: list[str],
        output_file_template: str = "prior_config/config_tests_%d.json"
) -> bool:
    """Finish a mock run by writing one config manifest per container.

    Rebuilds the prior_config/apps directory, copies the local apps into it,
    then round-robins the detection test files across
    settings['num_containers'] manifests so each manifest can later be run
    independently (e.g. one per CI machine).

    Args:
        settings: fully-validated run settings; mutated copies are written out.
        detections: test file paths (tests/.../*.test.yml) to distribute.
        output_file_template: per-container output path; %d is the container
            index.

    Returns:
        True if every manifest was written and validated, False on any error.
    """
    num_containers = settings['num_containers']

    try:
        # Remove the prior config directory if it exists.  If not, continue
        shutil.rmtree("prior_config", ignore_errors=True)

        # We want to make the prior_config directory and the prior_config/apps directory
        os.makedirs("prior_config/apps")
    except FileExistsError:
        # We just removed the tree above, so hitting this means something
        # re-created it out from under us.
        print(
            "Directory prior_config/apps exists, but we just deleted it!\n\tQuitting...",
            file=sys.stderr)
        return False
    except Exception as e:
        print(
            "Some error occured when trying to make the configs folder: [%s]\n\tQuitting..."
            % (str(e)),
            file=sys.stderr)
        return False

    # Copy the apps to the appropriate local.  This will also update
    # the app paths in settings['local_apps']
    copy_local_apps_to_directory(settings['local_apps'], "prior_config/apps")

    for output_file_index in range(num_containers):
        fname = output_file_template % (output_file_index)

        # Get every num_containers'th detection for this file (round-robin split)
        detection_tests = detections[output_file_index::num_containers]
        normalized_detection_names = []
        # Normalize the test filename to the name of the detection instead.
        # These are what we should write to the file
        for d in detection_tests:
            filename = os.path.basename(d)
            filename = filename.replace(".test.yml", ".yml")
            leading = os.path.split(d)[0]
            leading = leading.replace("tests/", "detections/")
            new_name = os.path.join("security_content", leading, filename)
            normalized_detection_names.append(new_name)

        # Generate an appropriate config file for this test
        mock_settings = copy.deepcopy(settings)
        # This may be able to support as many as 2 for GitHub Actions...
        # we will have to determine in testing.
        mock_settings['num_containers'] = 1

        # Must be selected since we are passing in a list of detections
        mock_settings['mode'] = 'selected'

        # Pass in the list of detections to run
        mock_settings['detections_list'] = normalized_detection_names

        # We want to persist security content and run with the escu package that we created.
        # Note that if we haven't checked this out yet, we will check it out for you.
        mock_settings['persist_security_content'] = True

        # The generated manifests are meant to be REAL (non-mock) runs.
        mock_settings['mock'] = False

        # Make sure that it still validates after all of the changes
        try:
            with open(fname, 'w') as cfg:
                validated_settings, b = validate_and_write(mock_settings, cfg)
                if validated_settings is None:
                    print(
                        "There was an error validating the updated mock settings.\n\tQuitting...",
                        file=sys.stderr)
                    return False

        except Exception as e:
            print("Error writing config file %s: [%s]\n\tQuitting..." %
                  (fname, str(e)),
                  file=sys.stderr)
            return False

    return True
예제 #3
0
def configure_action(args) -> tuple[str, dict]:
    """Interactively build a test configuration.

    Starts from args.input_config_file (or the schema defaults when no file
    is given), prompts the user for every setting, coerces each answer into
    valid JSON according to the schema's declared type, validates the result,
    and writes it to args.output_config_file.

    Args:
        args: parsed argparse namespace with input_config_file and
            output_config_file attributes.

    Returns:
        ("configure", validated_settings) on success; exits the process with
        status 1 on validation failure.
    """
    if args.input_config_file is None:
        settings, schema = validate_args.validate({})
    else:
        settings, schema = validate_args.validate_file(args.input_config_file)

    if settings is None:
        print("Failure while processing settings\n\tQuitting...",
              file=sys.stderr)
        sys.exit(1)

    new_config = {}
    for arg in settings:
        default = settings[arg]
        # Show defaults with double quotes so the displayed value is valid JSON.
        default_string = str(default).replace("'", '"')

        if 'enum' in schema['properties'][arg]:
            choice = input("%s [default: %s | choices: {%s}]: " %
                           (arg, default_string, ','.join(
                               schema['properties'][arg]['enum'])))
        else:
            choice = input("%s [default: %s]: " % (arg, default_string))
        choice = choice.strip()
        if len(choice) == 0:
            print("\tNothing entered, using default:")
            new_config[arg] = default
            formatted_print = default
        else:
            if choice.lower() in [
                    "true", "false"
            ] and schema['properties'][arg]['type'] == "boolean":
                # Booleans parse directly from their lowercase JSON form.
                new_config[arg] = json.loads(choice.lower())
                formatted_print = choice.lower()
            else:
                # Coerce the raw answer into a valid JSON literal:
                # quote bool-looking or digit strings that are NOT meant to be
                # parsed as bools/integers for this field...
                if choice in [
                        'true', 'false'
                ] or (choice.isdigit()
                      and schema['properties'][arg]['type'] != "integer"):
                    choice = '"' + choice + '"'
                # replace all single quotes with doubles quotes to make valid json
                elif "'" in choice:
                    print(
                        '''Found %d single quotes (') in input... we will convert these to double quotes (") to ensure valid json.'''
                        % (choice.count("'")))
                    choice = choice.replace("'", '"')
                elif '"' in choice:
                    # Do nothing
                    pass
                elif choice.isdigit():
                    pass
                else:
                    choice = '"' + choice + '"'

                new_config[arg] = json.loads(choice)
                formatted_print = choice
        # We print out choice instead of new_config[arg] because the json.loads() messes up the quotation marks again
        print("\t{0}\n".format(formatted_print))

    # Now parse the new config and make sure it's good
    validated_new_settings, schema = validate_args.validate_and_write(
        new_config,
        args.output_config_file,
        skip_password_accessibility_check=False)
    if validated_new_settings is None:
        print("Could not update settings.\n\tQuitting...", file=sys.stderr)
        sys.exit(1)

    return ("configure", validated_new_settings)
예제 #4
0
def outputResultsJSON(output_filename: str, data: list[dict], baseline: OrderedDict,
                      failure_manifest_filename: str = "detection_failure_manifest.json",
                      output_folder: str = "",
                      summarization_reproduce_failure_config: dict = None) -> tuple[bool, int, int, int, int]:
    """Write a JSON summary of all detection test results and, when there are
    failures, a config manifest that reproduces ONLY the failed tests locally.

    Args:
        output_filename: name of the summary JSON file.
        data: per-detection result dicts; each must contain boolean keys
            'success' and 'error' plus a 'detection_file' path.
        baseline: environment/baseline info embedded in the summary; its
            'branch' and 'commit_hash' entries seed the failure manifest.
        failure_manifest_filename: name of the reproduce-failures config file.
        output_folder: directory both files are written into.
        summarization_reproduce_failure_config: base settings merged into the
            failure manifest.  Defaults to an empty dict.

    Returns:
        (success, total_tests, passed, failed, failed_with_error).  Note that
        total failures is the 'failed' count; failed_with_error counts JUST
        errors (and every error is also a failure).

    Raises:
        Re-raises any exception encountered while generating the files, after
        logging it along with the raw data.
    """
    # Avoid the shared-mutable-default-argument pitfall: create a fresh dict
    # per call instead of using `={}` in the signature.
    if summarization_reproduce_failure_config is None:
        summarization_reproduce_failure_config = {}

    success = True

    try:
        test_count = len(data)
        # Passed
        pass_count = len([x for x in data if x['success']])

        # A failure or an error
        fail_count = len([x for x in data if not x['success']])

        # An error (every error is also a failure)
        fail_and_error_count = len([x for x in data if x['error']])

        # A failure without an error
        fail_without_error_count = len(
            [x for x in data if not x['success'] and not x['error']])

        # This number should always be zero...
        error_and_success_count = len(
            [x for x in data if x['success'] and x['error']])
        if error_and_success_count > 0:
            print("Error - a test was successful, but also included an error. This should be impossible.", file=sys.stderr)
            success = False

        if test_count != (pass_count + fail_count):
            print("Error - the total tests [%d] does not equal the pass[%d]/fails[%d]" % (test_count, pass_count, fail_count))
            success = False

        # NOTE(review): 'result' is built but never printed or returned here —
        # possibly a missing print; kept for parity with the original.
        if fail_count > 0:
            result = "FAIL for %d detections" % (fail_count)
            success = False
        else:
            result = "PASS for all %d detections" % (pass_count)

        summary = {"TOTAL_TESTS": test_count, "TESTS_PASSED": pass_count,
                   "TOTAL_FAILURES": fail_count, "FAIL_ONLY": fail_without_error_count,
                   "FAIL_AND_ERROR": fail_and_error_count}

        # Sort errors first, then failures, then passes; ties broken by file name.
        data_sorted = sorted(data, key=lambda k: (-k['error'], k['success'], k['detection_file']))
        with open(os.path.join(output_folder, output_filename), "w") as jsonFile:
            json.dump({'summary': summary, 'baseline': baseline, 'results': data_sorted}, jsonFile, indent="    ")

        # Generate a failure manifest that the user can download to reproduce
        # and test ONLY the failures locally.  This makes it easy to test and
        # debug ONLY those that failed.  No need to test the ones that succeeded!
        fail_list = [os.path.join("security_content/detections", x['detection_file']) for x in data_sorted if not x['success']]

        if len(fail_list) > 0:
            failures_test_override = copy.deepcopy(summarization_reproduce_failure_config)
            failures_test_override.update({"detections_list": fail_list, "no_interactive_failure": False,
                                    "num_containers": 1, "branch": baseline["branch"], "commit_hash": baseline["commit_hash"],
                                    "mode": "selected", "show_splunk_app_password": True})
            with open(os.path.join(output_folder, failure_manifest_filename), "w") as failures:
                validate_args.validate_and_write(failures_test_override, failures)
    except Exception as e:
        print("There was an error generating [%s]: [%s]" % (output_filename, str(e)), file=sys.stderr)
        print(data)
        # Bare raise preserves the original traceback (raise(e) would reset it).
        raise

    return success, test_count, pass_count, fail_count, fail_and_error_count
예제 #5
0
def parse(args) -> tuple[str, dict]:
    '''Parse the command line into an (action, settings) pair.

    Builds the "configure" and "run" subcommand parsers, ensures a default
    config file exists (creating one if missing), strips un-passed optional
    arguments so they do not clobber config-file values, and dispatches to
    the selected subcommand's handler.

    Returns:
        (action_name, validated_settings) as produced by the subcommand
        handler (configure_action or run_action).  Exits with status 1 on
        any error raised during dispatch.
    '''

    import os
    # if there is no default config file, then generate one
    if not os.path.exists(DEFAULT_CONFIG_FILE):
        print("No default configuration file [%s] found.  Creating one..." %
              (DEFAULT_CONFIG_FILE))
        with open(DEFAULT_CONFIG_FILE, 'w') as cfg:
            # Password accessibility is skipped here because this file is a
            # freshly generated template with no real credentials yet.
            validate_args.validate_and_write(
                {}, cfg, skip_password_accessibility_check=True)

    parser = argparse.ArgumentParser(
        description=
        "Use 'SOME_PROGRAM_NAME_STRING --help' to get help with the arguments")
    # With no subcommand, just print the help text.
    parser.set_defaults(func=lambda _: parser.print_help())

    actions_parser = parser.add_subparsers(title="Action")

    # Configure parser
    configure_parser = actions_parser.add_parser("configure",
                                                 help="Configure a test run")
    configure_parser.set_defaults(func=configure_action)
    configure_parser.add_argument(
        '-i',
        '--input_config_file',
        required=False,
        type=argparse.FileType('r'),
        help="The config file to base the configuration off of.")
    configure_parser.add_argument(
        '-o',
        '--output_config_file',
        required=False,
        default=DEFAULT_CONFIG_FILE,
        type=argparse.FileType('w'),
        help="The config file to write the configuration off of.")

    # Run parser
    run_parser = actions_parser.add_parser("run", help="Run a test")
    run_parser.set_defaults(func=run_action)
    run_parser.add_argument(
        '-c',
        '--config_file',
        required=False,
        type=argparse.FileType('r'),
        default=DEFAULT_CONFIG_FILE,
        help="The config file for the test.  Note that this file "
        "cannot be changed (except for credentials that can be "
        "entered on the command line).")

    run_parser.add_argument(
        '-user',
        '--splunkbase_username',
        required=False,
        type=str,
        help="Username for login to splunkbase.  This is required "
        "if downloading packages from Splunkbase.  While this can "
        "be stored in the config file, it is strongly recommended "
        "to enter it at runtime.")

    run_parser.add_argument('-b',
                            '--branch',
                            required=False,
                            type=str,
                            help="The branch to run the tests on.")

    run_parser.add_argument('-hash',
                            '--commit_hash',
                            required=False,
                            type=str,
                            help="The hash to run the tests on.")

    run_parser.add_argument('-pr',
                            '--pr_number',
                            required=False,
                            type=int,
                            help="The Pull request to run the tests on.")

    run_parser.add_argument(
        '-m',
        '--mode',
        required=False,
        type=str,
        help="The mode all, changes, or selected for the testing.")

    run_parser.add_argument(
        '-pass',
        '--splunkbase_password',
        required=False,
        type=str,
        help="Password for login to splunkbase.  This is required if "
        "downloading packages from Splunkbase.  While this can be "
        "stored in the config file, it is strongly recommended "
        "to enter it at runtime.")

    run_parser.add_argument(
        '-splunkpass',
        '--splunk_app_password',
        required=False,
        type=str,
        help="Password for login to the splunk app.  If you don't "
        "provide one here or in the config, it will be generated "
        "automatically for you.")

    run_parser.add_argument(
        "-show_pass",
        "--show_splunk_app_password",
        required=False,
        action="store_true",
        help="The password to login to the Splunk Server.  If the config "
        "file is set to true, it will override the default False for this.  True "
        "will override the default value in the config file.")

    run_parser.add_argument(
        "-mock",
        "--mock",
        required=False,
        action="store_true",
        help=
        "Split into multiple configs, don't actually run the tests. If the config "
        "file is set to true, it will override the default False for this.  True "
        "will override the default value in the config file.")

    run_parser.add_argument(
        "-n",
        "--num_containers",
        required=False,
        type=int,
        help="The number of Splunk containers to run or mock")

    run_parser.add_argument("-nif", "--no_interactive_failure", required=False,
                            action="store_true",
                            help="After a detection fails, pause and allow the user to log into "\
                            "the Splunk server to interactively debug the failure.  Wait for the user "\
                            "to hit enter before removing the test data and moving on to the next test.")

    run_parser.add_argument("-i", "--interactive", required=False,
                            action="store_true",
                            help="After a detection runs, pause and allow the user to log into "\
                            "the Splunk server to debug the detection.  Wait for the user "\
                            "to hit enter before removing the test data and moving on to the next test.")

    # NOTE(review): the 'args' parameter is shadowed here — parse_args() reads
    # sys.argv directly and the value passed in by the caller is ignored.
    # Confirm whether callers expect parse_args(args) instead.
    args = parser.parse_args()

    # Run the appropriate parser
    try:
        # If one of these arguments is not passed on the command line, don't overwrite its config
        # file value with None - keep the config file value
        keys = list(args.__dict__.keys())
        for key in keys:

            # We have to do the check separately because booleans using the --store_true
            # action have an implict default=False value, even if we don't set it. We cannot
            # set their value to something else, like None

            # Don't overwite booleans
            if args.__dict__[key] is False and key in [
                    "show_splunk_app_password", "mock",
                    "no_interactive_failure", "interactive"
            ]:
                del args.__dict__[key]
            # Don't overwrite other values
            elif args.__dict__[key] is None and key in [
                    "splunkbase_username", "branch", "commit_hash",
                    "pr_number", "mode", "splunkbase_password",
                    "num_containers"
            ]:
                del args.__dict__[key]

        action, settings = args.func(args)

        return action, settings
    except Exception as e:
        print("Unknown Error Validating Json Configuration - [%s]" % (str(e)))
        sys.exit(1)