def update_configs(
    run_json_attack: Dict[str, Any],
    run_json_benign: Dict[str, Any],
    attack_config: str,
    benign_config: str,
    attack: str,
) -> None:
    """
    Purpose:
        Update on-disk component configs based on the calibration. Could
        include increasing/decreasing threads/connections per thread
        depending on the user's resources.
    Args:
        run_json_attack: the updated attack config
        run_json_benign: the updated benign config
        attack_config: the run attack config file path
        benign_config: the run benign config file path
        attack: current attack
    Returns:
        N/A
    Raises:
        ValueError: if attack is not a supported attack
    """
    if attack == "apachekill":
        # Propagate the calibrated number of benign client IPs
        benign_json = load_json(benign_config)
        benign_json["client_options"]["num_ips"] = run_json_benign["benign"][
            "client_options"]["num_ips"]
        save_json(benign_config, benign_json)

        # Propagate the calibrated apachekill thread count
        attack_json = load_json(attack_config)
        attack_json["attack_options"]["ak_num_threads"] = run_json_attack[
            "attack"]["attack_options"]["ak_num_threads"]
        save_json(attack_config, attack_json)

        # Update the system-under-test limits and run duration
        config_file = "magicwand_components/suts/mw_apache_wp.json"
        sut_config = load_json(config_file)
        sut_config["max_clients"] = run_json_attack["sut"]["max_clients"]
        sut_config["run_duration"] = run_json_attack["sut"]["run_duration"]
        save_json(config_file, sut_config)
    else:
        # ValueError, not TypeError: the argument has the right type but an
        # unsupported value — consistent with the check in calibrate()
        raise ValueError(f"Invalid attack {attack}")
def __init__(self, log_level: int = logging.INFO) -> None:
    """
    Purpose:
        Init Component
    Args:
        log_level: Verbosity of the logger
    Returns:
        self: The MwComponent
    """
    # initialize the base component (sets up the logger)
    super().__init__(log_level=log_level)

    # load this component's config from its expected location
    config_path = "magicwand_components/benign/mw_locust.json"
    self._config = load_json(config_path)
def __init__(self, log_level: int = logging.INFO) -> None:
    """
    Purpose:
        Init Component
    Args:
        log_level: Verbosity of the logger
    Returns:
        self: The MwComponent
    """
    # initialize the base component, which configures the logger
    # (removed dead commented-out get_logger call — the base class owns this)
    super().__init__(log_level=log_level)

    # load the apachekill config from its expected location
    self._config = load_json(
        "magicwand_components/attacks/apachekill.json")
def make_150_second_run() -> None:
    """
    Purpose:
        Make run 150 seconds
    Args:
        N/A
    Returns:
        N/A
    """
    # path to the system-under-test config
    sut_config_path = "magicwand_components/suts/mw_apache_wp.json"

    # load the config, set the duration to 150 seconds, and write it back
    sut_config = load_json(sut_config_path)
    sut_config["run_duration"] = 150
    save_json(sut_config_path, sut_config)
def set_env_variables(self) -> int:
    """
    Purpose:
        Set environment variables for the apachekill run.
    Args:
        N/A
    Returns:
        stats: 0 if worked
    Raises:
        ValueError: if a required apachekill config field is missing
    """
    # Best effort: export the benign traffic seed if one is configured;
    # on any failure fall back to "None" rather than aborting the run.
    try:
        locust_config = load_json(
            "magicwand_components/benign/mw_locust.json")
        if "seed" in locust_config["client_options"]:
            os.environ["CURR_SEED"] = str(
                locust_config["client_options"]["seed"])
        else:
            os.environ["CURR_SEED"] = "None"
    except Exception as error:
        print(error)
        print(
            "Failed to load seed from mw_locust.json, defaulting to 'None'"
        )
        os.environ["CURR_SEED"] = "None"

    try:
        attack_options: Dict[str, Any] = self.config["attack_options"]
        os.environ["CURR_ATTACK_DURATION"] = str(
            attack_options["ak_duration"])
        os.environ["CURR_IP_LIMIT"] = str(attack_options["ak_num_ips"])
        # attack_delay is optional and defaults to 15
        os.environ["CURR_DELAY"] = str(
            attack_options.get("attack_delay", 15))
        os.environ["CURR_MAX_THREADS"] = str(
            attack_options["ak_num_threads"])
        return 0
    except Exception as error:
        # chain the original exception so the missing field's KeyError
        # is preserved in the traceback
        raise ValueError(
            str(error)
            + " is a required field for apachekill configs, please check the config and return once the error is corrected"
        ) from error
def make_30_second_test() -> None:
    """
    Purpose:
        Make run 30 seconds
    Args:
        N/A
    Returns:
        N/A
    """
    # sut config path
    config_file = "magicwand_components/suts/mw_apache_wp.json"

    # load sut config
    sut_config = load_json(config_file)

    # set to 30 seconds
    sut_config["run_duration"] = 30

    # save sut config via the shared helper — consistent with
    # make_150_second_run, instead of hand-rolling json.dump and
    # re-raising a context-free OSError
    save_json(config_file, sut_config)
def make_weak_ak() -> None:
    """
    Purpose:
        Make a weak apachekill
    Args:
        N/A
    Returns:
        N/A
    """
    # ak config path
    config_file = "magicwand_components/attacks/apachekill.json"

    # load ak config
    attack_config = load_json(config_file)

    # make attack weak. BUG FIX: ak_num_threads lives under
    # "attack_options" (see update_configs and set_env_variables, which
    # read/write the same file) — the original wrote a top-level
    # "ak_num_threads" key that nothing reads, so the attack was never
    # actually weakened.
    attack_config["attack_options"]["ak_num_threads"] = 1

    # save ak config via the shared helper, consistent with the other
    # config writers
    save_json(config_file, attack_config)
def calibrate(attack: str, log_level: int) -> int:
    """
    Purpose:
        Setup calibrate
    Args:
        attack: which attack to calibrate
        log_level: Current log level
    Returns:
        status: 0 if passed, -1 if failed
    Raises:
        ValueError: if attack is not a supported attack
    """
    # TODO can massage this whenever we add new attacks
    valid_attacks = ["apachekill"]
    if attack == "apachekill":
        attack_config = "configs/apachekill-only.json"
        calibrate_component = Apachekill(log_level)
        attack_component_config = "magicwand_components/attacks/apachekill.json"
    else:
        raise ValueError(
            f"Invalid attack {attack}, must be from {valid_attacks}")

    benign_config = "configs/mw_locust-only.json"
    benign_component_config = "magicwand_components/benign/mw_locust.json"
    data_version = "mw_calibrate_runs"

    # change run time to 150
    make_150_second_run()

    calibrate_count = 0
    status = 0
    while calibrate_count < MAX_RETRIES:
        # Do a benign run as a control run for comparing results
        benign_data = generate_data(benign_config, data_version, log_level,
                                    attack, calibrate_component)

        # Do an attack run with the current config settings to see if the
        # attack affects the SUT
        attack_data = generate_data(attack_config, data_version, log_level,
                                    attack, calibrate_component)

        # Load the results from the runs
        run_json_attack = load_json(attack_data["run_json_loc"])
        run_json_benign = load_json(benign_data["run_json_loc"])

        apache_df = benign_data["apache_df"]
        apache_df_attack = attack_data["apache_df"]
        rtt_df = benign_data["rtt_df"]
        rtt_df_attack = attack_data["rtt_df"]
        mem_df = benign_data["mem_df"]
        mem_df_attack = attack_data["mem_df"]

        # Compare the results of benign vs attack to ensure attack
        # affected the SUT
        (
            passed_checks,
            new_run_json_attack,
            new_run_json_benign,
        ) = calibrate_component.ratio_checker(
            run_json_attack,
            run_json_benign,
            apache_df,
            apache_df_attack,
            rtt_df,
            rtt_df_attack,
            mem_df,
            mem_df_attack,
        )

        # update the configs
        update_configs(
            new_run_json_attack,
            new_run_json_benign,
            attack_component_config,
            benign_component_config,
            attack,
        )

        # If the calibration checks all passed, then the attack is
        # calibrated. Once attack is calibrated, we can exit the loop and
        # return success. If it fails then go through the calibration
        # process again.
        if passed_checks:
            status = 0
            break
        else:
            calibrate_count += 1
            # BUG FIX: the original test was `calibrate_count > MAX_RETRIES`,
            # which can never be true inside a loop bounded by
            # `calibrate_count < MAX_RETRIES` — exhausting every retry fell
            # out of the while loop with status still 0, reporting success
            # for a failed calibration. `>=` makes the give-up path fire.
            if calibrate_count >= MAX_RETRIES:
                # use the real retry limit instead of a hardcoded "5"
                logging.info(
                    "Giving up after %s failed runs...", MAX_RETRIES)
                status = -1
                break

    return status