def test_success(raw_results):
    """A fully passing VM run reports success; a failed container remediation does not."""
    passing = common.RuleResult(raw_results["vm_passed_everything"])
    failing = common.RuleResult(raw_results["container_failed_remediation"])
    assert passing.success
    assert not failing.success
def _check_and_record_rule_scenario(self, scenario, remote_rule_dir, rule_id, remediation_available):
    """Prepare a result record, run one rule scenario, and append the serialized outcome.

    The scenario check mutates ``self._current_result`` in place; the record is
    populated with the run conditions and timestamp before the check runs.
    """
    result = common.RuleResult()
    result.conditions = common.Scenario_conditions(
        self.test_env.name,
        self.test_env.scanning_mode,
        self.remediate_using,
        self.datastream,
    )
    result.scenario = common.Scenario_run(rule_id, scenario.script)
    result.when = self.test_timestamp_str
    self._current_result = result
    self._check_rule_scenario(scenario, remote_rule_dir, rule_id, remediation_available)
    self.results.append(self._current_result.save_to_dict())
def print_result_differences(json_results):
    """Load result JSON files, compare scenario outcomes across runs, and print a summary.

    Args:
        json_results: Iterable of paths to JSON files, each containing a list
            of serialized rule results.

    Returns:
        True if every aggregated scenario ended in success, False otherwise.
    """
    # Fix: the original used json.load(open(fname, "r")) which leaked the
    # file handles; a context manager closes each file promptly.
    results = []
    for fname in json_results:
        with open(fname, "r") as f:
            results.append(json.load(f))
    # Flatten the per-file lists; sum(results, []) would be quadratic.
    rules = [common.RuleResult(r) for file_results in results for r in file_results]
    aggregated_results = aggregate_results_by_scenarios(rules)
    # Number of scenarios that ended the same way
    # despite testing conditions may have been different.
    rules_that_ended_same = 0
    # Number of scenarios that succeeded regardless of different conditions
    rules_that_ended_by_success = 0
    differences = []
    for scenario, results in aggregated_results.items():
        if len(results) < 2:
            # Guard against an empty group before indexing (robustness; the
            # aggregation presumably never yields empty lists — TODO confirm).
            if results and results[0].success:
                rules_that_ended_by_success += 1
            # At most one scenario => no difference analysis is applicable.
            continue
        difference = analyze_differences(results)
        if difference:
            rule_stem = re.sub("xccdf_org.ssgproject.content_rule_(.+)", r"\1",
                               scenario.rule_id)
            assert len(rule_stem) < len(scenario.rule_id), (
                "The rule ID '{rule_id}' has a strange form, as it doesn't have "
                "the common rule prefix.".format(rule_id=scenario.rule_id))
            msg = ("{scenario} in {rule_stem}: {diff}".format(
                scenario=scenario.script, rule_stem=rule_stem, diff=difference))
            differences.append(msg)
        else:
            rules_that_ended_same += 1
            if results[0].success:
                rules_that_ended_by_success += 1
    msg = (
        "Analyzed {total_count} scenarios. Of those,\n"
        "\t{same_count} ended the same,\n\t{success_count} were a success.\n\n"
        "{different_count} ended differently:".format(
            total_count=len(aggregated_results),
            same_count=rules_that_ended_same,
            success_count=rules_that_ended_by_success,
            different_count=len(differences)))
    print(msg)
    for d in differences:
        print("\t" + d)
    return len(aggregated_results) == rules_that_ended_by_success
def test_persistence(raw_results):
    """Round-tripping raw data through RuleResult preserves it exactly."""
    for raw in raw_results.values():
        assert common.RuleResult(raw).save_to_dict() == raw
def different_results(raw_results):
    """Return two RuleResults with differing outcomes: a failure and a success."""
    keys = ("container_failed_remediation", "vm_passed_everything")
    return [common.RuleResult(raw_results[k]) for k in keys]
def results_list(raw_results):
    """Wrap every raw result dict from *raw_results* into a RuleResult."""
    return list(map(common.RuleResult, raw_results.values()))