def load_from_xml_element(self, element):
    """Populate this evaluation spec from its XML element.

    Reads the evaluation mode, target, SCAP input, optional tailoring,
    profile id, online-remediation flag and CPE hints from *element*.
    """
    mode_text = et_helpers.get_element_text(element, "mode", "sds")
    self.mode = oscap_helpers.EvaluationMode.from_string(mode_text)

    self.target = et_helpers.get_element_text(element, "target")

    self.input_ = SCAPInput()
    self.input_.load_from_xml_element(
        et_helpers.get_element(element, "input"))

    self.tailoring = SCAPTailoring()
    try:
        tailoring_element = et_helpers.get_element(element, "tailoring")
        self.tailoring.load_from_xml_element(tailoring_element)
    except RuntimeError:
        # tailoring is optional, if it's not present just skip tailoring
        pass

    self.profile_id = et_helpers.get_element_text(element, "profile")

    remediation_text = \
        et_helpers.get_element_text(element, "online_remediation")
    self.online_remediation = remediation_text == "true"

    hints_text = et_helpers.get_element_text(element, "cpe_hints")
    # hints are serialized as a single ", "-joined string
    self.cpe_hints = [] if hints_text is None else hints_text.split(", ")
def load_from_xml_element(self, element):
    """Fill this spec with data parsed from the given XML element.

    Loads mode, target, input, an optional tailoring section, the
    profile id, the online-remediation flag and any CPE hints.
    """
    self.mode = oscap_helpers.EvaluationMode.from_string(
        et_helpers.get_element_text(element, "mode", "sds"))

    self.target = et_helpers.get_element_text(element, "target")

    input_element = et_helpers.get_element(element, "input")
    self.input_ = SCAPInput()
    self.input_.load_from_xml_element(input_element)

    self.tailoring = SCAPTailoring()
    try:
        self.tailoring.load_from_xml_element(
            et_helpers.get_element(element, "tailoring"))
    except RuntimeError:
        # tailoring is optional, if it's not present just skip tailoring
        pass

    self.profile_id = et_helpers.get_element_text(element, "profile")

    self.online_remediation = (
        et_helpers.get_element_text(element, "online_remediation")
        == "true")

    self.cpe_hints = []
    cpe_hints_text = et_helpers.get_element_text(element, "cpe_hints")
    if cpe_hints_text is not None:
        # hints are stored as one ", "-separated string
        self.cpe_hints.extend(cpe_hints_text.split(", "))
def scrape_profiles(tree, xccdf_id, xccdf_ns, profile_ns, dest):
    """Collect profile id -> title pairs from a source datastream tree.

    Looks up XCCDF component-refs inside <checklists>, resolves the
    referenced components via their xlink href, and records every
    Profile found in them into *dest*.
    """
    xlink_href = "{http://www.w3.org/1999/xlink}href"

    if xccdf_id is None:
        # If xccdf_id is not specified look for profiles only in the first
        # xccdf component found in the datastream.
        ref_query = ".//{%s}checklists/{%s}component-ref[1]" \
            % (xccdf_ns, xccdf_ns)
    else:
        ref_query = ".//{%s}checklists/{%s}component-ref/[@id='%s']" \
            % (xccdf_ns, xccdf_ns, xccdf_id)

    for component_ref in tree.findall(ref_query):
        # the href starts with '#'; the rest is the component id
        component_id = component_ref.attrib[xlink_href][1:]

        profile_query = ".//{%s}component/[@id='%s']//{%s}Profile" \
            % (xccdf_ns, component_id, profile_ns)
        for profile in tree.findall(profile_query):
            profile_id = profile.get("id")
            profile_title = et_helpers.get_element_text(
                profile, "{%s}title" % (profile_ns), "")
            dest[profile_id] = profile_title
def load_from_xml_element(self, root, config_file):
    """Initialize this task from its XML root element and config path.

    The task id is derived from *config_file*; enabled flag, title,
    evaluation spec, result-retention limit and schedule are read
    from *root*.
    """
    self.id_ = Task.get_task_id_from_filepath(config_file)

    self.enabled = root.get("enabled", "false") == "true"
    self.title = et_helpers.get_element_text(root, "title")

    spec_element = et_helpers.get_element(root, "evaluation_spec")
    self.evaluation_spec = evaluation_spec.EvaluationSpec()
    self.evaluation_spec.load_from_xml_element(spec_element)

    # -1 means "keep all results"; stored as text in the XML
    keep_text = et_helpers.get_element_text(root, "max-results-to-keep", "-1")
    self.max_results_to_keep = int(keep_text)

    schedule_element = et_helpers.get_element(root, "schedule")
    self.schedule = Schedule()
    self.schedule.load_from_xml_element(schedule_element)

    self.config_file = config_file
def scrape_profiles(tree, namespace, dest):
    """Record id -> title for every XCCDF Profile found under *tree*."""
    title_tag = "{%s}title" % (namespace)

    for profile in tree.findall(".//{%s}Profile" % (namespace)):
        profile_id = profile.get("id")
        if profile_id is None:
            # profiles without an id cannot be referenced, skip them
            continue
        dest[profile_id] = et_helpers.get_element_text(profile, title_tag, "")
def load_from_xml_element(self, element):
    """Populate this spec from its XML element.

    Reads target, SCAP input, optional tailoring, profile id and the
    online-remediation flag.
    """
    self.target = et_helpers.get_element_text(element, "target")

    input_element = et_helpers.get_element(element, "input")
    self.input_ = SCAPInput()
    self.input_.load_from_xml_element(input_element)

    self.tailoring = SCAPTailoring()
    try:
        self.tailoring.load_from_xml_element(
            et_helpers.get_element(element, "tailoring"))
    except RuntimeError:
        # tailoring is optional, if it's not present just skip tailoring
        pass

    self.profile_id = et_helpers.get_element_text(element, "profile")

    remediation_text = \
        et_helpers.get_element_text(element, "online_remediation")
    self.online_remediation = remediation_text == "true"
def load_from_xml_element(self, root, config_file):
    """Load this task's state from the XML *root* of *config_file*.

    Sets the id (derived from the file path), enabled flag, title,
    evaluation spec, max results to keep and schedule.
    """
    self.id_ = Task.get_task_id_from_filepath(config_file)
    self.enabled = root.get("enabled", "false") == "true"
    self.title = et_helpers.get_element_text(root, "title")

    self.evaluation_spec = evaluation_spec.EvaluationSpec()
    self.evaluation_spec.load_from_xml_element(
        et_helpers.get_element(root, "evaluation_spec"))

    # defaults to -1, meaning results are never pruned
    self.max_results_to_keep = int(
        et_helpers.get_element_text(root, "max-results-to-keep", "-1"))

    self.schedule = Schedule()
    self.schedule.load_from_xml_element(
        et_helpers.get_element(root, "schedule"))

    self.config_file = config_file
def scrape_profiles(tree, namespace, dest):
    """Walk the tree and store id -> title for each XCCDF Profile."""
    profile_tag = "{%s}Profile" % (namespace)
    title_tag = "{%s}title" % (namespace)

    for profile in tree.iter(profile_tag):
        profile_id = profile.get("id")
        if profile_id is None:
            # anonymous profiles are not selectable, ignore them
            continue
        dest[profile_id] = et_helpers.get_element_text(
            profile, title_tag, "")
def scrape_profiles(tree, namespace, dest):
    """Collect id -> title for profiles of the first Benchmark in *tree*."""
    # TODO support multiple benchmarks in one DataStream
    benchmark = tree.find(".//{%s}Benchmark" % (namespace))
    if benchmark is None:
        return

    title_tag = "{%s}title" % (namespace)
    for profile in benchmark.findall(".//{%s}Profile" % (namespace)):
        profile_id = profile.get("id")
        if profile_id is None:
            # profiles without an id cannot be referenced, skip them
            continue
        dest[profile_id] = et_helpers.get_element_text(profile, title_tag, "")