Example #1
    def update_config_value(self):
        self._encode_authentication()

        set_attr_funcs = (self._san_address,
                          self._san_user,
                          self._san_password,
                          self._san_product,
                          self._san_protocol,
                          self._lun_type,
                          self._lun_ready_wait_interval,
                          self._lun_copy_wait_interval,
                          self._lun_timeout,
                          self._lun_write_type,
                          self._lun_prefetch,
                          self._lun_policy,
                          self._lun_read_cache_policy,
                          self._lun_write_cache_policy,
                          self._storage_pools,
                          self._iscsi_default_target_ip,
                          self._iscsi_info,)

        tree = ET.parse(self.conf.cinder_huawei_conf_file)
        xml_root = tree.getroot()
        for f in set_attr_funcs:
            f(xml_root)
Example #2
    def _handler(request, response):
        from defusedxml import ElementTree
        response.update_status('PyWPS Process started.', 0)

        fn = request.inputs['polygon'][0].file

        ns = {'gml': 'http://www.opengis.net/gml'}
        poly = ElementTree.parse(fn)

        # Find the first polygon in the file.
        e = poly.find('.//gml:Polygon', ns)

        # Get the coordinates
        c = e.find('.//gml:coordinates', ns).text
        coords = [tuple(map(float, p.split(','))) for p in c.split(' ')]

        # Compute the average
        n = len(coords)
        x, y = zip(*coords)
        cx = sum(x) / n
        cy = sum(y) / n

        response.outputs['output'].data = '{:.5f},{:.5f}'.format(cx, cy)

        response.update_status('PyWPS Process completed.', 100)
        return response
Example #3
 def get_dom():
     # Load the XML on first use and keep it in memory in a global
     # variable. This is perhaps not the best design.
     global XML_DOM
     if XML_DOM is None:
         XML_DOM = ET.parse(XML_FILE)
     return XML_DOM
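
The comment above already questions the global-variable design. A hedged alternative sketch (XML_FILE here is a hypothetical path standing in for the original constant) lets functools cache the parsed tree instead of managing a global by hand:

import functools
import xml.etree.ElementTree as ET

XML_FILE = 'data.xml'  # hypothetical path, stands in for the original constant

@functools.lru_cache(maxsize=1)
def get_dom():
    # Parsed on the first call only; later calls return the cached tree.
    return ET.parse(XML_FILE)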
Example #4
   def extract(self, data, dependency_results):
      results_dir = tempfile.mkdtemp() + '/'
      temp_pdf_file = extraction.utils.temp_file(data)

      try:
         command_args = ['java', '-jar', config.ALGORITHMS_JAR_PATH, config.ALGORITHMS_PERL_PATH, 'f', temp_pdf_file, results_dir]
         status, stdout, stderr = extraction.utils.external_process(command_args, timeout=20)
      except subprocess.TimeoutExpired:
         shutil.rmtree(results_dir)
         raise RunnableError('Algorithms Jar timed out while processing document')
      finally:
         os.remove(temp_pdf_file)

      if status != 0:
         raise RunnableError('Algorithms Jar Failure. Possible error:\n' + stderr)

      paths = glob.glob(results_dir + '*.xml')
      if len(paths) != 1:
         raise RunnableError('Wrong number of results files from Algorithms Jar.')

      tree = safeET.parse(paths[0])
      xml_root = tree.getroot()

      shutil.rmtree(results_dir)

      return ExtractorResult(xml_result=xml_root)
Example #5
def findPlayer(saveFileLocation, read_data=False):
    if not read_data:
        root = ET.parse(saveFileLocation).getroot()
    else:
        root = ET.fromstring(saveFileLocation)
    player = root.find("player")
    return player
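
A brief usage sketch for the two call modes (the save path and XML string here are hypothetical):

player = findPlayer('SaveGameInfo')  # default: parse a file path
player = findPlayer('<SaveGame><player /></SaveGame>', read_data=True)  # parse a raw XML string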
Example #6
    def setUp(self):
        TestFunctional.setUp(self)

        # Set pbshook frequency to 10 seconds
        self.server.manager(MGR_CMD_SET, PBS_HOOK,
                            {'enabled': 'true', 'freq': 10},
                            id='PBS_alps_inventory_check', expect=True)

        momA = list(self.moms.values())[0]
        if not momA.is_cray():
            self.skipTest("%s: not a cray mom." % (momA.shortname))
        mom_config = momA.parse_config()
        if '$alps_client' not in mom_config:
            self.skipTest("alps_client not set in mom config.")

        if '$vnode_per_numa_node' in mom_config:
            momA.unset_mom_config('$vnode_per_numa_node', False)

        momA.add_config({'$logevent': '0xffffffff'})

        # check if required BASIL version available on the machine.
        for ver in self.basil_version:
            xml_out = self.query_alps(ver, 'QUERY', 'ENGINE')
            xml_tree = ET.parse(xml_out)
            os.remove(xml_out)
            response = xml_tree.find(".//ResponseData")
            status = response.attrib['status']
            if status == "SUCCESS":
                self.available_version = ver
                break
        if self.available_version == "":
            self.skipTest("No supported basil version found on the platform.")

        # Reset nodes
        self.reset_nodes(momA.shortname)
Example #7
	def __init__( self ):
		try:
			self.mypath = os.path.dirname( sys.argv[0] )
			tree = ET.parse( self.mypath + '/addon.xml' )
			version = tree.getroot().attrib['version']
			AppLogger.__init__( self, os.path.basename( sys.argv[0] ), version )
		except Exception:
			AppLogger.__init__( self, os.path.basename( sys.argv[0] ), '0.0' )
Example #8
    def test_InventoryVnodes(self):
        """
        This test validates the vnode created using alps BASIL 1.4 & 1.7
        inventory query response.
        """

        # Parse inventory query response and fetch node information.
        xml_out = self.query_alps('1.4', 'QUERY', 'INVENTORY')
        xml_tree = ET.parse(xml_out)
        os.remove(xml_out)
        inventory_1_4_el = xml_tree.find(".//Inventory")
        hn = inventory_1_4_el.attrib["mpp_host"]

        knl_vnodes = {}
        if self.available_version == '1.7':
            knl_vnodes = self.get_knl_vnodes()

        # Fill vnode structure using BASIL response
        for node in inventory_1_4_el.getiterator('Node'):
            role = node.attrib["role"]
            if role == 'BATCH':
                # XML values
                node_id = node.attrib["node_id"]
                cu_el = node.findall('.//ComputeUnit')
                mem_el = node.findall('.//Memory')
                ac_el = node.findall('.//Accelerator')
                page_size_kb = mem_el[0].attrib["page_size_kb"]
                page_count = mem_el[0].attrib["page_count"]

                vnode = self.init_inventory_node()
                vnode['arch'] = node.attrib['architecture']
                vnode['vnode'] = hn + '_' + node_id
                vnode['vntype'] = "cray_compute"
                vnode['mem'] = str(int(page_size_kb) *
                                   int(page_count) * len(mem_el)) + "kb"
                vnode['host'] = vnode['vnode']
                vnode['PBScraynid'] = node_id
                vnode['PBScrayhost'] = hn
                vnode['ncpus'] = str(len(cu_el))
                if ac_el:
                    vnode['naccelerators'] = str(len(ac_el))
                    vnode['accelerator_memory'] = str(
                        ac_el[0].attrib['memory_mb']) + "mb"
                    vnode['accelerator_model'] = ac_el[0].attrib['family']

                if node_id in knl_vnodes:
                    vnode['hbmem'] = knl_vnodes[node_id]['hbmem']
                    vnode['current_aoe'] = knl_vnodes[node_id]['current_aoe']
                    vnode['vnode'] = hn + '_' + node_id

                # Compare xml vnode with pbs node.
                self.logger.info("Validating vnode:%s" % (vnode['vnode']))
                self.comp_node(vnode)
Example #9
    def __init__(self, filename, test):
        bug_patterns = dict()
        dupes = dict()

        SEVERITY = {
            '1': 'High',
            '2': 'Medium',
            '3': 'Low'
        }

        tree = ET.parse(filename)
        root = tree.getroot()

        for pattern in root.findall('BugPattern'):
            plain_pattern = re.sub(r'<[b-z/]*?>|<a|</a>|href=', '', ET.tostring(pattern.find('Details'), method='text'))
            bug_patterns[pattern.get('type')] = plain_pattern

        for bug in root.findall('BugInstance'):
            desc = ''
            for message in bug.itertext():
                desc += message

            dupe_key = bug.get('instanceHash')

            title = bug.find('ShortMessage').text
            cwe = bug.get('cweid', default=0)
            severity = SEVERITY[bug.get('priority')]
            description = desc
            mitigation = bug_patterns[bug.get('type')]
            impact = 'N/A'
            references = 'N/A'

            if dupe_key in dupes:
                finding = dupes[dupe_key]
            else:
                finding = Finding(
                    title=title,
                    cwe=cwe,
                    severity=severity,
                    description=description,
                    mitigation=mitigation,
                    impact=impact,
                    references=references,
                    test=test,
                    active=False,
                    verified=False,
                    numerical_severity=Finding.get_numerical_severity(severity),
                    static_finding=True
                )
                dupes[dupe_key] = finding

        self.items = dupes.values()
Example #10
    def get_context_data(self, **kwargs):
        target = self.request.GET.get('target')

        try:
            root = etree.parse(self.request, forbid_dtd=True).getroot()
            ticket = root.find('.//{urn:oasis:names:tc:SAML:1.0:protocol}AssertionArtifact').text
        except (etree.ParseError, ValueError, AttributeError):
            ticket = None

        st, pgt, error = self.validate_service_ticket(target, ticket, None, None)
        attributes = self.get_attributes(st.user, st.service) if st else None
        return {'ticket': st, 'pgt': pgt, 'error': error,
                'attributes': attributes}
Example #11
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as XML and returns the resulting data.
        """

        parser_context = parser_context or {}
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        parser = etree.DefusedXMLParser(encoding=encoding)
        try:
            tree = etree.parse(stream, parser=parser, forbid_dtd=True)
        except (etree.ParseError, ValueError) as exc:
            raise ParseError(detail=str(exc))
        data = self._xml_convert(tree.getroot())

        return data
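
Outside Django REST Framework, the same DTD-rejecting parse can be done directly with defusedxml. A minimal sketch (the file name is hypothetical):

from defusedxml import ElementTree as safe_ET
from defusedxml import DTDForbidden

try:
    tree = safe_ET.parse('upload.xml', forbid_dtd=True)
except DTDForbidden:
    print('Rejected: the document declares a DTD')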
Example #12
    def parse_xml(self, xml_output):
        """
        Open and parse an xml file.

        TODO: Write custom parser to just read the nodes that we need instead of
        reading the whole file.

        @return xml_tree An xml tree instance. None if error.
        """
        try:
            tree = ET.parse(xml_output)
        except SyntaxError:
            # ParseError subclasses SyntaxError; return None as the
            # docstring promises.
            return None

        return tree
Example #13
def get_cve_cweid_from_file(compressed_content, cve_dict):
    zip_file = ZipFile(BytesIO(compressed_content))
    filename = zip_file.extract(zip_file.filelist[0])
    root = ET.parse(filename).getroot()
    os.remove(filename)
    cwe_ns = "{http://scap.nist.gov/schema/vulnerability/0.4}"
    default_ns = "{http://scap.nist.gov/schema/feed/vulnerability/2.0}"
    for entry in root.findall('{ns}entry'.format(ns=default_ns)):
        id = entry.attrib["id"]
        cwe = entry.find('{nsd}cwe'.format(nsd=cwe_ns))
        if cwe is not None:
            if id in cve_dict.keys():
                cve_dict[id]["cweid"] = str(cwe.attrib["id"])
    return dict(cve_dict)
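
ElementTree's find/findall can also take a namespaces mapping instead of pre-formatted '{uri}tag' strings; an equivalent lookup for the entries above would be roughly:

ns = {'feed': 'http://scap.nist.gov/schema/feed/vulnerability/2.0',
      'vuln': 'http://scap.nist.gov/schema/vulnerability/0.4'}
for entry in root.findall('feed:entry', ns):
    cwe = entry.find('vuln:cwe', ns)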
Example #14
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as XML and returns the resulting data.
        """
        assert etree, "XMLParser requires defusedxml to be installed"

        parser_context = parser_context or {}
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
        parser = etree.DefusedXMLParser(encoding=encoding)
        try:
            tree = etree.parse(stream, parser=parser, forbid_dtd=True)
        except (etree.ParseError, ValueError) as exc:
            raise ParseError("XML parse error - %s" % six.text_type(exc))
        data = self._xml_convert(tree.getroot())

        return data
Example #15
 def _load_control_enhancement_from_xml(self):
     "load control enhancement from 800-53 xml using a pure python process"
     tree = ET.parse(self.xmlfile)
     root = tree.getroot()
     # handle name spaces thusly:
     # namespace:tag => {namespace_uri}tag
     # example: controls:control => {http://scap.nist.gov/schema/sp800-53/feed/2.0}control
     # find first control where number tag value equals id
     # sc = root.find("./{http://scap.nist.gov/schema/sp800-53/feed/2.0}control/[{http://scap.nist.gov/schema/sp800-53/2.0}number='%s']" % self.id)
     sc = root.find(
         "./{http://scap.nist.gov/schema/sp800-53/feed/2.0}control/{http://scap.nist.gov/schema/sp800-53/2.0}control-enhancements/{http://scap.nist.gov/schema/sp800-53/2.0}control-enhancement/[{http://scap.nist.gov/schema/sp800-53/2.0}number='%s']"
         % self.id
     )
     if sc is not None:
         # self.family = sc.find('{http://scap.nist.gov/schema/sp800-53/2.0}family').text
         self.number = sc.find("{http://scap.nist.gov/schema/sp800-53/2.0}number").text.strip()
         self.title = sc.find("{http://scap.nist.gov/schema/sp800-53/2.0}title").text.strip()
         # self.priority = sc.find('{http://scap.nist.gov/schema/sp800-53/2.0}priority').text
         self.description = "".join(sc.find("{http://scap.nist.gov/schema/sp800-53/2.0}statement").itertext())
         self.description = re.sub(
             r"[ ]{2,}", "", re.sub(r"^[ ]", "", re.sub(r"\n", "", re.sub(r"[ ]{2,}", " ", self.description)))
         )
         self.description = self.description.replace(self.id, "\n").strip()
         self.control_enhancements = None
         self.sg = sc.find("{http://scap.nist.gov/schema/sp800-53/2.0}supplemental-guidance")
         self.supplemental_guidance = self.sg.find(
             "{http://scap.nist.gov/schema/sp800-53/2.0}description"
         ).text.strip()
         related_controls = []
         # findall("{http://scap.nist.gov/schema/sp800-53/2.0}supplemental-guidance/{http://scap.nist.gov/schema/sp800-53/2.0}related")
         for related in self.sg.findall("{http://scap.nist.gov/schema/sp800-53/2.0}related"):
             related_controls.append(related.text.strip())
         self.related_controls = ",".join(related_controls)
         self.responsible = None
     else:
         self.details = json.loads(
             '{"id": null, "error": "Failed to get security control information from 800-53 xml"}'
         )
         self.title = (
             self.description
         ) = self.supplemental_guidance = self.control_enhancements = self.responsible = None
         self.details = {}
Example #16
    def _encode_authentication(self):
        need_encode = False
        tree = ET.parse(self.conf.cinder_huawei_conf_file)
        xml_root = tree.getroot()
        name_node = xml_root.find('Storage/UserName')
        pwd_node = xml_root.find('Storage/UserPassword')
        if (name_node is not None
                and not name_node.text.startswith('!$$$')):
            name_node.text = '!$$$' + base64.b64encode(name_node.text)
            need_encode = True
        if (pwd_node is not None
                and not pwd_node.text.startswith('!$$$')):
            pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text)
            need_encode = True

        if need_encode:
            utils.execute('chmod',
                          '600',
                          self.conf.cinder_huawei_conf_file,
                          run_as_root=True)
            tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8')
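
This snippet is Python 2 style: on Python 3, base64.b64encode requires bytes, so a hedged adaptation of the encoding line would be:

name_node.text = '!$$$' + base64.b64encode(
    name_node.text.encode('utf-8')).decode('ascii')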
Example #17
def main():
    all_permissions = [] # list of strings naming each permission used in the dataset
    app_permission_map = {} # mapping from android app names to lists of permissions
    app_malicious_map = {} # mapping from android app names to 1 or 0 for malware or goodware
    root_dir = os.getcwd()
    for i, directory in enumerate(['benign_apk', 'malicious_apk']):
        os.chdir(directory)
        category_root_dir = os.getcwd()
        for filename in glob.glob('*.apk'):
            print('Processing ' + filename)
            try:
                os.chdir(filename[:-4])
                with open('AndroidManifest.xml') as xml_file:
                    et = ElementTree.parse(xml_file)
            except (ElementTree.ParseError, UnicodeDecodeError, FileNotFoundError):
                print('Parsing error encountered for ' + filename)
                os.chdir(category_root_dir)
                continue
            app_name = filename
            # make a one-hot bit vector of length 2. 1st bit set if malicious, otherwise 2nd bit
            app_malicious_map[app_name] = [1,0] if i else [0,1]
            permissions = et.getroot().findall('./uses-permission')
            app_permission_map[app_name] = []
            for permission in permissions:
                try:
                    permission_name = permission.attrib['{http://schemas.android.com/apk/res/android}name'].upper()
                    if not permission_name.startswith('ANDROID.PERMISSION'): continue # ignore custom permissions
                    if permission_name not in all_permissions: all_permissions.append(permission_name)
                    app_permission_map[app_name].append(permission_name)
                except KeyError:
                    pass
            os.chdir(os.pardir)
        os.chdir(root_dir)
    all_apps = {} # mapping combining app_permission_map and app_malicious_map using bits
    for app_name in app_permission_map:
        bit_vector = [1 if p in app_permission_map[app_name] else 0 for p in all_permissions]
        all_apps[app_name] = {'vector': bit_vector, 'malicious': app_malicious_map[app_name]}
    with open('app_permission_vectors.json', 'w') as outfile:
        json.dump({'features': all_permissions, 'apps': all_apps}, outfile)
    print('Wrote data on ' + str(len(all_permissions)) + ' permissions and ' + str(len(all_apps)) + ' apps to a file.')
Example #18
    def get_knl_vnodes(self):
        xml_out = self.query_alps('1.7', 'QUERY', 'SYSTEM')
        tree = ET.parse(xml_out)
        os.remove(xml_out)
        root = tree.getroot()
        knl_vnodes = {}

        # If node has the KNL processor then add them
        # to knl_vnodes dictionary
        for node in root.getiterator('Nodes'):
            # XML values
            role = node.attrib["role"]
            state = node.attrib["state"]
            numa_cfg = node.attrib["numa_cfg"]
            hbm_size_mb = node.attrib["hbm_size_mb"]
            hbm_cache_pct = node.attrib["hbm_cache_pct"]

            if role == 'batch' and state == 'up' and numa_cfg != ""\
               and hbm_size_mb != "" and hbm_cache_pct != "":
                # derived values from XML; build a fresh dict per match so
                # the entries in knl_vnodes do not all alias the same object
                knl_info = {}
                knl_info['current_aoe'] = numa_cfg + '_' + hbm_cache_pct
                knl_info['hbmem'] = hbm_size_mb + 'mb'
                nid_ranges = node.text.strip()
                nid_range_list = list(nid_ranges.split(','))
                while len(nid_range_list) > 0:
                    nid_range = nid_range_list.pop()
                    nid1 = nid_range.split('-')
                    if len(nid1) == 2:
                        # range of nodes
                        r1 = int(nid1[0])
                        r2 = int(nid1[1]) + 1
                        for node_id in range(r1, r2):
                            # associate each nid with its knl information
                            knl_vnodes['%d' % node_id] = knl_info
                    else:
                        # single node
                        node_id = int(nid1[0])
                        knl_vnodes['%d' % node_id] = knl_info
        return knl_vnodes
Example #19
def get_cve_description_from_file(compressed_content):
    cve_info_set = {}
    zip_file = ZipFile(BytesIO(compressed_content))
    filename = zip_file.extract(zip_file.filelist[0])
    root = ET.parse(filename).getroot()
    os.remove(filename)
    for child in root:
        try:
            cveid = child.attrib['name']
            aux = child.attrib['published'].split('-')
            pub_date = datetime.datetime(int(aux[0]), int(aux[1]), int(aux[2]))
            aux = child.attrib['modified'].split('-')
            mod_date = datetime.datetime(int(aux[0]), int(aux[1]), int(aux[2]))
            cvss_base = float(child.attrib['CVSS_base_score'])
            cvss_impact = float(child.attrib['CVSS_impact_subscore'])
            cvss_exploit = float(child.attrib['CVSS_exploit_subscore'])
            vector, features = extract_vector(child.attrib['CVSS_vector'])
            summary = child[0][0].text
            cve_info_set[cveid] = {"cveid": cveid,
                                    "pub_date": pub_date,
                                    "mod_date": mod_date,
                                    "summary": summary,
                                    "cvss_base": cvss_base,
                                    "cvss_impact": cvss_impact,
                                    "cvss_exploit": cvss_exploit,
                                    "cvss_access_vector": features[0],
                                    "cvss_access_complexity": features[1],
                                    "cvss_authentication": features[2],
                                    "cvss_confidentiality_impact": features[3],
                                    "cvss_integrity_impact": features[4],
                                    "cvss_availability_impact": features[5],
                                    "cvss_vector": vector,
                                    "cweid": "CWE-0"
                                    }
        except KeyError:
            # Any error continue
            pass
    return dict(cve_info_set)
Example #20
def main():
    all_permissions = [] # list of strings naming each permission used in the dataset
    app_permission_map = {} # mapping from android app names to lists of permissions
    app_malicious_map = {} # mapping from android app names to 1 or 0 for malware or goodware
    for i, directory in enumerate(['benign_apk', 'malicious_apk']):
        os.chdir(directory)
        valid_apks = []
        if os.path.isfile('valid_apks.txt'):
            with open('valid_apks.txt') as f:
                valid_apks = [line.strip() for line in f.readlines()]
        malformed_xml = []
        if os.path.isfile('malformed_xml.txt'):
            with open('malformed_xml.txt') as f:
                malformed_xml = [line.strip() for line in f.readlines()]
        for filename in valid_apks:
            if filename not in malformed_xml:
                os.chdir(filename[:-4])
                with open('AndroidManifest.xml') as xml_file:
                    et = ElementTree.parse(xml_file)
                    app_name = et.getroot().attrib['package']
                    # make a one-hot bit vector of length 2. 1st bit set if malicious, otherwise 2nd bit
                    app_malicious_map[app_name] = [1,0] if i else [0,1]
                    permissions = et.getroot().findall('./uses-permission')
                    app_permission_map[app_name] = []
                    for permission in permissions:
                        permission_name = permission.attrib['{http://schemas.android.com/apk/res/android}name']
                        if permission_name not in all_permissions: all_permissions.append(permission_name)
                        app_permission_map[app_name].append(permission_name)
                os.chdir(os.pardir)
        os.chdir(os.pardir)
    all_apps = {} # mapping combining app_permission_map and app_malicious_map using bits
    for app_name in app_permission_map:
        bit_vector = [1 if p in app_permission_map[app_name] else 0 for p in all_permissions]
        all_apps[app_name] = {'vector': bit_vector, 'malicious': app_malicious_map[app_name]}
    with open('app_permission_vectors.json', 'w') as outfile:
        json.dump({'permissions': all_permissions, 'apps': all_apps}, outfile)
    print('Wrote data on ' + str(len(all_permissions)) + ' permissions and ' + str(len(all_apps)) + ' apps to a file.')
Example #21
def parseXML(file):

    #set up tree
    tree = ET.parse(file)
    root = tree.getroot()

    # assets
    assets = []

    for host in root.findall('host'):

        asset = {} #blank dict for asset attributes

        for address in host.findall('address'):

            if address.get('addrtype') == 'ipv4':
                asset['ipv4'] = address.get('addr')

            if address.get('addrtype') == 'mac':
                asset['mac'] = address.get('addr')

        assets.append(asset) #add asset to asset list

    return assets
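
A usage sketch, assuming an Nmap XML report at a hypothetical path:

assets = parseXML('nmap_scan.xml')
for asset in assets:
    print(asset.get('ipv4'), asset.get('mac'))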
Example #22
    def _load_metadata_from_path(self, meta_xml_path, dataset_id, dataset):
        if not os.path.exists(meta_xml_path):
            raise MetaXmlNotFoundError(
                'meta.xml not found for dataset %s (path: %s)'
                % (dataset_id, meta_xml_path)
            )

        with retry_open_file(meta_xml_path, 'r') as meta_xml:
            meta_xml = etree.parse(meta_xml)
            dataset_node = meta_xml.find('datensatz')
            resources_node = dataset_node.find('ressourcen')

        metadata = self._dropzone_get_metadata(
            dataset_id,
            dataset,
            dataset_node
        )

        # add resource metadata
        metadata['resource_metadata'] = self._get_resources_metadata(
            resources_node
        )

        return metadata
Example #23
    def update_config_value(self):
        file_time = os.stat(self.conf.cinder_huawei_conf_file).st_mtime
        if self.last_modify_time == file_time:
            return

        self.last_modify_time = file_time
        tree = ET.parse(self.conf.cinder_huawei_conf_file)
        xml_root = tree.getroot()
        self._encode_authentication(tree, xml_root)

        attr_funcs = (
            self._san_address,
            self._san_user,
            self._san_password,
            self._san_vstore,
            self._san_product,
            self._ssl_cert_path,
            self._ssl_cert_verify,
            self._iscsi_info,
            self._fc_info,
            self._hypermetro_devices,
            self._replication_devices,
            self._lun_type,
            self._lun_ready_wait_interval,
            self._lun_copy_wait_interval,
            self._lun_timeout,
            self._lun_write_type,
            self._lun_prefetch,
            self._lun_policy,
            self._lun_read_cache_policy,
            self._lun_write_cache_policy,
            self._storage_pools,
        )

        for f in attr_funcs:
            f(xml_root)
Example #24
 def __init__(self, path):
     self.ir_graph = et.parse(path)
Example #25
    def handle(self, *args, **options):  # pylint: disable=too-many-locals,too-many-branches
        parts_xml_path = options['parts_xml_path']

        logger.info('Importing Part Attributes')
        # parse the xml file
        tree = ET.parse(parts_xml_path)
        root = tree.getroot()

        attributes_set_count = 0

        bricklink_external_ids = [
            e.external_id for e in PartExternalId.objects.filter(
                provider=PartExternalId.BRICKLINK)
        ]

        with transaction.atomic():
            for idx, item_tag in enumerate(root.findall('ITEM')):  # pylint: disable=too-many-nested-blocks
                item_id = item_tag.find('ITEMID').text
                item_x = item_tag.find('ITEMDIMX').text
                item_y = item_tag.find('ITEMDIMY').text
                item_z = item_tag.find('ITEMDIMZ').text

                if item_id and any([item_x, item_y, item_z]):
                    part_list = []

                    # First check for Bricklink ID and part_nums as backup
                    if item_id in bricklink_external_ids:
                        # Allow for different bricklink IDs to point to the same part
                        part_list = [
                            e.part for e in PartExternalId.objects.filter(
                                provider=PartExternalId.BRICKLINK,
                                external_id=item_id)
                        ]
                    else:
                        part = Part.objects.filter(part_num=item_id).first()
                        if part:
                            part_list.append(part)

                    # Update related parts
                    for part in part_list:
                        # compare numerically: the XML dimension values are strings
                        if item_x and item_y and (float(item_y) > float(item_x)):
                            part.length = item_y
                            part.width = item_x
                        else:
                            part.length = item_x
                            part.width = item_y
                        part.height = item_z
                        part.save()

                        attributes_set_count += 1

                        if (attributes_set_count % 1000) == 0:
                            logger.info(
                                F'   Attributes Set on: {attributes_set_count} parts'
                            )
                else:
                    logger.debug(F'  Invalid item Id Found: "{item_id}"')

                if (idx % 1000) == 0:
                    logger.info(F'  Items Processed: {idx}')

        logger.info(F'  Total Attributes Set on: {attributes_set_count} parts')
Example #26
def passwords_xml():
    """
    To add network credentials to Kodi's password xml
    """
    path = path_ops.translate_path('special://userdata/')
    xmlpath = "%spasswords.xml" % path
    try:
        xmlparse = defused_etree.parse(xmlpath)
    except IOError:
        # Document is blank or missing
        root = etree.Element('passwords')
        skip_find = True
    except ParseError:
        LOG.error('Error parsing %s', xmlpath)
        # "Kodi cannot parse {0}. PKC will not function correctly. Please visit
        # {1} and correct your file!"
        messageDialog(lang(29999), lang(39716).format(
            'passwords.xml', 'http://forum.kodi.tv/'))
        return
    else:
        root = xmlparse.getroot()
        skip_find = False

    credentials = settings('networkCreds')
    if credentials:
        # Present user with options
        option = dialog('select',
                        "Modify/Remove network credentials",
                        ["Modify", "Remove"])

        if option < 0:
            # User cancelled dialog
            return

        elif option == 1:
            # User selected remove
            success = False
            for paths in root.getiterator('passwords'):
                for path in paths:
                    if path.find('.//from').text == "smb://%s/" % credentials:
                        paths.remove(path)
                        LOG.info("Successfully removed credentials for: %s",
                                 credentials)
                        etree.ElementTree(root).write(xmlpath,
                                                      encoding="UTF-8")
                        success = True
            if not success:
                LOG.error("Failed to find saved server: %s in passwords.xml",
                          credentials)
                dialog('notification',
                       heading='{plex}',
                       message="%s not found" % credentials,
                       icon='{warning}',
                       sound=False)
                return
            settings('networkCreds', value="")
            dialog('notification',
                   heading='{plex}',
                   message="%s removed from passwords.xml" % credentials,
                   icon='{plex}',
                   sound=False)
            return

        elif option == 0:
            # User selected to modify
            server = dialog('input',
                            "Modify the computer name or ip address",
                            credentials)
            if not server:
                return
    else:
        # No credentials added
        messageDialog("Network credentials",
               'Input the server name or IP address as indicated in your plex '
               'library paths. For example, the server name: '
               '\\\\SERVER-PC\\path\\ or smb://SERVER-PC/path is SERVER-PC')
        server = dialog('input', "Enter the server name or IP address")
        if not server:
            return
        server = quote_plus(server)

    # Network username
    user = dialog('input', "Enter the network username")
    if not user:
        return
    user = quote_plus(user)
    # Network password
    password = dialog('input',
                      "Enter the network password",
                      '',  # Default input
                      type='{alphanum}',
                      option='{hide}')
    # Need to url-encode the password
    password = quote_plus(password)
    # Add elements. Annoying etree bug where findall hangs forever
    if skip_find is False:
        skip_find = True
        for path in root.findall('.//path'):
            if path.find('.//from').text.lower() == "smb://%s/" % server.lower():
                # Found the server, rewrite credentials
                path.find('.//to').text = ("smb://%s:%s@%s/"
                                           % (user, password, server))
                skip_find = False
                break
    if skip_find:
        # Server not found, add it.
        path = etree.SubElement(root, 'path')
        etree.SubElement(path, 'from', {'pathversion': "1"}).text = \
            "smb://%s/" % server
        topath = "smb://%s:%s@%s/" % (user, password, server)
        etree.SubElement(path, 'to', {'pathversion': "1"}).text = topath

    # Add credentials
    settings('networkCreds', value="%s" % server)
    LOG.info("Added server: %s to passwords.xml", server)
    # Prettify and write to file
    indent(root)
    etree.ElementTree(root).write(xmlpath, encoding="UTF-8")
Example #27
    def __init__(self, file, test):
        nscan = ElementTree.parse(file)
        root = nscan.getroot()

        if 'NessusClientData_v2' not in root.tag:
            raise NamespaceErr('This version of Nessus report is not supported. Please make sure the export is '
                               'formatted using the NessusClientData_v2 schema.')
        dupes = {}
        for report in root.iter("Report"):
            for host in report.iter("ReportHost"):
                ip = host.attrib['name']
                fqdn = host.find(".//HostProperties/tag[@name='host-fqdn']").text if host.find(
                    ".//HostProperties/tag[@name='host-fqdn']") is not None else None

                for item in host.iter("ReportItem"):
                    # if item.attrib["svc_name"] == "general":
                    #     continue

                    port = None
                    if float(item.attrib["port"]) > 0:
                        port = item.attrib["port"]
                    description = ""
                    plugin_output = None
                    if item.find("synopsis") is not None:
                        description = item.find("synopsis").text + "\n\n"
                    if item.find("plugin_output") is not None:
                        plugin_output = "Plugin Output: " + ip + (
                            (":" + port) if port is not None else "") + " " + item.find("plugin_output").text + "\n\n"
                        description += plugin_output

                    severity = item.find("risk_factor").text
                    if severity == "None":
                        severity = "Info"

                    impact = item.find("description").text + "\n\n"
                    if item.find("cvss_vector") is not None:
                        impact += "CVSS Vector: " + item.find("cvss_vector").text + "\n"
                    if item.find("cvss_base_score") is not None:
                        impact += "CVSS Base Score: " + item.find("cvss_base_score").text + "\n"
                    if item.find("cvss_temporal_score") is not None:
                        impact += "CVSS Temporal Score: " + item.find("cvss_temporal_score").text + "\n"

                    mitigation = item.find("solution").text if item.find("solution") is not None else "N/A"
                    references = ""
                    for ref in item.iter("see_also"):
                        refs = ref.text.split()
                        for r in refs:
                            references += r + "\n"

                    for xref in item.iter("xref"):
                        references += xref.text + "\n"

                    cwe = None
                    if item.find("cwe") is not None:
                        cwe = item.find("cwe").text
                    title = item.attrib["pluginName"]
                    dupe_key = severity + title

                    if dupe_key in dupes:
                        find = dupes[dupe_key]
                        if plugin_output is not None:
                            find.description += plugin_output
                    else:
                        find = Finding(title=title,
                                       test=test,
                                       active=False,
                                       verified=False,
                                       description=description,
                                       severity=severity,
                                       numerical_severity=Finding.get_numerical_severity(severity),
                                       mitigation=mitigation,
                                       impact=impact,
                                       references=references,
                                       cwe=cwe)
                        find.unsaved_endpoints = list()
                        dupes[dupe_key] = find
                    find.unsaved_endpoints.append(Endpoint(host=ip + (":" + port if port is not None else "")))

                    if fqdn is not None:
                        find.unsaved_endpoints.append(Endpoint(host=fqdn))

        self.items = dupes.values()
Example #28
    def __init__(self, filename, test):
        vscan = ElementTree.parse(filename)
        root = vscan.getroot()

        if 'https://www.veracode.com/schema/reports/export/1.0' not in str(root):
            # version not supported
            raise NamespaceErr('This version of Veracode report is not supported.  '
                               'Please make sure the export is formatted using the '
                               'https://www.veracode.com/schema/reports/export/1.0 schema.')

        dupes = dict()

        for severity in root.iter('{https://www.veracode.com/schema/reports/export/1.0}severity'):
            if severity.attrib['level'] == '5':
                sev = 'Critical'
            elif severity.attrib['level'] == '4':
                sev = 'High'
            elif severity.attrib['level'] == '3':
                sev = 'Medium'
            elif severity.attrib['level'] == '2':
                sev = 'Low'
            else:
                sev = 'Info'

            for category in severity.iter('{https://www.veracode.com/schema/reports/export/1.0}category'):
                recommendations = category.find('{https://www.veracode.com/schema/reports/export/1.0}recommendations')
                mitigation = ''
                for para in recommendations.iter('{https://www.veracode.com/schema/reports/export/1.0}para'):
                    mitigation += para.attrib['text'] + '\n\n'
                    for bullet in para.iter('{https://www.veracode.com/schema/reports/export/1.0}bulletitem'):
                        mitigation += "    * " + bullet.attrib['text'] + '\n'

                for flaw in category.iter('{https://www.veracode.com/schema/reports/export/1.0}flaw'):
                    dupe_key = sev + flaw.attrib['cweid'] + flaw.attrib['module'] + flaw.attrib['type']

                    if dupe_key in dupes:
                        find = dupes[dupe_key]
                    else:
                        dupes[dupe_key] = True
                        description = flaw.attrib['description'].replace('. ', '.\n')
                        if 'References:' in description:
                            references = description[description.index('References:') + 13:].replace(')  ', ')\n')
                        else:
                            references = 'None'

                        if 'date_first_occurrence' in flaw.attrib:
                            find_date = datetime.strptime(flaw.attrib['date_first_occurrence'],
                                                          '%Y-%m-%d %H:%M:%S %Z')
                        else:
                            find_date = test.target_start

                        find = Finding(title=flaw.attrib['categoryname'],
                                       cwe=int(flaw.attrib['cweid']),
                                       test=test,
                                       active=False,
                                       verified=False,
                                       description=description + "\n\nVulnerable Module: " + flaw.attrib[
                                           'module'] + ' Type: ' + flaw.attrib['type'],
                                       severity=sev,
                                       numerical_severity=Finding.get_numerical_severity(sev),
                                       mitigation=mitigation,
                                       impact='CIA Impact: ' + flaw.attrib['cia_impact'].upper(),
                                       references=references,
                                       url='N/A',
                                       date=find_date)
                        dupes[dupe_key] = find

        self.items = dupes.values()
Example #29
    # getting the new images
    newly_created_images = [
        f for f in os.listdir(directory + "/tiff") if f.startswith(filename)
    ]

    if not os.path.isfile("{}/xml/{}.xml".format(directory, filename)):
        print("Warning: associated file '{}.xml' was not found.".format(
            filename))
        continue

    xmlns = r"{http://lamp.cfar.umd.edu/media/projects/GEDI/}"

    try:
        # parsing the associated XML file
        xml = ET.parse("{}/xml/{}.xml".format(directory, filename))
        root = xml.getroot()

        doc = root.find("{}DL_DOCUMENT".format(xmlns))
        if doc is None:
            print("No DL_DOCUMENT")
            raise ET.ParseError()

        pages = doc.findall("{}DL_PAGE".format(xmlns))
        if len(pages) != len(newly_created_images):
            print("not enough pages")
            raise ET.ParseError()

        # changing the image source for each page
        for new_image in newly_created_images:
            # page_id = new_image[-5]
Example #30
def ttml2srt(infile, outfile):
    """
    Conversion routine for subtitle files from
    TTML to SRT

    Args:
        infile(str): full name of the input file

        outfile(str): full name of the converted file
    """
    tree = ET.parse(infile)
    root = tree.getroot()

    # strip namespaces
    for elem in root.getiterator():
        elem.tag = elem.tag.split('}', 1)[-1]
        elem.attrib = {name.split('}', 1)
                       [-1]: value for name, value in list(elem.attrib.items())}

    # get styles
    styles = {}
    for elem in root.findall('./head/styling/style'):
        style = {}
        if 'color' in elem.attrib:
            color = elem.attrib['color']
            if color not in ('#FFFFFF', '#000000'):
                style['color'] = color
        if 'fontStyle' in elem.attrib:
            fontstyle = elem.attrib['fontStyle']
            if fontstyle in ('italic', ):
                style['fontstyle'] = fontstyle
        styles[elem.attrib['id']] = style

    body = root.find('./body')

    # parse correct start and end times
    def _parse_time_expression(expression, default_offset=timedelta(0)):
        offset_time = re.match(
            r'^([0-9]+(\.[0-9]+)?)(h|m|s|ms|f|t)$', expression)
        if offset_time:
            time_value, _, metric = offset_time.groups()
            time_value = float(time_value)
            if metric == 'h':
                return default_offset + timedelta(hours=time_value)
            elif metric == 'm':
                return default_offset + timedelta(minutes=time_value)
            elif metric == 's':
                return default_offset + timedelta(seconds=time_value)
            elif metric == 'ms':
                return default_offset + timedelta(milliseconds=time_value)
            elif metric == 'f':
                raise NotImplementedError(
                    'Parsing time expressions by frame is not supported!')
            elif metric == 't':
                raise NotImplementedError(
                    'Parsing time expressions by ticks is not supported!')

        clock_time = re.match(
            r'^([0-9]{2,}):([0-9]{2,}):([0-9]{2,}(\.[0-9]+)?)$', expression)
        if clock_time:
            hours, minutes, seconds, _ = clock_time.groups()
            return timedelta(hours=int(hours), minutes=int(minutes), seconds=float(seconds))

        clock_time_frames = re.match(
            r'^([0-9]{2,}):([0-9]{2,}):([0-9]{2,}):([0-9]{2,}(\.[0-9]+)?)$', expression)
        if clock_time_frames:
            raise NotImplementedError(
                'Parsing time expressions by frame is not supported!')

        raise ValueError('unknown time expression: %s' % expression)

    def _parse_times(elem, default_begin=timedelta(0)):
        if 'begin' in elem.attrib:
            begin = _parse_time_expression(
                elem.attrib['begin'], default_offset=default_begin)
        else:
            begin = default_begin
        elem.attrib['{abs}begin'] = begin

        end = None
        if 'end' in elem.attrib:
            end = _parse_time_expression(
                elem.attrib['end'], default_offset=default_begin)

        dur = None
        if 'dur' in elem.attrib:
            dur = _parse_time_expression(elem.attrib['dur'])

        if dur is not None:
            if end is None:
                end = begin + dur
            else:
                end = min(end, begin + dur)

        elem.attrib['{abs}end'] = end

        for child in elem:
            _parse_times(child, default_begin=begin)

    _parse_times(body)

    timestamps = set()
    for elem in body.findall('.//*[@{abs}begin]'):
        timestamps.add(elem.attrib['{abs}begin'])

    for elem in body.findall('.//*[@{abs}end]'):
        timestamps.add(elem.attrib['{abs}end'])

    timestamps.discard(None)

    # render subtitles on each timestamp

    def _render_subtitles(elem, timestamp, parent_style=None):

        if timestamp < elem.attrib['{abs}begin']:
            return ''
        if elem.attrib['{abs}end'] is not None and timestamp >= elem.attrib['{abs}end']:
            return ''

        result = ''

        style = parent_style.copy() if parent_style is not None else {}
        if 'style' in elem.attrib:
            style.update(styles[elem.attrib['style']])

        if 'color' in style:
            result += '<font color="%s">' % style['color']

        if style.get('fontstyle') == 'italic':
            result += '<i>'

        if elem.text:
            result += elem.text.strip()
        if elem is not None:
            for child in elem:
                result += _render_subtitles(child, timestamp)
                if child.tail:
                    result += child.tail.strip()

        if 'color' in style:
            result += '</font>'

        if style.get('fontstyle') == 'italic':
            result += '</i>'

        if elem.tag in ('div', 'p', 'br'):
            result += '\n'

        return result

    rendered = []
    for timestamp in sorted(timestamps):
        rendered.append((timestamp, re.sub(r'\n\n\n+', '\n\n',
                                           _render_subtitles(body, timestamp)).strip()))

    if not rendered:
        exit(0)

    # group timestamps together if nothing changes
    rendered_grouped = []
    last_text = None
    for timestamp, content in rendered:
        if content != last_text:
            rendered_grouped.append((timestamp, content))
        last_text = content

    # output srt
    rendered_grouped.append(
        (rendered_grouped[-1][0] + timedelta(hours=24), ''))

    def _format_timestamp(timestamp):
        # %06.3f zero-pads the seconds to "SS.mmm", as the SRT format requires
        return ('%02d:%02d:%06.3f' % (timestamp.total_seconds() // 3600,
                                      timestamp.total_seconds() // 60 % 60,
                                      timestamp.total_seconds() % 60)).replace('.', ',')

    if isinstance(outfile, str) or isinstance(outfile, unicode):
        dstfile = io.open(outfile, 'w', encoding='utf-8')
    else:
        dstfile = outfile

    srt_i = 1
    for i, (timestamp, content) in enumerate(rendered_grouped[:-1]):
        if content == '':
            continue
        dstfile.write(bytearray('%d\n' % srt_i, 'utf-8'))
        dstfile.write(bytearray(
            _format_timestamp(timestamp) +
            ' --> ' +
            _format_timestamp(rendered_grouped[i + 1][0]) +
            '\n'
        ))
        dstfile.write(bytearray(content + '\n\n', 'utf-8'))
        srt_i += 1
    dstfile.close()
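
Hypothetical usage; the converter takes the input TTML file name and either an output file name or an open file object:

ttml2srt('subtitles.ttml', 'subtitles.srt')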
Example #31
 def parse(self, xml_file):
     root = et.parse(xml_file).getroot()
     for error_tag in root.findall("error"):
         self.errors.append(Error(error_tag))
Example #32
from defusedxml import ElementTree as ET
from defusedxml import EntitiesForbidden


# Fix: Use defusedxml
try:
    tree1 = ET.parse('billion_laughs.xml')
    print(len(tree1.getroot().text))
except EntitiesForbidden:
    print("Bad XML found")
Example #33
    def __init__(self, filename, test):
        Fortifyscan = ElementTree.parse(filename)
        root = Fortifyscan.getroot()

        # Get Date
        date_string = root.getchildren()[5].getchildren()[1].getchildren(
        )[2].text
        date_list = date_string.split()[1:4]
        date_act = "".join(date_list)
        find_date = parser.parse(date_act)

        # Get Finding Details
        dupes = dict()

        for ReportSection in root.findall('ReportSection'):
            if ReportSection.findtext('Title') == "Results Outline":
                kingdom = ''
                category = ''
                mitigation = ''
                impact = ''
                steps_to_reproduce = ''
                severity_justification = ''
                references = ''
                findingdetail = ''
                title = ''
                filename = ''
                filepath = ''
                linestart = ''
                dupe_key = ''
                filename = ''
                linestart = ''

            for GroupingSection in ReportSection.iter('GroupingSection'):
                for groupTitle in GroupingSection.iter('groupTitle'):
                    grouptitle = groupTitle.text

                for MajorAttributeSummary in GroupingSection.iter(
                        'MajorAttributeSummary'):
                    for MetaInfo in MajorAttributeSummary.iter('MetaInfo'):
                        if MetaInfo.findtext('Name') == "Abstract":
                            for meta_value in MetaInfo.iter('Value'):
                                impact = meta_value.text
                        if MetaInfo.findtext('Name') == "Explanation":
                            for meta_value in MetaInfo.iter('Value'):
                                references = meta_value.text
                        if MetaInfo.findtext('Name') == "Recommendations":
                            for meta_value in MetaInfo.iter('Value'):
                                mitigation = meta_value.text
                        if MetaInfo.findtext('Name') == "Tips":
                            for meta_value in MetaInfo.iter('Value'):
                                mitigation += "\n**Tips:**\n" + meta_value.text

                cwe_id = grouptitle.split(' ')
                if len(cwe_id) > 2:
                    if cwe_id[2].isdigit():
                        cwe_id = cwe_id[2]
                    elif "," in cwe_id[2]:
                        cwe_id = cwe_id[:1]
                    else:
                        cwe_id = 0
                else:
                    cwe_id = 0

                for Friority in GroupingSection.iter('Friority'):
                    sev = Friority.text

                for Category in GroupingSection.iter('Category'):
                    category = Category.text

                for Kingdom in GroupingSection.iter('Kingdom'):
                    kingdom = Kingdom.text

                for LineStart in GroupingSection.iter('LineStart'):
                    linestart = LineStart.text
                    if linestart is not None:
                        findingdetail += "**Line Start:**" + linestart + '\n'

                for Snippet in GroupingSection.iter('Snippet'):
                    snippet = Snippet.text
                    if snippet is not None:
                        findingdetail += "\n**Code:**\n'''\n" + snippet + "\n\n"

                    for FileName in GroupingSection.iter('FileName'):
                        filename = FileName.text
                        if filename is not None:
                            findingdetail += "**FileName:**" + filename + '\n'
                    for FilePath in GroupingSection.iter('FilePath'):
                        filepath = FilePath.text
                        if filepath is not None:
                            findingdetail += "**Filepath:**" + filepath + '\n'

                    title = category + " " + kingdom
                    if not isinstance(cwe_id, int):
                        dupe_key = (title + sev + cwe_id)
                    else:
                        dupe_key = (title + sev + str(cwe_id))

                    if dupe_key in dupes:
                        find = dupes[dupe_key]
                    else:
                        dupes[dupe_key] = True
                        find = Finding(
                            title=title,
                            cwe=cwe_id,
                            test=test,
                            active=False,
                            verified=False,
                            description=findingdetail,
                            severity=sev,
                            numerical_severity=Finding.get_numerical_severity(
                                sev),
                            mitigation=mitigation,
                            impact=impact,
                            references=references,
                            file_path=filepath,
                            steps_to_reproduce=steps_to_reproduce,
                            severity_justification=severity_justification,
                            line=linestart,
                            url='N/A',
                            date=find_date,
                            static_finding=True)
                    dupes[dupe_key] = find
                    findingdetail = ''
                    mitigation = ''

        self.items = dupes.values()
Example #34
    def __init__(self, filename, test):
        dupes = dict()
        self.items = ()

        if filename is None:
            self.items = ()
            return

        tree = ET.parse(filename)
        root = tree.getroot()
        scan = root.find('scandetails')
        # Newer versions of Nikto (nxvmlversion="1.2") add an extra enclosing
        # niktoscan tag. The find below supports the new format without
        # breaking older Nikto scan files.
        if scan is None:
            scan = root.find('./niktoscan/scandetails')

        for item in scan.findall('item'):
            # Title
            titleText = None
            description = item.find("description").text
            # Cut the title down to the first sentence
            sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                                 description)
            if len(sentences) > 0:
                titleText = sentences[0][:900]
            else:
                titleText = description[:900]

            # Url
            ip = item.find("iplink").text
            # Remove the port numbers for 80/443
            ip = ip.replace(":80", "")
            ip = ip.replace(":443", "")

            # Severity
            severity = "Info"  # Nikto doesn't assign severity, default to Info

            # Description
            description = "\n \n".join((
                ("Host: " + ip),
                ("Description: " + item.find("description").text),
                ("HTTP Method: " + item.attrib["method"]),
            ))
            mitigation = "N/A"
            impact = "N/A"
            references = "N/A"

            dupe_key = hashlib.md5(description.encode("utf-8")).hexdigest()

            if dupe_key in dupes:
                finding = dupes[dupe_key]
                if finding.description:
                    finding.description = finding.description + "\nHost:" + ip + "\n" + description
                self.process_endpoints(finding, ip)
                dupes[dupe_key] = finding
            else:
                dupes[dupe_key] = True

                finding = Finding(
                    title=titleText,
                    test=test,
                    active=False,
                    verified=False,
                    description=description,
                    severity=severity,
                    numerical_severity=Finding.get_numerical_severity(
                        severity),
                    mitigation=mitigation,
                    impact=impact,
                    references=references,
                    url='N/A',
                    dynamic_finding=True)

                dupes[dupe_key] = finding
                self.process_endpoints(finding, ip)

        self.items = list(dupes.values())
Ejemplo n.º 35
0
        sys.exit()
    else:
        # Update existing issue
        issue.edit(title=title, body=body_text)
        print(f"Updated issue in {args.issue_repo}#{issue.number}")
        sys.exit()


junit_path = Path(args.junit_file)
if not junit_path.exists():
    body = "Unable to find junit file. Please see link for details."
    create_or_update_issue(body)
    sys.exit()

# Find failures in junit file
tree = ET.parse(args.junit_file)
failure_cases = []

# Check if test collection failed
error = tree.find("./testsuite/testcase/error")
if error is not None:
    # Get information for test collection error
    failure_cases.append({
        "title": "Test Collection Failure",
        "body": error.text
    })

for item in tree.iter("testcase"):
    failure = item.find("failure")
    if failure is None:
        continue
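
The fragment above is cut off; a hedged, self-contained sketch of the same JUnit traversal (the report path and field names are assumptions):

import xml.etree.ElementTree as ET

tree = ET.parse("junit_report.xml")  # assumed report path
failures = []
for case in tree.iter("testcase"):
    failure = case.find("failure")
    if failure is None:
        continue
    failures.append({"title": case.attrib.get("name"), "body": failure.text})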
Ejemplo n.º 36
0
    def __init__(self, filename, test, mode=None):
        cxscan = ElementTree.parse(filename)
        self.test = test
        root = cxscan.getroot()
        if (mode in [None, 'detailed']):
            self.mode = mode
        else:
            raise Exception("Internal error: Invalid mode " + mode +
                            ". Expected: one of None, 'detailed'")

        # Dictionary to hold the aggregated findings with:
        #  - key: the concatenated aggregate keys
        #  - value: the finding
        dupes = dict()
        for query in root.findall('Query'):
            name, cwe, categories = self.getQueryElements(query)
            language = ''
            findingdetail = ''
            group = ''
            find_date = parser.parse(root.get("ScanStart"))

            if query.get('Language') is not None:
                language = query.get('Language')

            if query.get('group') is not None:
                group = query.get('group').replace('_', ' ')

            for result in query.findall('Result'):
                if categories is not None:
                    findingdetail = "{}**Category:** {}\n".format(
                        findingdetail, categories)

                if language is not None:
                    findingdetail = "{}**Language:** {}\n".format(
                        findingdetail, language)
                    if language not in self.language_list:
                        self.language_list.append(language)

                if group is not None:
                    findingdetail = "{}**Group:** {}\n".format(
                        findingdetail, group)

                if result.get('Status') is not None:
                    findingdetail = "{}**Status:** {}\n".format(
                        findingdetail, result.get('Status'))

                deeplink = "[{}]({})".format(result.get('DeepLink'),
                                             result.get('DeepLink'))
                findingdetail = "{}**Finding Link:** {}\n\n".format(
                    findingdetail, deeplink)

                if (self.mode is None):
                    self.process_result_file_name_aggregated(
                        dupes, findingdetail, query, result, find_date)
                elif (self.mode == 'detailed'):
                    self.process_result_detailed(dupes, findingdetail, query,
                                                 result, find_date)
                findingdetail = ''

        for lang in self.language_list:
            add_language(test.engagement.product, lang)

        self.items = list(dupes.values())
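
The mode validation above can also be written as a set membership check; a small illustrative sketch:

VALID_MODES = {None, 'detailed'}

def validate_mode(mode):
    # Raise early on unknown modes instead of branching twice.
    if mode not in VALID_MODES:
        raise ValueError("Invalid mode {!r}; expected one of {}".format(mode, VALID_MODES))
    return mode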
Ejemplo n.º 37
0
        f.write(duzenlenmis_xml)

# Record Check Section #
elif deger == 2:
    print("""
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    #                  Record Check Area                   #
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    """)
    time.sleep(1)
    kontrol = input("Enter the full name of the record you want to check: ")

    file_name = "duzenlenmis_cikti.xml"

    try:
        data = dET.parse(file_name)
    except FileNotFoundError:
        print(f"File not found: {file_name}")
        sys.exit(1)
    root = data.getroot()
    print("""
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    #                Yaşını öğrenmek için => 1               #
    #    Hangi ülke vatandaşı olduğunu öğrenmek için => 2    #
    #               Mevkisini öğrenmek için => 3             #
    #           Oynadığı takımı öğrenmek için => 4           #
    # Oynadığı takımın teknik direktörünü öğrenmek için => 5 #
    #            Oynadığı ligi öğrenmek için => 6            #
    #       İlk 11 oynama sayısını öğrenmek için => 7        #
    #        Toplam gol sayısını öğrenmek için => 8          #
    #       Toplam asist sayısını öğrenmek için => 9         #
Ejemplo n.º 38
0
	def helperParse(self, document):
		tree = _ET.parse(document)
		root = tree.getroot()
		return root
Ejemplo n.º 39
0
    def __init__(self, filename, test):
        cxscan = ElementTree.parse(filename)
        root = cxscan.getroot()

        dupes = dict()
        for query in root.findall('Query'):
            categories = ''
            language = ''
            mitigation = 'N/A'
            impact = 'N/A'
            references = ''
            findingdetail = ''
            title = ''
            group = ''
            status = ''
            self.result_dupes = dict()
            find_date = parser.parse(root.get("ScanStart"))
            name = query.get('name')
            cwe = query.get('cweId')

            if query.get('categories') is not None:
                categories = query.get('categories')

            if query.get('Language') is not None:
                language = query.get('Language')

            if query.get('group') is not None:
                group = query.get('group').replace('_', ' ')

            for result in query.findall('Result'):
                if categories is not None:
                    findingdetail = "{}**Category:** {}\n".format(findingdetail, categories)

                if language is not None:
                    findingdetail = "{}**Language:** {}\n".format(findingdetail, language)
                    if language not in self.language_list:
                        self.language_list.append(language)

                if group is not None:
                    findingdetail = "{}**Group:** {}\n".format(findingdetail, group)

                if result.get('Status') is not None:
                    findingdetail = "{}**Status:** {}\n".format(findingdetail, result.get('Status'))

                deeplink = "[{}]({})".format(result.get('DeepLink'), result.get('DeepLink'))
                findingdetail = "{}**Finding Link:** {}\n\n".format(findingdetail, deeplink)

                dupe_key = "{}{}{}{}".format(categories, cwe, name, result.get('FileName'))

                if dupe_key in dupes:
                    find = dupes[dupe_key]
                    title, description, pathnode = self.get_finding_detail(query, result)
                    "{}\n{}".format(find.description, description)
                    dupes[dupe_key] = find
                else:
                    dupes[dupe_key] = True

                    sev = result.get('Severity')
                    result.get('FileName')
                    title, description, pathnode = self.get_finding_detail(query, result)

                    find = Finding(title=title,
                                   cwe=int(cwe),
                                   test=test,
                                   active=False,
                                   verified=False,
                                   description=findingdetail + description,
                                   severity=sev,
                                   numerical_severity=Finding.get_numerical_severity(sev),
                                   mitigation=mitigation,
                                   impact=impact,
                                   references=references,
                                   file_path=pathnode.find('FileName').text,
                                   line=pathnode.find('Line').text,
                                   url='N/A',
                                   date=find_date,
                                   static_finding=True)
                    dupes[dupe_key] = find
                    findingdetail = ''

        for lang in self.language_list:
            add_language(test.engagement.product, lang)

        self.items = dupes.values()
Ejemplo n.º 40
0
MIN_BEFORE_COOLDOWN = 5
CURRENT_ID = 0
CHANNEL = None
GUILD = None
ISSUE_RESULTS = None

BANNED_HOURS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 22, 23, 24]
ROLE_PING = "671696364056477707"
EMOJI_VOTE = ["☑️", "✅"]
EMOJI = [
    ":apple:", ":pineapple:", ":kiwi:", ":cherries:", ":banana:", ":eggplant:",
    ":tomato:", ":corn:", ":carrot:"
]
NATION = 'controlistania'
PATH = 'vote.yml'
RESULTS_XML = DT.parse("test_result.xml")
INPUT_XML = DT.parse("test_input.xml")

load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
PASSWORD = os.getenv('PASSWORD')

bot = commands.Bot(command_prefix=';')

# ----------------------------- FILE READING

# Load the list of ranks and banners when the application starts
with open("list_data.yml") as f:
    data = yaml.load(f, Loader=yaml.FullLoader)
    LIST_RANK_ID = data["ranks"]
    BANNER_TITLES = data["banners"]
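
A note on the yaml.load call above: for files that may be untrusted, yaml.safe_load is the stricter and more common idiom; a sketch with the same (assumed) file:

import yaml

# safe_load refuses to construct arbitrary Python objects from the document.
with open("list_data.yml") as f:
    data = yaml.safe_load(f)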
Ejemplo n.º 41
0
def xml_upload(request):
    """
    Handling XML upload files.
    :param request:
    :return:
    """
    all_project = project_db.objects.all()

    if request.method == "POST":
        project_id = request.POST.get("project_id")
        scanner = request.POST.get("scanner")
        xml_file = request.FILES['xmlfile']
        scan_url = request.POST.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        if scanner == "zap_scan":
            date_time = datetime.now()
            scan_dump = zap_scans_db(scan_url=scan_url,
                                     scan_scanid=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     vul_status=scan_status,
                                     rescan='No')
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            en_root_xml = ET.tostring(root_xml, encoding='utf8').decode(
                'ascii', 'ignore')
            root_xml_en = ET.fromstring(en_root_xml)
            zap_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml_en)
            return HttpResponseRedirect("/zapscanner/zap_scan_list/")
        elif scanner == "burp_scan":
            date_time = datetime.now()
            scan_dump = burp_scan_db(url=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
            # Burp scan XML parser
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            en_root_xml = ET.tostring(root_xml, encoding='utf8').decode(
                'ascii', 'ignore')
            root_xml_en = ET.fromstring(en_root_xml)

            burp_xml_parser.burp_scan_data(root_xml_en, project_id, scan_id)
            print("Save scan Data")
            return HttpResponseRedirect("/burpscanner/burp_scan_list")

        elif scanner == "arachni":
            date_time = datetime.now()
            scan_dump = arachni_scan_db(url=scan_url,
                                        scan_id=scan_id,
                                        date_time=date_time,
                                        project_id=project_id,
                                        scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            arachni_xml_parser.xml_parser(project_id=project_id,
                                          scan_id=scan_id,
                                          root=root_xml)
            print("Save scan Data")
            return HttpResponseRedirect("/arachniscanner/arachni_scan_list")

        elif scanner == 'netsparker':
            date_time = datetime.now()
            scan_dump = netsparker_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            netsparker_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)

            return HttpResponseRedirect(
                "/netsparkerscanner/netsparker_scan_list/")
        elif scanner == 'webinspect':
            date_time = datetime.now()
            scan_dump = webinspect_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            webinspect_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)

            return HttpResponseRedirect(
                "/webinspectscanner/webinspect_scan_list/")

        elif scanner == 'acunetix':
            date_time = datetime.now()
            scan_dump = acunetix_scan_db(url=scan_url,
                                         scan_id=scan_id,
                                         date_time=date_time,
                                         project_id=project_id,
                                         scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            acunetix_xml_parser.xml_parser(project_id=project_id,
                                           scan_id=scan_id,
                                           root=root_xml)

            return HttpResponseRedirect("/acunetixscanner/acunetix_scan_list/")

        elif scanner == 'dependencycheck':
            date_time = datetime.now()
            scan_dump = dependencycheck_scan_db(project_name=scan_url,
                                                scan_id=scan_id,
                                                date_time=date_time,
                                                project_id=project_id,
                                                scan_status=scan_status)
            scan_dump.save()
            data = etree.parse(xml_file)
            root = data.getroot()
            dependencycheck_report_parser.xml_parser(project_id=project_id,
                                                     scan_id=scan_id,
                                                     data=root)

            return HttpResponseRedirect(
                "/dependencycheck/dependencycheck_list")

        elif scanner == 'findbugs':
            date_time = datetime.now()
            scan_dump = findbugs_scan_db(project_name=scan_url,
                                         scan_id=scan_id,
                                         date_time=date_time,
                                         project_id=project_id,
                                         scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root = tree.getroot()
            findbugs_report_parser.xml_parser(project_id=project_id,
                                              scan_id=scan_id,
                                              root=root)

            return HttpResponseRedirect("/findbugs/findbugs_list")

        elif scanner == 'nikto':
            date_time = datetime.now()
            scan_dump = nikto_result_db(
                date_time=date_time,
                scan_url=scan_url,
                scan_id=scan_id,
                project_id=project_id,
            )
            scan_dump.save()

            nikto_html_parser(xml_file, project_id, scan_id)

            return HttpResponseRedirect("/tools/nikto/")

    return render(request, 'upload_xml.html', {'all_project': all_project})
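
Every scanner branch above repeats the same parse, re-encode to ASCII, re-parse step; a hedged sketch of that step as a helper (the helper name is an assumption, not part of the original view):

import xml.etree.ElementTree as ET

def ascii_root(xml_file):
    # Parse, dump to bytes, drop non-ASCII characters, and re-parse.
    root = ET.parse(xml_file).getroot()
    encoded = ET.tostring(root, encoding='utf8').decode('ascii', 'ignore')
    return ET.fromstring(encoded)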
Ejemplo n.º 42
0
    def __init__(self, file, test):
        nscan = ElementTree.parse(file)
        root = nscan.getroot()

        if 'NessusClientData_v2' not in root.tag:
            raise NamespaceErr(
                'This version of Nessus report is not supported. Please make sure the export is '
                'formatted using the NessusClientData_v2 schema.')
        dupes = {}
        for report in root.iter("Report"):
            for host in report.iter("ReportHost"):
                ip = host.attrib['name']
                fqdn = host.find(".//HostProperties/tag[@name='host-fqdn']"
                                 ).text if host.find(
                                     ".//HostProperties/tag[@name='host-fqdn']"
                                 ) is not None else None

                for item in host.iter("ReportItem"):
                    # if item.attrib["svc_name"] == "general":
                    #     continue

                    port = None
                    if float(item.attrib["port"]) > 0:
                        port = item.attrib["port"]

                    protocol = None
                    if str(item.attrib["protocol"]):
                        protocol = item.attrib["protocol"]

                    description = ""
                    plugin_output = None
                    if item.findtext("synopsis"):
                        description = item.find("synopsis").text + "\n\n"
                    if item.findtext("plugin_output"):
                        plugin_output = "Plugin Output: " + ip + (
                            (":" + port) if port is not None else "") + \
                            " \n```\n" + item.find("plugin_output").text + \
                            "\n```\n\n"
                        description += plugin_output

                    nessus_severity_id = int(item.attrib["severity"])
                    severity = get_text_severity(nessus_severity_id)

                    impact = ""
                    if item.find("description"):
                        impact = item.find("description").text + "\n\n"
                    if item.findtext("cvss_vector"):
                        impact += "CVSS Vector: " + item.find(
                            "cvss_vector").text + "\n"
                    if item.findtext("cvss_base_score"):
                        impact += "CVSS Base Score: " + item.find(
                            "cvss_base_score").text + "\n"
                    if item.findtext("cvss_temporal_score"):
                        impact += "CVSS Temporal Score: " + item.find(
                            "cvss_temporal_score").text + "\n"

                    mitigation = item.find("solution").text if item.find(
                        "solution") is not None else "N/A"
                    references = ""
                    for ref in item.iter("see_also"):
                        refs = ref.text.split()
                        for r in refs:
                            references += r + "\n"

                    for xref in item.iter("xref"):
                        references += xref.text + "\n"

                    cwe = None
                    if item.findtext("cwe"):
                        cwe = item.find("cwe").text
                    title = item.attrib["pluginName"]
                    dupe_key = severity + title

                    if dupe_key in dupes:
                        find = dupes[dupe_key]
                        if plugin_output is not None:
                            find.description += plugin_output
                    else:
                        find = Finding(
                            title=title,
                            test=test,
                            active=False,
                            verified=False,
                            description=description,
                            severity=severity,
                            numerical_severity=Finding.get_numerical_severity(
                                severity),
                            mitigation=mitigation,
                            impact=impact,
                            references=references,
                            cwe=cwe)
                        find.unsaved_endpoints = list()
                        dupes[dupe_key] = find

                    find.unsaved_endpoints.append(
                        Endpoint(host=ip +
                                 (":" + port if port is not None else ""),
                                 protocol=protocol))
                    if fqdn is not None:
                        find.unsaved_endpoints.append(
                            Endpoint(host=fqdn, protocol=protocol))

        self.items = list(dupes.values())
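
get_text_severity is not shown in this excerpt; a plausible mapping, offered only as an assumption, could look like:

def get_text_severity(severity_id):
    # Assumed table: Nessus severity ids 0-4 mapped to text labels.
    return {0: "Info", 1: "Low", 2: "Medium",
            3: "High", 4: "Critical"}.get(severity_id, "Info")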
Ejemplo n.º 43
0
Archivo: ic03.py Proyecto: mindee/doctr
    def __init__(
        self,
        train: bool = True,
        use_polygons: bool = False,
        recognition_task: bool = False,
        **kwargs: Any,
    ) -> None:

        url, sha256, file_name = self.TRAIN if train else self.TEST
        super().__init__(
            url,
            file_name,
            sha256,
            True,
            pre_transforms=convert_target_to_relative
            if not recognition_task else None,
            **kwargs,
        )
        self.train = train
        self.data: List[Tuple[Union[str, np.ndarray], Dict[str, Any]]] = []
        np_dtype = np.float32

        # Load xml data
        tmp_root = (os.path.join(
            self.root, "SceneTrialTrain" if self.train else "SceneTrialTest")
                    if sha256 else self.root)
        xml_tree = ET.parse(os.path.join(tmp_root, "words.xml"))
        xml_root = xml_tree.getroot()

        for image in tqdm(iterable=xml_root,
                          desc="Unpacking IC03",
                          total=len(xml_root)):
            name, resolution, rectangles = image

            # File existence check
            if not os.path.exists(os.path.join(tmp_root, name.text)):
                raise FileNotFoundError(
                    f"unable to locate {os.path.join(tmp_root, name.text)}")

            if use_polygons:
                # (x, y) coordinates of top left, top right, bottom right, bottom left corners
                _boxes = [[
                    [float(rect.attrib["x"]),
                     float(rect.attrib["y"])],
                    [
                        float(rect.attrib["x"]) + float(rect.attrib["width"]),
                        float(rect.attrib["y"])
                    ],
                    [
                        float(rect.attrib["x"]) + float(rect.attrib["width"]),
                        float(rect.attrib["y"]) + float(rect.attrib["height"]),
                    ],
                    [
                        float(rect.attrib["x"]),
                        float(rect.attrib["y"]) + float(rect.attrib["height"])
                    ],
                ] for rect in rectangles]
            else:
                # x_min, y_min, x_max, y_max
                _boxes = [
                    [
                        float(rect.attrib["x"]),  # type: ignore[list-item]
                        float(rect.attrib["y"]),  # type: ignore[list-item]
                        float(rect.attrib["x"]) +
                        float(rect.attrib["width"]),  # type: ignore[list-item]
                        float(rect.attrib["y"]) + float(
                            rect.attrib["height"]),  # type: ignore[list-item]
                    ] for rect in rectangles
                ]

            # filter images without boxes
            if len(_boxes) > 0:
                boxes: np.ndarray = np.asarray(_boxes, dtype=np_dtype)
                # Get the labels
                labels = [
                    lab.text for rect in rectangles for lab in rect if lab.text
                ]

                if recognition_task:
                    crops = crop_bboxes_from_image(img_path=os.path.join(
                        tmp_root, name.text),
                                                   geoms=boxes)
                    for crop, label in zip(crops, labels):
                        if crop.shape[0] > 0 and crop.shape[1] > 0 and len(
                                label) > 0:
                            self.data.append((crop, dict(labels=[label])))
                else:
                    self.data.append(
                        (name.text, dict(boxes=boxes, labels=labels)))

        self.root = tmp_root
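
The corner arithmetic above, isolated for a single rect element as a minimal sketch (names are illustrative):

def rect_to_box(rect):
    # x_min, y_min, x_max, y_max from the rect's x/y/width/height attributes.
    x, y = float(rect.attrib["x"]), float(rect.attrib["y"])
    w, h = float(rect.attrib["width"]), float(rect.attrib["height"])
    return [x, y, x + w, y + h]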
Ejemplo n.º 44
0
    def __load_xml(self):
        xml_tree = self.xml_tree or ET.parse(self.path_to_xml)
        xml_root = xml_tree.getroot()
        xml_layers = {}
        xml_edges = []
        statistics = {}

        Edge = namedtuple('edge',
                          ['from_layer', 'from_port', 'to_layer', 'to_port'])

        # Create graph with operations only
        self.graph = Graph()
        self.graph.graph['hashes'] = {}

        self.graph.graph['ir_version'] = int(
            xml_root.attrib['version']) if xml_root.attrib.get(
                'version') is not None else None
        self.graph.graph['layout'] = 'NCHW'
        self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get(
            'name') is not None else None

        # Parse XML
        for child in xml_root:
            if child.tag == 'layers':
                for layer in child:
                    layer_id, layer_attrs = self.__load_layer(layer)
                    xml_layers.update({layer_id: layer_attrs})
            elif child.tag == 'edges':
                for edge in child:
                    xml_edges.append(
                        Edge(edge.attrib['from-layer'],
                             int(edge.attrib['from-port']),
                             edge.attrib['to-layer'],
                             int(edge.attrib['to-port'])))
            elif child.tag == 'statistics':
                layers = child.findall('layer')
                for layer in layers:
                    statistics[layer.find('name').text] = {
                        'min': layer.find('min').text,
                        'max': layer.find('max').text
                    }
            elif child.tag == 'meta_data':
                for elem in child:
                    if elem.tag == 'cli_parameters':
                        for det in elem:
                            if det.tag != 'unset':
                                value = det.attrib['value']
                                if value in ['True', 'False']:
                                    value = False if value == 'False' else True
                                self.meta_data[det.tag] = value
                            else:
                                self.meta_data[det.tag] = det.attrib[
                                    'unset_cli_parameters'].split(',_')
            elif child.tag == 'quantization_parameters':
                # Section with Post Optimization Toolkit parameters
                self.meta_data['quantization_parameters'] = dict()
                for elem in child:
                    if elem.tag == 'config':
                        self.meta_data['quantization_parameters'][
                            'config'] = elem.text
                    elif elem.tag in ['version', 'cli_params']:
                        self.meta_data['quantization_parameters'][
                            elem.tag] = elem.attrib['value']

        self.graph.graph['cmd_params'] = Namespace(
            **self.meta_data)  # TODO: check whether all these attrs are needed

        if len(statistics):
            self.graph.graph['statistics'] = statistics

        for layer in xml_layers.keys():
            self.graph.add_node(layer, **xml_layers[layer])

        xml_edges.sort(key=lambda x: x.to_layer)

        for edge in xml_edges:
            self.graph.add_edges_from([(edge.from_layer, edge.to_layer, {
                'from_port': edge.from_port,
                'to_port': edge.to_port
            })])

        # Insert data nodes between op nodes and insert data nodes with weights
        nodes = list(self.graph.nodes())
        for node in nodes:
            out_edges = Node(self.graph, node).get_outputs()
            data_nodes = {}
            for port in self.graph.node[node]['ports']:
                data = self.graph.unique_id(prefix='data_')
                self.graph.add_node(
                    data, **{
                        'kind': 'data',
                        'shape': self.graph.node[node]['ports'][port][0],
                        'value': None
                    })
                self.graph.add_edges_from([(node, data, {'out': port})])
                data_nodes.update({port: data})

            for out_node, edge_attrs in out_edges:
                self.graph.remove_edge(node, out_node)
                if edge_attrs['from_port'] in data_nodes:
                    data = data_nodes[edge_attrs['from_port']]
                else:
                    raise RuntimeError(
                        "Something is wrong with the IR: there is an edge from a non-existent port"
                    )
                self.graph.add_edges_from([(data, out_node, {
                    'in': edge_attrs['to_port']
                })])
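
A hedged, standalone sketch of the edge-parsing step above; the IR file name is an assumption:

import xml.etree.ElementTree as ET
from collections import namedtuple

Edge = namedtuple('edge', ['from_layer', 'from_port', 'to_layer', 'to_port'])

root = ET.parse("model.xml").getroot()  # assumed IR file
xml_edges = [Edge(e.attrib['from-layer'], int(e.attrib['from-port']),
                  e.attrib['to-layer'], int(e.attrib['to-port']))
             for e in root.iter('edge')]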
Ejemplo n.º 45
0
 def get_tree(self):
     """Return the tree and the tree root."""
     tree = ElementTree.parse(self.file)
     return tree, tree.getroot()
Ejemplo n.º 46
0
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as badET
import defusedxml.ElementTree as goodET

xmlString = "<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend!</body>\n</note>"

# unsafe
tree = badET.fromstring(xmlString)
print(tree)
badET.parse("filethatdoesntexist.xml")
badET.iterparse("filethatdoesntexist.xml")
a = badET.XMLParser()

# safe
tree = goodET.fromstring(xmlString)
print(tree)
goodET.parse("filethatdoesntexist.xml")
goodET.iterparse("filethatdoesntexist.xml")
a = goodET.XMLParser()
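
Beyond the linter-oriented demo above, defusedxml also fails loudly at runtime: entity declarations raise EntitiesForbidden. A small sketch:

from defusedxml import ElementTree as safeET
from defusedxml.common import EntitiesForbidden

payload = '<!DOCTYPE x [<!ENTITY a "aaaa">]><x>&a;</x>'
try:
    safeET.fromstring(payload)
except EntitiesForbidden:
    print("entity declaration blocked")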
Ejemplo n.º 47
0
def parsexml_(*args, **kwargs):
    doc = etree_.parse(*args, **kwargs)
    return doc
Ejemplo n.º 48
0
    def post(self, request, format=None):

        project_id = request.data.get("project_id")
        scanner = request.data.get("scanner")
        xml_file = request.data.get("filename")
        scan_url = request.data.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        print(xml_file)
        if scanner == "zap_scan":
            date_time = datetime.datetime.now()
            scan_dump = zap_scans_db(scan_url=scan_url,
                                     scan_scanid=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     vul_status=scan_status,
                                     rescan='No')
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            zap_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
            return Response({"message": "Scan Data Uploaded"})
        elif scanner == "burp_scan":
            date_time = datetime.datetime.now()
            scan_dump = burp_scan_db(url=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
            # Burp scan XML parser
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            do_xml_data = burp_plugin.burp_scans(project_id,
                                                 scan_url,
                                                 scan_id)
            do_xml_data.burp_scan_data(root_xml)
            return Response({"message": "Scan Data Uploaded"})

        elif scanner == "arachni":
            date_time = datetime.datetime.now()
            scan_dump = arachni_scan_db(url=scan_url,
                                        scan_id=scan_id,
                                        date_time=date_time,
                                        project_id=project_id,
                                        scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            arachni_xml_parser.xml_parser(project_id=project_id,
                                          scan_id=scan_id,
                                          root=root_xml)
            return Response({"message": "Scan Data Uploaded"})

        elif scanner == 'netsparker':
            date_time = datetime.datetime.now()
            scan_dump = netsparker_scan_db(
                url=scan_url,
                scan_id=scan_id,
                date_time=date_time,
                project_id=project_id,
                scan_status=scan_status
            )
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            netsparker_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return Response({"message": "Scan Data Uploaded"})
        elif scanner == 'webinspect':
            date_time = datetime.datetime.now()
            scan_dump = webinspect_scan_db(
                url=scan_url,
                scan_id=scan_id,
                date_time=date_time,
                project_id=project_id,
                scan_status=scan_status
            )
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            webinspect_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return Response({"message": "Scan Data Uploaded"})

        return Response({"message": "Scan Data Uploaded"})
Ejemplo n.º 49
0
    def ParseFileObject(self, parser_mediator, file_object):
        """Parses an Windows FileHistory Config file-like object.

        Args:
            parser_mediator (ParserMediator): mediates interactions between parsers
                and other components, such as storage and dfvfs.
            file_object (dfvfs.FileIO): file-like object.

        Raises:
            UnableToParseFile: when the file cannot be parsed.
        """
        data = file_object.read(self._HEADER_READ_SIZE)
        if not data.startswith(b'<?xml'):
            raise errors.UnableToParseFile(
                'Not a Windows FileHistory Config.xml file [not XML]')

        _, _, data = data.partition(b'\n')
        if not data.startswith(b'<DataProtectionUserConfig'):
            raise errors.UnableToParseFile(
                'Not a Windows FileHistory Config.xml file [wrong XML root key]'
            )

        # the current offset of the file-like object needs to point at
        # the start of the file for ElementTree to parse the XML data correctly.
        file_object.seek(0, os.SEEK_SET)

        xml = ElementTree.parse(file_object)
        root_node = xml.getroot()
        event_data = FileHistoryConfigEventData()

        for sub_node in root_node:
            str_tag = sub_node.tag
            if str_tag == 'UserName':
                event_data.user_name = sub_node.text
                continue
            if str_tag == 'FriendlyName':
                event_data.friendly_name = sub_node.text
                continue
            if str_tag == 'PCName':
                event_data.pc_name = sub_node.text
                continue
            if str_tag == 'Library':
                for sub_lib_node in sub_node:
                    str_sub_tag = sub_lib_node.tag
                    if str_sub_tag == 'Folder':
                        event_data.library += sub_lib_node.text + ","
                        continue
                    else:
                        continue
            if str_tag == 'UserFolder':
                event_data.user_folder += sub_node.text + ","
                continue
            if 'FolderExclude' in str_tag:
                event_data.folder_exclude += sub_node.text + ","
                continue
            if str_tag == 'RetentionPolicies':
                for sub_ret_node in sub_node:
                    str_sub_tag = sub_ret_node.tag
                    if str_sub_tag == 'RetentionPolicyType':
                        if sub_ret_node.text == 'DISABLED':
                            event_data.retention_policy = "Forever Retention"
                            continue
                        if sub_ret_node.text == 'AGE LIMIT':
                            event_data.retention_policy = "Limited Retention"
                            continue
                        if sub_ret_node.text == 'NO LIMIT':
                            event_data.retention_policy = "Until Space is Needed"
                            continue
                        else:
                            event_data.retention_policy = sub_ret_node.text
                    if str_sub_tag == 'MinimumRetentionAge':
                        if sub_ret_node.text == '1':
                            event_data.minimum_retention_age = "1 Month"
                            continue
                        if sub_ret_node.text == '3':
                            event_data.minimum_retention_age = "3 Month"
                            continue
                        if sub_ret_node.text == '6':
                            event_data.minimum_retention_age = "6 Month"
                            continue
                        if sub_ret_node.text == '9':
                            event_data.minimum_retention_age = "9 Month"
                            continue
                        if sub_ret_node.text == '12':
                            event_data.minimum_retention_age = "1 Year"
                            continue
                        if sub_ret_node.text == '24':
                            event_data.minimum_retention_age = "2 Year"
                            continue
                        else:
                            event_data.minimum_retention_age = sub_ret_node.text
                            continue
            if str_tag == 'DPFrequency':
                event_data.dp_frequency = sub_node.text + ' second'
                continue
            if str_tag == 'DPStatus':
                event_data.dp_status = sub_node.text
                continue
            if str_tag == 'Target':
                for sub_target_node in sub_node:
                    str_sub_tag = sub_target_node.tag
                    if str_sub_tag == 'TargetName':
                        event_data.target_name = sub_target_node.text
                        continue
                    if str_sub_tag == 'TargetUrl':
                        event_data.target_url = sub_target_node.text
                        continue
                    if str_sub_tag == 'TargetVolumePath':
                        event_data.target_volume_path = sub_target_node.text
                        continue
                    if str_sub_tag == 'TargetDriveType':
                        event_data.target_drive_type = sub_target_node.text
                        continue
                    if str_sub_tag == 'TargetBackupStorePath':
                        event_data.target_backup_store_path = sub_target_node.text
                        continue
                    else:
                        continue
            else:
                continue

        date_time = dfdatetime_java_time.JavaTime(timestamp=0)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
        parser_mediator.ProduceEventWithEventData(event, event_data)
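
The one-to-one tags above (UserName, FriendlyName, PCName) lend themselves to a table-driven loop; a hedged standalone sketch with an assumed file path:

from xml.etree import ElementTree

SIMPLE_TAGS = {'UserName': 'user_name',
               'FriendlyName': 'friendly_name',
               'PCName': 'pc_name'}

root = ElementTree.parse("Config.xml").getroot()  # assumed path
fields = {}
for node in root:
    if node.tag in SIMPLE_TAGS:
        fields[SIMPLE_TAGS[node.tag]] = node.text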
Ejemplo n.º 50
0
    def __init__(self, filename, test):
        cxscan = ElementTree.parse(filename)
        root = cxscan.getroot()

        dupes = dict()

        for query in root.findall('Query'):
            categories = ''
            language = ''
            mitigation = 'N/A'
            impact = 'N/A'
            references = ''
            findingdetail = ''
            title = ''
            group = ''
            status = ''

            find_date = parser.parse(root.get("ScanStart"))
            name = query.get('name')
            cwe = query.get('cweId')

            if query.get('categories') is not None:
                categories = query.get('categories')

            if query.get('Language') is not None:
                language = query.get('Language')

            if query.get('group') is not None:
                group = query.get('group').replace('_', ' ')

            for result in query.findall('Result'):
                deeplink = result.get('DeepLink')

                if categories is not None:
                    findingdetail = 'Category: ' + categories + '\n'

                if language is not None:
                    findingdetail += 'Language: ' + language + '\n'

                if group is not None:
                    findingdetail += 'Group: ' + group + '\n'

                if result.get('Status') is not None:
                    findingdetail += 'Status: ' + result.get('Status') + '\n'

                findingdetail += 'Finding Link: ' + deeplink + '\n\n'

                dupe_key = categories + cwe + name + result.get(
                    'FileName') + result.get('Line')

                if dupe_key in dupes:
                    find = dupes[dupe_key]
                else:
                    dupes[dupe_key] = True

                    sev = result.get('Severity')
                    result.get('FileName')

                    for path in result.findall('Path'):
                        title = query.get('name').replace(
                            '_', ' ') + ' (' + path.get('PathId') + ')'
                        for pathnode in path.findall('PathNode'):
                            findingdetail += 'Source Object: %s\n' % (
                                pathnode.find('Name').text)

                            for codefragment in pathnode.findall(
                                    'Snippet/Line'):
                                findingdetail += 'Code: %s\n' % (
                                    codefragment.find('Code').text.strip())

                            findingdetail += '\n'

                    find = Finding(
                        title=title,
                        cwe=int(cwe),
                        test=test,
                        active=False,
                        verified=False,
                        description=findingdetail,
                        severity=sev,
                        numerical_severity=Finding.get_numerical_severity(sev),
                        mitigation=mitigation,
                        impact=impact,
                        references=references,
                        file_path=pathnode.find('FileName').text,
                        line=pathnode.find('Line').text,
                        url='N/A',
                        date=find_date,
                        static_finding=True)
                    dupes[dupe_key] = find
                    findingdetail = ''

        self.items = dupes.values()
Ejemplo n.º 51
0
    def __init__(
        self,
        train: bool = True,
        use_polygons: bool = False,
        **kwargs: Any,
    ) -> None:

        url, sha256, file_name = self.TRAIN if train else self.TEST
        super().__init__(url, file_name, sha256, True, **kwargs)
        self.train = train
        self.data: List[Tuple[str, Dict[str, Any]]] = []
        np_dtype = np.float32

        # Load xml data
        tmp_root = os.path.join(
            self.root, 'SceneTrialTrain' if self.train else 'SceneTrialTest') if sha256 else self.root
        xml_tree = ET.parse(os.path.join(tmp_root, 'words.xml'))
        xml_root = xml_tree.getroot()

        for image in xml_root:
            name, resolution, rectangles = image

            # File existence check
            if not os.path.exists(os.path.join(tmp_root, name.text)):
                raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, name.text)}")

            if use_polygons:
                # (x, y) coordinates of top left, top right, bottom right, bottom left corners
                _boxes = [
                    [
                        [float(rect.attrib['x']), float(rect.attrib['y'])],
                        [float(rect.attrib['x']) + float(rect.attrib['width']), float(rect.attrib['y'])],
                        [
                            float(rect.attrib['x']) + float(rect.attrib['width']),
                            float(rect.attrib['y']) + float(rect.attrib['height'])
                        ],
                        [float(rect.attrib['x']), float(rect.attrib['y']) + float(rect.attrib['height'])],
                    ]
                    for rect in rectangles
                ]
            else:
                # x_min, y_min, x_max, y_max
                _boxes = [
                    [float(rect.attrib['x']), float(rect.attrib['y']),  # type: ignore[list-item]
                     float(rect.attrib['x']) + float(rect.attrib['width']),  # type: ignore[list-item]
                     float(rect.attrib['y']) + float(rect.attrib['height'])]  # type: ignore[list-item]
                    for rect in rectangles
                ]

            # filter images without boxes
            if len(_boxes) > 0:
                # Convert them to relative
                w, h = int(resolution.attrib['x']), int(resolution.attrib['y'])
                boxes = np.asarray(_boxes, dtype=np_dtype)
                if use_polygons:
                    boxes[:, :, 0] /= w
                    boxes[:, :, 1] /= h
                else:
                    boxes[:, [0, 2]] /= w
                    boxes[:, [1, 3]] /= h

                # Get the labels
                labels = [lab.text for rect in rectangles for lab in rect if lab.text]

                self.data.append((name.text, dict(boxes=boxes, labels=labels)))

        self.root = tmp_root
Ejemplo n.º 52
0
import os
from glob import iglob
from io import BytesIO

from xml.etree.ElementTree import ElementTree as XMLTree

from defusedxml import ElementTree

for xml_filename in iglob("annotation/Annotation/**/*", recursive=True):
    if os.path.isdir(xml_filename):
        continue
    xml: XMLTree = ElementTree.parse(xml_filename)
    path, basename = os.path.split(xml_filename)
    xml.find("folder").text = ""
    xml.find("filename").text = f"{basename}.jpg"
    for object in xml.findall("object"):
        object.find("pose").text = "Unspecified"
    root = xml.getroot()
    root.remove(root.find("source"))
    root.remove(root.find("segment"))
    new_filename = os.path.join("voc", f"{basename}.xml")
    buffer = BytesIO()
    xml.write(buffer, encoding="utf-8", short_empty_elements=False)
    buffer.seek(0)
    xml_string = buffer.read().replace(b'\n', b'').replace(b'\t', b'')
    with open(new_filename, "wb") as file:
        file.write(xml_string)
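
The BytesIO round-trip above can be avoided by serializing the root directly with tostring; a hedged sketch with illustrative file names:

import xml.etree.ElementTree as ET
from defusedxml import ElementTree as safeET

tree = safeET.parse("annotation.xml")  # assumed input
data = ET.tostring(tree.getroot(), encoding="utf-8", short_empty_elements=False)
with open("out.xml", "wb") as file:
    file.write(data.replace(b'\n', b'').replace(b'\t', b''))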
Ejemplo n.º 53
0
 def get_findings(self, xml_output, test):
     tree = ElementTree.parse(xml_output)
     vuln_definitions = self.get_vuln_definitions(tree)
     return self.get_items(tree, vuln_definitions, test)
Ejemplo n.º 54
0
discord_logger.addHandler(fh)
discord_logger.addHandler(ch)

db = pymysql.connect(host=config["MySQL"]["host"],
                     user=config["MySQL"]["user"],
                     password=config["MySQL"]["password"],
                     db=config["MySQL"]["database"],
                     charset='utf8mb4',
                     cursorclass=pymysql.cursors.DictCursor)

reddit = praw.Reddit(client_id=config["Reddit"]["client_id"],
                     client_secret=config["Reddit"]["client_secret"],
                     user_agent=config["Reddit"]["user_agent"])

client = discord.Client()
tree = defusedetree.parse(xmlfile)
root = tree.getroot()

    
#  Sending a random message after a user has entered a command. 

#  This function is triggered whenever someone uses the command !reddit or !rule34.
async def send_random_message(channel): 
    global waitingToSend
    try:
        waitingToSend
    except NameError:
        waitingToSend = False
    if not waitingToSend: 
        #  If the function has already been run within the past 30 minutes (or less), it will not run again.
        waitingToSend = True
Ejemplo n.º 55
0
    def __init__(self, filename, test):
        cxscan = ElementTree.parse(filename)
        root = cxscan.getroot()

        dupes = dict()

        for query in root.findall('Query'):
            categories = ''
            language = ''
            mitigation = ''
            impact = ''
            references = ''
            findingdetail = ''
            title = ''
            group = ''
            status = ''

            find_date = root.get("ScanStart")
            name = query.get('name')
            cwe = query.get('cweId')

            if query.get('categories') is not None:
                categories = query.get('categories')

            if query.get('Language') is not None:
                language = query.get('Language')

            if query.get('group') is not None:
                group = query.get('group').replace('_', ' ')

            for result in query.findall('Result'):
                deeplink = result.get('DeepLink')

                if categories is not None:
                    findingdetail = 'Category: ' + categories + '\n'

                if language is not None:
                    findingdetail += 'Language: ' + language + '\n'

                if group is not None:
                    findingdetail += 'Group: ' + group + '\n'

                if result.get('Status') is not None:
                    findingdetail += 'Status: ' + result.get('Status') + '\n'

                findingdetail += 'Finding Link: ' + deeplink + '\n\n'

                dupe_key = categories + cwe + name + result.get('FileName') + result.get('Line')

                if dupe_key in dupes:
                    find = dupes[dupe_key]
                else:
                    dupes[dupe_key] = True

                    sev = result.get('Severity')
                    result.get('FileName')

                    for path in result.findall('Path'):
                        title = query.get('name').replace('_', ' ') + ' (' + path.get('PathId') + ')'
                        for pathnode in path.findall('PathNode'):
                            findingdetail += 'Source Object: ' + pathnode.find('Name').text + '\n'
                            findingdetail += 'Filename: ' + pathnode.find('FileName').text + '\n'
                            findingdetail += 'Line Number: ' + pathnode.find('Line').text + '\n'
                            for codefragment in pathnode.findall('Snippet/Line'):
                                findingdetail += 'Code: ' + codefragment.find('Code').text.strip() + '\n'

                            findingdetail += '\n'

                    find = Finding(title=title,
                                   cwe=int(cwe),
                                   test=test,
                                   active=False,
                                   verified=False,
                                   description=findingdetail,
                                   severity=sev,
                                   numerical_severity=Finding.get_numerical_severity(sev),
                                   mitigation=mitigation,
                                   impact=impact,
                                   references=references,
                                   url='N/A',
                                   date=find_date)
                    dupes[dupe_key] = find
                    findingdetail = ''

        self.items = dupes.values()
Ejemplo n.º 56
0
def write_swc(nmls_path, radius=0):
    node_radius = radius

    # store paths to nmls
    nmls = []
    if os.path.isdir(nmls_path):
        nmls = glob.glob(os.path.normpath(nmls_path) + '/*.nml')
    else:
        nmls.append(nmls_path)

    # create paths for resulting swcs
    swcs = []
    for nml in nmls:
        swcs.append(nml[:-4] + '.swc')
    for swc in swcs:
        if os.path.exists(swc):
            os.remove(swc)

    print('\nReading from .nml files...')
    nml_count = -1
    for nml in nmls:
        nml_count += 1
        tree = ET.parse(nml)
        thing_list = tree.findall('thing')
        for thing in thing_list:
            nodes = thing.find('nodes')
            edges = thing.find('edges')

            child_parent = []
            for edge in edges.findall('edge'):
                child = edge.get('target')
                parent = edge.get('source')
                child_parent.append((child, parent))

            child_list = [pair[0] for pair in child_parent]
            node_id, node_x, node_y, node_z, node_parent = ['']*5

            for node in nodes.findall('node'):
                node_id = node.get('id')
                node_x = node.get('x')
                node_y = node.get('y')
                node_z = float(node.get('z')) * 5.4545
                if node_radius == 0:
                    node_radius = node.get('radius')

                if node_id in child_list:
                    node_parent = child_parent[child_list.index(node_id)][1]
                else:
                    node_parent = -1

                swc = open(swcs[nml_count], 'a')
                swc.write(str(node_id) + ' 3 ' + str(node_x) + ' ' + str(node_y) + ' ' + str(node_z) + ' ' + str(node_radius) + ' ' + str(node_parent) + '\n')
                swc.close()

    # correct indexing: enforce consecutive natural numbering
    print('Writing final .swc files...')
    for swc in swcs:
        lines_to_write = []
        index_map = {'-1':-1}
        f = open(swc, 'r')
        lines = f.readlines()
        f.close()
        n = 1
        for line in lines:
            index_map[line.split(' ')[0]] = n
            n += 1

        for line in lines:
            line = str(index_map[line.split(' ')[0]]) + line[len(line.split(' ')[0]):]
            lines_to_write.append(line)

        n = 0
        for line in lines_to_write:
            line = line[:-len(line.split(' ')[6])] + str(index_map[(line.split(' ')[6])[:-1]])
            lines_to_write[n] = line
            n += 1

        with open(swc, 'w') as f:
            for line in lines_to_write:
                f.write(line + '\n')
        print(swc)
Example No. 57
0
    def __init__(self, filename, test):

        if "VulnerabilitiesSummary.xml" not in str(filename):
            raise NamespaceErr('Please ensure that you are uploading AppSpider\'s VulnerabilitiesSummary.xml file. '
                               'At this time it is the only file that is consumable by DefectDojo.')

        vscan = ElementTree.parse(filename)
        root = vscan.getroot()

        if "VulnSummary" not in str(root.tag):
            raise NamespaceErr('Please ensure that you are uploading AppSpider\'s VulnerabilitiesSummary.xml file. '
                               'At this time it is the only file that is consumable by DefectDojo.')

        dupes = dict()

        for finding in root.iter('Vuln'):

            severity = finding.find("AttackScore").text
            if severity == "0-Safe":
                severity = "Info"
            elif severity == "1-Informational":
                severity = "Low"
            elif severity == "2-Low":
                severity = "Medium"
            elif severity == "3-Medium":
                severity = "High"
            elif severity == "4-High":
                severity = "Critical"
            else:
                severity = "Info"

            title = finding.find("VulnType").text
            description = finding.find("Description").text
            mitigation = finding.find("Recommendation").text
            vuln_url = finding.find("VulnUrl").text

            # urlparse here is the Python 2 module (urllib.parse on Python 3)
            parts = urlparse.urlparse(vuln_url)

            cwe = int(finding.find("CweId").text)

            if title is None:
                title = ''
            if description is None:
                description = ''
            if mitigation is None:
                mitigation = ''

            # build the dedupe key only after title is guaranteed to be a string
            dupe_key = severity + title
            unsaved_endpoints = list()
            unsaved_req_resp = list()

            if dupe_key in dupes:
                # reuse the stored finding so endpoints and request/response
                # pairs from duplicate entries accumulate on it below
                find = dupes[dupe_key]
            else:
                find = Finding(title=title,
                               test=test,
                               active=False,
                               verified=False,
                               description=html2text.html2text(description),
                               severity=severity,
                               numerical_severity=Finding.get_numerical_severity(severity),
                               mitigation=html2text.html2text(mitigation),
                               impact="N/A",
                               references=None,
                               cwe=cwe)
                find.unsaved_endpoints = unsaved_endpoints
                find.unsaved_req_resp = unsaved_req_resp
                dupes[dupe_key] = find

            for attack in finding.iter("AttackRequest"):
                req = attack.find("Request").text
                resp = attack.find("Response").text

                find.unsaved_req_resp.append({"req": req, "resp": resp})

            find.unsaved_endpoints.append(Endpoint(protocol=parts.scheme,
                                                   host=parts.netloc,
                                                   path=parts.path,
                                                   query=parts.query,
                                                   fragment=parts.fragment,
                                                   product=test.engagement.product))

        self.items = dupes.values()
Example No. 58
0
# assumed imports for this snippet (stdlib only)
import glob
import os
import re
import xml.etree.ElementTree as ET


def write_swc(nmls_path, radius=0):
    node_radius = radius

    # store paths to nmls
    nmls = []
    if os.path.isdir(nmls_path):
        nmls = glob.glob(os.path.normpath(nmls_path) + '/*.nml')
    else:
        nmls.append(nmls_path)

    # create paths for resulting swcs
    swcs = []
    for nml in nmls:
        swcs.append(nml[:-4] + '.swc')
    for swc in swcs:
        if os.path.exists(swc):
            os.remove(swc)

    print('\nReading from .nml files...')
    for nml_count, nml in enumerate(nmls):
        tree = ET.parse(nml)
        thing_list = tree.findall('thing')
        for thing in thing_list:
            nodes = thing.find('nodes')
            edges = thing.find('edges')
            comments = thing.find('comments')

            child_parent = []
            for edge in edges.findall('edge'):
                child = edge.get('target')
                parent = edge.get('source')
                child_parent.append((child, parent))

            child_list = [pair[0] for pair in child_parent]
            node_id, node_x, node_y, node_z, node_parent = ['']*5

            # parse comments to give special type to some swc nodes later;
            # the <comments> element may be missing entirely
            id_to_type = {}
            comment_list = comments.findall('comment') if comments is not None else []
            for comment in comment_list:
                comment_text = comment.get('content') or ''
                if re.search('input', comment_text, re.IGNORECASE):
                    id_to_type[comment.get('node')] = 7
                elif re.search('lost', comment_text, re.IGNORECASE):
                    id_to_type[comment.get('node')] = 6
                elif re.search('myelin', comment_text, re.IGNORECASE):
                    id_to_type[comment.get('node')] = 0

            for node in nodes.findall('node'):
                node_id = node.get('id')
                node_x = float(node.get('x'))
                node_y = float(node.get('y'))
                node_z = float(node.get('z')) * 5.4545
                # fall back to each node's own radius when no override was given
                if radius == 0:
                    node_radius = node.get('radius')

                if node_id in child_list:
                    node_parent = child_parent[child_list.index(node_id)][1]
                else:
                    node_parent = -1

                # default SWC type is 3 (dendrite) unless a comment tagged the node
                node_type = id_to_type.get(node_id, 3)
                # append one SWC line: id, type, x, y, z, radius, parent
                with open(swcs[nml_count], 'a') as swc:
                    swc.write(str(node_id) + ' ' + str(node_type) + ' ' + str(node_x) + ' ' + str(node_y) + ' ' +
                              str(node_z) + ' ' + str(node_radius) + ' ' + str(node_parent) + '\n')

    # correct indexing: enforce consecutive natural numbering
    print('Writing final .swc files...')
    for swc in swcs:
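        # `correct` is a helper defined elsewhere in the source module; it
        # presumably re-indexes nodes like the inline loops in the previous example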
        lines_to_write = correct(swc, child_list, child_parent, node_id)
        f = open(swc, 'w')
        for line in lines_to_write:
            f.write(line + '\n')
        f.close()
        print(swc)
Example No. 59
0
# assumed imports for this snippet (stdlib only)
import tarfile
from io import BytesIO
import xml.etree.ElementTree as ET

# namespace prefix shared by every OVAL element below, introduced for readability
OVAL_NS = '{http://oval.mitre.org/XMLSchema/oval-definitions-5}'


def get_rhsa_and_rhba_lists_from_file(bz2_file):
    # Init
    tar = tarfile.open(mode='r:bz2', fileobj=BytesIO(bz2_file))
    rhsa_list = []
    rhsa_id_list = []
    rhba_list = []
    rhba_id_list = []
    rhsa_info_list = []
    rhsa_info_id_list = []
    rhba_info_list = []
    rhba_info_id_list = []
    for xml_file in tar.getmembers():
        if xml_file.size > 0:
            xml_file_content = tar.extractfile(xml_file.name)
            root = ET.parse(xml_file_content).getroot().find(OVAL_NS + 'definitions')
            for entry in root.findall(OVAL_NS + 'definition'):
                # Init
                metadata = entry.find(OVAL_NS + 'metadata')
                detail_info = {}

                # Get IDs
                rhsa_id = None
                rhba_id = None
                cves = []
                for reference in metadata.findall(OVAL_NS + 'reference'):
                    # Get RHSA (Red Hat Security Advisory)
                    if 'RHSA' in reference.attrib['ref_id']:
                        rhsa_id = reference.attrib['ref_id']
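                        # strip any revision suffix, e.g. 'RHSA-2017:0001-01' -> 'RHSA-2017:0001'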
                        if "-" in rhsa_id[5:]:
                            rhsa_id = rhsa_id[:rhsa_id.index("-", 5)]
                    # RHBA (Red Hat Bug Advisory)
                    if 'RHBA' in reference.attrib['ref_id']:
                        rhba_id = reference.attrib['ref_id']
                        if "-" in rhba_id[5:]:
                            rhba_id = rhba_id[:rhba_id.index("-", 5)]
                    # Get related CVEs
                    if reference.attrib['source'] == 'CVE':
                        cves.append(reference.attrib['ref_id'])

                detail_info['cve'] = cves

                # Get title and description
                detail_info['title'] = metadata.findtext(OVAL_NS + 'title')
                detail_info['description'] = metadata.findtext(OVAL_NS + 'description')

                # Get severity
                detail_info['severity'] = metadata.find(OVAL_NS + 'advisory') \
                                                  .find(OVAL_NS + 'severity').text
                # Append detail info
                if rhsa_id is not None:
                    detail_info['rhsa_id'] = rhsa_id
                    if rhsa_id not in rhsa_info_id_list:
                        rhsa_info_id_list.append(rhsa_id)
                        rhsa_info_list.append(detail_info)
                if rhba_id is not None:
                    detail_info['rhba_id'] = rhba_id
                    if rhba_id not in rhba_info_id_list:
                        rhba_info_id_list.append(rhba_id)
                        rhba_info_list.append(detail_info)

                # Get vulnerable products
                affected_cpe_list = metadata.find(OVAL_NS + 'advisory') \
                                            .find(OVAL_NS + 'affected_cpe_list')
                for cpe in affected_cpe_list:
                    if cpe.text is not None:
                        info_item = {}
                        splitted_product = cpe.text.split(":")
                        info_item['vendor'] = splitted_product[2]
                        info_item['product'] = splitted_product[3]
                        try:
                            info_item['version'] = splitted_product[4]
                        except IndexError:
                            info_item['version'] = '-'

                        # composite key used to de-duplicate advisory/product pairs
                        tmp = '#' + info_item['vendor'] + '#' + info_item['product'] + '#' + info_item['version']
                        if rhsa_id is not None:
                            info_item['rhsa_id'] = rhsa_id
                            tmp = rhsa_id + tmp
                            if tmp not in rhsa_id_list:
                                rhsa_id_list.append(tmp)
                                rhsa_list.append(info_item)
                        if rhba_id is not None:
                            info_item['rhba_id'] = rhba_id
                            tmp = rhba_id + tmp
                            if tmp not in rhba_id_list:
                                rhba_id_list.append(tmp)
                                rhba_list.append(info_item)

    # Return
    return rhsa_list, rhba_list, rhsa_info_list, rhba_info_list
Example No. 60
0
def OpenVAS_xml_upload(request):
    """
    OpenVAS XML file upload.
    :param request:
    :return:
    """
    all_project = project_db.objects.all()
    if request.method == "POST":
        project_id = request.POST.get("project_id")
        scanner = request.POST.get("scanner")
        xml_file = request.FILES['xmlfile']
        scan_ip = request.POST.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        if scanner == "openvas":
            date_time = datetime.now()
            scan_dump = scan_save_db(scan_ip=scan_ip,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
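            # stdlib ElementTree parses the upload here; defusedxml would be
            # a safer choice for untrusted XML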
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            OpenVas_Parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
            return HttpResponseRedirect("/networkscanners/")
        elif scanner == "nessus":
            date_time = datetime.now()
            scan_dump = nessus_scan_db(scan_ip=scan_ip,
                                       scan_id=scan_id,
                                       date_time=date_time,
                                       project_id=project_id,
                                       scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            Nessus_Parser.nessus_parser(
                root=root_xml,
                scan_id=scan_id,
                project_id=project_id,
            )
            return HttpResponseRedirect("/networkscanners/nessus_scan")
        elif scanner == "nmap":
            # date_time = datetime.now()
            # scan_dump = nessus_scan_db(
            #     scan_ip=scan_ip,
            #     scan_id=scan_id,
            #     date_time=date_time,
            #     project_id=project_id,
            #     scan_status=scan_status
            # )
            # scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            nmap_parser.xml_parser(
                root=root_xml,
                scan_id=scan_id,
                project_id=project_id,
            )
            return HttpResponseRedirect("/tools/nmap_scan/")

    return render(request, 'net_upload_xml.html', {'all_project': all_project})