def doSomethingWithResult(response):
    if response is None:
        sys.stderr.write("KO\n")
        return "KO"
    else:
        tree = ET.fromstring(response.text.encode('utf-8'))
        sys.stderr.write("XML response: " + str(response.text.encode('utf-8')) + "\n")
        status = "Free"
        # arrgh, namespaces!!
        elems = tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType")
        for elem in elems:
            if status == "Free":
                status = elem.text
                sys.stderr.write("Change status to: " + str(status) + "\n")
        tree2 = ET.fromstring(response.request.body.encode('utf-8'))
        elems = tree2.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}Address")
        for e in elems:
            room = e.text
        elems = tree.findall(".//faultcode")
        if elems:
            print("Error occurred")
            status = "N/A"
        sys.stderr.write(str(datetime.datetime.now().isoformat()) + ": Status for room: " +
                         str(rooms[room]) + " => " + status + "\n")
        result.append((status, rooms[room], room))
        return "OK"
def handle_SOAP():
    upload_key = current_app.config.get('UPLOAD_KEY')
    soapaction = request.headers.get('SOAPAction')
    current_app.logger.debug("Received SOAPAction: {}".format(soapaction))
    if soapaction == '"urn:StartSession"':
        root = etree.fromstring(request.data)
        transfermode = root.find(".//transfermode").text
        transfermodetimestamp = root.find(".//transfermodetimestamp").text
        cnonce = root.find(".//cnonce").text
        macaddress = root.find(".//macaddress").text
        credential = create_credential(macaddress, cnonce, upload_key)
        # EyeFi card doesn't accept cookies, so set a global var instead
        global snonce
        snonce = make_snonce()
        return render_template('start_session.xml',
                               transfermode=transfermode,
                               transfermodetimestamp=transfermodetimestamp,
                               credential=credential,
                               snonce=snonce)
    elif soapaction == '"urn:GetPhotoStatus"':
        root = etree.fromstring(request.data)
        macaddress = root.find(".//macaddress").text
        credential = root.find(".//credential").text
        # Unused, here for future reference
        # filename = root.find(".//filename").text
        # filesize = root.find(".//filesize").text
        # filesignature = root.find(".//filesignature").text
        # flags = root.find(".//flags").text
        expected_cred = create_credential(macaddress, snonce, upload_key, from_eyefi=True)
        current_app.logger.debug("Credential: {}\n"
                                 "Expected: {}".format(credential, expected_cred))
        if credential == expected_cred:
            return render_template('get_photo_status.xml', fileid=1, offset=0)
        else:
            return abort(403)
    elif soapaction == '"urn:MarkLastPhotoInRoll"':
        root = etree.fromstring(request.data)
        # Unused, here for future reference
        # macaddress = root.find(".//macaddress").text
        # mergedelta = root.find(".//mergedelta").text
        return render_template("mark_last.xml")
def parse_junit(xml, filename):
    """Generate failed tests as a series of (name, duration, text, filename) tuples."""
    try:
        tree = ET.fromstring(xml)
    except ET.ParseError:  # Python 3 syntax; the original "except ET.ParseError, e" is Python 2 only
        logging.exception('parse_junit failed for %s', filename)
        try:
            tree = ET.fromstring(re.sub(r'[\x00\x80-\xFF]+', '?', xml))
        except ET.ParseError as e:
            yield 'Gubernator Internal Fatal XML Parse Error', 0.0, str(e), filename
            return
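# Illustration, not from the original repo: the re.sub fallback above replaces
# NUL bytes and high-bit runs with '?' so ElementTree gets a second chance at
# almost-valid junit output. Sample input is made up.
import re
import xml.etree.ElementTree as ET

bad = '<testsuite><testcase name="a\x00b"/></testsuite>'  # NUL is illegal in XML
scrubbed = re.sub(r'[\x00\x80-\xFF]+', '?', bad)
print(scrubbed)          # <testsuite><testcase name="a?b"/></testsuite>
ET.fromstring(scrubbed)  # parses cleanly after the scrub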
def parse_xml(self, xml, filename):
    if not xml:
        return  # can't extract results from nothing!
    try:
        tree = ET.fromstring(xml)
    except ET.ParseError:  # Python 3 syntax; the original "except ET.ParseError, e" is Python 2 only
        logging.exception('parse_junit failed for %s', filename)
        try:
            tree = ET.fromstring(re.sub(r'[\x00\x80-\xFF]+', '?', xml))
        except ET.ParseError as e:
            self.failed.append(
                ('Gubernator Internal Fatal XML Parse Error', 0.0, str(e), filename, ''))
            return
def import_groups(service, username, password):
    r = requests.get('https://%s.%s/companies.xml' % (service, BASE_URL),
                     auth=(username, password))
    if r.status_code != 200:
        return r.status_code
    xml = ElementTree.fromstring(r.text)
    count = 0
    created = 0
    for company in xml.findall('company'):
        bc_id = company.find('id').text
        try:
            bc_company = BCCompany.objects.get(bc_id=bc_id)
        except BCCompany.DoesNotExist:
            bc_company = BCCompany(bc_id=bc_id)
            created += 1
        bc_company.name = company.find('name').text
        bc_company.address = company.find('address-one').text
        bc_company.address1 = company.find('address-two').text
        bc_company.country = 'US'  # Not Correct
        bc_company.city = company.find('city').text
        bc_company.state = 'HI'  # Not Correct  # company.find('state').text
        bc_company.locale = company.find('locale').text
        bc_company.fax = company.find('phone-number-fax').text
        bc_company.phone = company.find('phone-number-office').text
        bc_company.web = company.find('web-address').text
        bc_company.timezone = settings.TIME_ZONE
        bc_company.save()
        count += 1
    import_users.delay(service, username, password)
    return count, created
def parse(self, raw_data, filename=None):
    """
    Parse the specified replay XML

    Args:
        raw_data: Raw XML to be parsed

    Returns:
        None

    Raises:
        AssertionError: If the XML file has more than two top-level children
            (Expected: pov and doctype)
        AssertionError: If the first child is not a Doctype instance
        AssertionError: If the doctype does not specify the replay.dtd
        AssertionError: If the second child is not named 'pov'
        AssertionError: If the 'pov' element has more than two elements
        AssertionError: If the 'pov' element does not contain a 'cbid' element
        AssertionError: If the 'cbid' element value is blank
    """
    self.filename = filename
    tree = ET.fromstring(raw_data)

    assert tree.tag == 'pov'
    assert len(tree) in [2, 3]
    assert tree[0].tag == 'cbid'
    assert len(tree[0].tag) > 0
    self.name = tree[0].text

    assert tree[1].tag in ['seed', 'replay']
    seed_tree = None
    replay_tree = None
    if tree[1].tag == 'seed':
        seed_tree = tree[1]
        replay_tree = tree[2]
    else:
        seed_tree = None
        replay_tree = tree[1]

    if seed_tree is not None:
        assert len(seed_tree.tag) > 0
        seed = seed_tree.text
        assert len(seed) == 96
        if self.seed is not None:
            print("# Seed is set by XML and command line, using XML seed")
        self.seed = bytes.fromhex(seed)  # Python 3 equivalent of the original seed.decode('hex')

    parse_fields = {
        'decl': self.parse_decl,
        'read': self.parse_read,
        'write': self.parse_write,
        'delay': self.parse_delay,
    }

    for replay_element in replay_tree:
        assert replay_element.tag in parse_fields
        parse_fields[replay_element.tag](replay_element)
def parse_notice(path):
    with open(path, 'r') as f:
        m = re.search(r'(?:<\?xml.*>\s+)?<infringement.*</infringement>',
                      f.read(), re.IGNORECASE | re.DOTALL)
    if not m:
        raise RuntimeError('Couldn\'t find <infringement> tag in DMCA notice')
    try:
        xml = ElementTree.fromstring(m.group())
    except ElementTree.ParseError as e:
        log.error(e)
        raise RuntimeError('Could not parse DMCA notice XML') from e

    ns = {'acns': 'http://www.acns.net/ACNS'}
    try:
        ts = xml.findall('./acns:Source/acns:TimeStamp', ns)[0].text
        ip = xml.findall('./acns:Source/acns:IP_Address', ns)[0].text
        port = int(xml.findall('./acns:Source/acns:Port', ns)[0].text)
    except (IndexError, ValueError) as e:
        log.error(e)
        raise RuntimeError('Error parsing DMCA notice') from e

    try:
        ts = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')
        # replace() returns a new datetime; the original discarded the result
        ts = ts.replace(tzinfo=pytz.utc)
    except ValueError as e:
        raise ValueError('Could not parse timestamp: %s' % ts) from e

    return (ts, ip, port)
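# Hypothetical ACNS notice (all field values invented) in the shape
# parse_notice() expects; the <infringement> block normally sits inside a
# larger email body, which is why the function regex-extracts it first.
import tempfile

NOTICE = """Dear ISP,
<Infringement xmlns="http://www.acns.net/ACNS">
  <Source>
    <TimeStamp>2019-05-01T12:00:00Z</TimeStamp>
    <IP_Address>198.51.100.7</IP_Address>
    <Port>51413</Port>
  </Source>
</Infringement>
Regards"""

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write(NOTICE)
print(parse_notice(f.name))  # (datetime(2019, 5, 1, 12, 0, tzinfo=UTC), '198.51.100.7', 51413)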
def clear_metadata(instream, outstream):
    try:
        with zipfile.ZipFile(instream) as inzip:
            opf_path = _get_path_of_content_opf(inzip)
            opf_content = _read_content_opf(inzip, opf_path)
            removed_a_node = False
            try:
                root = defused_etree.fromstring(opf_content)
                for main_element in root:
                    logger.debug("main el %s " % main_element.tag)
                    if re.match(".*metadata$", main_element.tag):
                        logger.debug("Found metadata tag, cleaning")
                        while list(main_element):
                            # do not remove using a for loop
                            # - this will skip elements in python 2.7.5!
                            node_to_remove = list(main_element)[0]
                            logger.debug("Removing node %s" % node_to_remove.tag)
                            main_element.remove(node_to_remove)
                            removed_a_node = True
            except defused_etree.ParseError as e:
                logger.error("Caught a parse error while trying to clear epub metadata: %s" % repr(e))
                raise ValueError("Invalid EPUB syntax")
            if removed_a_node:
                logger.debug("Writing a new epub file")
                with zipfile.ZipFile(outstream, 'w') as outzip:
                    try:
                        _copy_zip_contents(inzip, outzip, [opf_path])
                    except zipfile.BadZipfile as e:
                        raise ValueError("Caught a BadZipFile exception: %s" % repr(e))
                    new_content = ElementTree.tostring(root)
                    _write_content_opf(outzip, opf_path, new_content)
def check_feed(cls):
    """Return a generator over the latest uploads to CPAN
    by querying an RSS feed.
    """
    url = "https://metacpan.org/feed/recent"

    try:
        response = cls.call_url(url)
    except Exception:  # pragma: no cover
        raise AnityaPluginException("Could not contact %s" % url)

    try:
        root = ET.fromstring(response.text)
    except ET.ParseError:
        raise AnityaPluginException("No XML returned by %s" % url)

    for item in root.iter(tag="{http://purl.org/rss/1.0/}item"):
        title = item.find("{http://purl.org/rss/1.0/}title")
        try:
            name, version = title.text.rsplit("-", 1)
        except ValueError:
            # pass the title to the log placeholder and skip the item,
            # otherwise the yield below would reuse stale name/version values
            _log.info("Unable to parse CPAN package %s into a name and version", title.text)
            continue
        homepage = "https://metacpan.org/release/%s/" % name
        yield name, homepage, cls.name, version
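# Usage sketch: check_feed is a generator on an Anitya backend class; the
# class name CpanBackend here is assumed for illustration.
for name, homepage, backend, version in CpanBackend.check_feed():
    print(name, version, homepage)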
def findRooms(prefix=None, anywhere=False):
    global rooms
    rooms = {}
    xml_template = open("resolvenames_template.xml", "r").read()
    xml = Template(xml_template)
    # Template.substitute already returns text on Python 3; no unicode() wrapper needed
    data = xml.substitute(name=prefix)
    headers = {}
    headers["Content-type"] = "text/xml; charset=utf-8"
    response = requests.post(url, headers=headers, data=data,
                             auth=HttpNtlmAuth(user, password))
    tree = ET.fromstring(response.text.encode('utf-8'))
    elems = tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}Resolution")
    for elem in elems:
        email = elem.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}EmailAddress")
        name = elem.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}DisplayName")
        sys.stderr.write("Perhaps found " + str(name[0].text) + " <" + str(email[0].text) + ">\n")
        if prefix is not None:
            if len(email) > 0 and len(name) > 0:
                if email[0].text.startswith("conf_") or email[0].text.startswith("CONF_"):
                    if name[0].text.startswith(prefix.upper()) or anywhere:
                        sys.stderr.write("Validate " + str(name[0].text) + " <" + str(email[0].text) + ">\n")
                        rooms[email[0].text] = name[0].text
                    else:
                        sys.stderr.write("Not validated due to not starting with prefix: " +
                                         str(prefix.upper()) + "\n")
                else:
                    sys.stderr.write("Not validated due to not starting with conf_\n")
            else:
                sys.stderr.write("Not validated due to null length\n")
        else:
            sys.stderr.write("Not validated due to prefix is none\n")
    return rooms
def get(id_):
    text = requests.get(URL % id_, stream=True).raw.read()
    text = text.decode(utils.web.getEncoding(text) or 'utf8')
    root = ElementTree.fromstring(text)
    assert root.tag == 'root', root
    resto = root[0]
    assert resto.tag == 'resto', resto
    res = []
    for menu in resto:
        assert menu.tag == 'menu', menu
        date = menu.attrib['date']
        parsed_date = datetime.datetime.strptime(date, '%Y-%m-%d')
        day_limit = datetime.datetime.now() - datetime.timedelta(hours=14)
        if parsed_date < day_limit:
            continue
        midi = menu[0]
        assert midi.tag == 'midi', midi
        interesting = INTERESTING.get(id_, None)
        if interesting:
            meals = [x.text for x in midi if x.attrib['nom'] in interesting]
        else:
            meals = [x.text for x in midi
                     if not any(y in x.text.lower() for y in BLACKLIST)]
        meals = [x.strip().replace('\n', ' ; ').strip() for x in meals if x.strip()]
        res.append((date, meals))
    return res
def findPlayer(saveFileLocation, read_data=False):
    if not read_data:
        root = ET.parse(saveFileLocation).getroot()
    else:
        root = ET.fromstring(saveFileLocation)
    player = root.find("player")
    return player
def postSimpleMessage():
    postdata = request.body.read().decode("utf-8")
    message = "<Result><StatusCode>FAILED</StatusCode><Text>Authentication failed.</Text></Result>"
    tree = xmlParser.fromstring(postdata)
    userNameFromRequest, passwordFromRequest = auth.extractAuth(tree)
    if auth.handleCommunityAuth(userNameFromRequest, passwordFromRequest):
        message = "<Result><StatusCode>OK</StatusCode><Text></Text></Result>"
        handleAlerts(tree, True)
    elif auth.authenticate(userNameFromRequest, passwordFromRequest, mongohost, mongoport):
        message = "<Result><StatusCode>OK</StatusCode><Text></Text></Result>"
        handleAlerts(tree, False)
    else:
        print("Authentication failed....")
    response = {}
    headers = {'Content-type': 'application/html'}
    response['status'] = "Success"
    raise HTTPResponse(message, status=200, headers=headers)
def test_parseissuexml_with_issue_has_finding(self):
    single_finding = """<?xml version="1.0" encoding="utf-8"?>
<!--XML Export of VCG Results for directory: C:\Projects\WebGoat.Net. Scanned for C# security issues.-->
<CodeIssueCollection>
<CodeIssue>
<Priority>6</Priority>
<Severity>Suspicious Comment</Severity>
<Title>Comment Indicates Potentially Unfinished Code</Title>
<Description>The comment includes some wording which indicates that the developer regards it as unfinished or does not trust it to work correctly.</Description>
<FileName>Findings.xml</FileName>
<Line>21</Line>
<CodeLine>TODO: Check the Code</CodeLine>
<Checked>False</Checked>
<CheckColour>LawnGreen</CheckColour>
</CodeIssue>
</CodeIssueCollection>"""

    vcgscan = ElementTree.fromstring(single_finding)
    finding = self.parser.parse_issue(vcgscan.findall('CodeIssue')[0], Test())
    self.assertEqual('Info', finding.severity)
    self.assertEqual('S4', finding.numerical_severity)
    self.assertEqual('Comment Indicates Potentially Unfinished Code', finding.title)
def autosetup_ihc_products(hass: HomeAssistantType, config, ihc_controller, controller_id):
    """Auto setup of IHC products from the IHC project file."""
    from defusedxml import ElementTree

    project_xml = ihc_controller.get_project()
    if not project_xml:
        _LOGGER.error("Unable to read project from IHC controller")
        return False
    project = ElementTree.fromstring(project_xml)

    # If an auto setup file exists in the configuration it will override
    yaml_path = hass.config.path(AUTO_SETUP_YAML)
    if not os.path.isfile(yaml_path):
        yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
    yaml = load_yaml_config_file(yaml_path)
    try:
        auto_setup_conf = AUTO_SETUP_SCHEMA(yaml)
    except vol.Invalid as exception:
        _LOGGER.error("Invalid IHC auto setup data: %s", exception)
        return False

    groups = project.findall('.//group')
    for component in IHC_PLATFORMS:
        component_setup = auto_setup_conf[component]
        discovery_info = get_discovery_info(component_setup, groups, controller_id)
        if discovery_info:
            discovery.load_platform(hass, component, DOMAIN, discovery_info, config)
    return True
def update_from_xml_str(oa_block, xml, **kwargs):
    """
    Update the OpenAssessment XBlock's content from an XML string definition.
    Parses the string using a library that avoids some known security
    vulnerabilities in etree.

    Args:
        oa_block (OpenAssessmentBlock): The open assessment block to update.
        xml (unicode): The XML definition of the XBlock's content.

    Kwargs:
        same as `update_from_xml`

    Returns:
        OpenAssessmentBlock

    Raises:
        UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
        InvalidRubricError: The rubric was not semantically valid.
        InvalidAssessmentsError: The assessments are not semantically valid.
    """
    # Parse the XML content definition
    # Use the defusedxml library implementation to avoid known security
    # vulnerabilities in ElementTree:
    # http://docs.python.org/2/library/xml.html#xml-vulnerabilities
    try:
        root = safe_etree.fromstring(xml.encode('utf-8'))
    except (ValueError, safe_etree.ParseError):
        raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))

    return update_from_xml(oa_block, root, **kwargs)
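# A minimal sketch of what defusedxml buys here (not part of the original
# module): plain xml.etree happily expands entity bombs such as "billion
# laughs", while defusedxml refuses the entity declaration outright.
import defusedxml.ElementTree as safe_etree
from defusedxml import EntitiesForbidden

BOMB = """<?xml version="1.0"?>
<!DOCTYPE lolz [<!ENTITY lol "lol"><!ENTITY lol2 "&lol;&lol;&lol;&lol;">]>
<openassessment>&lol2;</openassessment>"""

try:
    safe_etree.fromstring(BOMB)
except EntitiesForbidden:
    print("defusedxml rejected the entity declaration, as expected")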
def is_available(room_email, start_time, end_time):
    xml_template = open("getavailibility_template.xml", "r").read()
    xml = Template(xml_template)
    headers = {}
    headers["Content-type"] = "text/xml; charset=utf-8"
    # Template.substitute already returns text on Python 3; no unicode() wrapper needed
    data = xml.substitute(email=room_email, starttime=start_time, endtime=end_time).strip()
    status = "KO"
    response = requests.post(url, headers=headers, data=data,
                             auth=HttpNtlmAuth(user, password))
    if response is not None:
        status = "Free"
        tree = ET.fromstring(response.text.encode('utf-8'))
        # arrgh, namespaces!!
        elems = tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType")
        for elem in elems:
            status = elem.text
        elems = tree.findall(".//faultcode")
        if elems:
            sys.stderr.write("Error occurred\n")
            sys.stderr.write("tree: " + str(tree) + "\n")
            sys.stderr.write("response: " + response.text + "\n")
            status = "N/A"
    sys.stderr.write("Room status: " + str(status) + "\n")
    return (status == "Free")
def get_bugzilla_bug(bugzilla_url, bug_id):
    '''Read bug XML, return all fields and values in a dictionary.'''
    bug_xml = _fetch_bug_content(bugzilla_url, bug_id)
    tree = ElementTree.fromstring(bug_xml)

    bug_fields = {
        "long_desc": [],
        "attachment": [],
        "cc": [],
    }
    for bug in tree:
        for field in bug:
            if field.tag in ("long_desc", "attachment"):
                new = {}
                for data in field:
                    new[data.tag] = data.text
                bug_fields[field.tag].append(new)
            elif field.tag == "cc":
                bug_fields[field.tag].append(field.text)
            else:
                bug_fields[field.tag] = field.text
    return bug_fields
def stations():
    db_stations_cache = g.mongo.db.caches.find_one({"_id": "stations"})
    bulk_op = g.mongo.db.stations.initialize_unordered_bulk_op()
    bulk_run = False
    if not db_stations_cache or db_stations_cache["cached_until"] < time.time():
        xml_stations_response = requests.get(
            "https://api.eveonline.com/eve/ConquerableStationList.xml.aspx",
            headers=xml_headers
        )
        # XML Parse
        try:
            xml_stations_tree = ElementTree.fromstring(xml_stations_response.text)
        except ElementTree.ParseError:
            print(xml_stations_response.text)
            return None

        # Store in database
        xml_time_pattern = "%Y-%m-%d %H:%M:%S"
        g.mongo.db.caches.update(
            {"_id": "stations"},
            {"cached_until": int(calendar.timegm(
                time.strptime(xml_stations_tree[2].text, xml_time_pattern)))},
            upsert=True,
        )
        for station in xml_stations_tree[1][0]:
            bulk_run = True
            bulk_op.find({"_id": int(station.attrib["stationID"])}).upsert().update(
                {"$set": {"name": station.attrib["stationName"]}}
            )
    if bulk_run:
        bulk_op.execute()
def get_print_list(username, chart, period, api_key):
    '''return LastFM XML chart as a simple list'''
    url = ('http://ws.audioscrobbler.com/2.0/?method=user.gettop%s'
           '&user=%s&period=%s&api_key=%s' % (chart, username, period, api_key))
    print(url)  # print() form works on both Python 2 and 3; urllib2 below is Python 2 only
    raw_xml = urllib2.urlopen(url)
    print_list = []
    charts = ElementTree.fromstring(raw_xml.read())

    if chart == 'artists':
        for artist in charts.findall('topartists/artist'):
            print_list.append(artist.find('name').text)
    elif chart == 'albums':
        for album in charts.findall('topalbums/album'):
            for artist in album.findall('artist'):
                print_list.append("%s|%s" % (artist.find('name').text,
                                             album.find('name').text))
    elif chart == 'tracks':
        for track in charts.findall('toptracks/track'):
            for artist in track.findall('artist'):
                print_list.append("%s|%s" % (artist.find('name').text,
                                             track.find('name').text))
    else:
        raise CLIError(Exception("unknown type %s" % chart))
    return print_list
def etree_title_provider(body):
    """Get the title of a page from its content body.

    This implementation uses the defusedxml wrapper for etree.
    If no title is found on the page then None is returned.

    Args:
        body (str): The content body of an xhtml page.

    Returns:
        str: The text of the first <title></title> tag or None if the
            title is not found or the body is invalid xhtml.
    """
    try:
        root = ElementTree.fromstring(body)
    except ElementTree.ParseError:
        return None

    # getiterator() was removed in Python 3.9; iter() is the modern spelling
    for title in root.iter('title'):
        return title.text
    return None
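# Usage sketch (sample markup invented): the first <title> text is returned,
# and broken markup degrades to None instead of raising.
print(etree_title_provider("<html><head><title>Home</title></head><body/></html>"))  # Home
print(etree_title_provider("<html><head>"))  # None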
def test_has_children(self):
    root_node = ElementTree.fromstring('''
        <data dataset="countries">
            <country name="Liechtenstein" gdppc="141100" />
            <country name="Singapore" gdppc="59900" />
        </data>
    ''')
    result = utils.convert_xml_to_dict(root_node)
    expected_result = {
        'dataset': 'countries',
        'data': [
            {
                'name': 'Liechtenstein',
                'gdppc': 141100,
            },
            {
                'name': 'Singapore',
                'gdppc': 59900,
            },
        ],
    }
    self.assertEqual(expected_result, result)
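# utils.convert_xml_to_dict itself is not shown in this collection; the sketch
# below is one hypothetical implementation that satisfies the expectations of
# the test above (root attributes hoisted to the top level, children grouped
# under the root tag, digit-only attribute strings coerced to int).
def convert_xml_to_dict(root):
    def coerce(value):
        return int(value) if value.isdigit() else value

    result = {key: coerce(value) for key, value in root.attrib.items()}
    result[root.tag] = [
        {key: coerce(value) for key, value in child.attrib.items()}
        for child in root
    ]
    return result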
def _parse(cls, xml):
    """Parse the XML into a Normalizer.NormalizeResult"""
    try:
        node = ElementTree.fromstring(xml).find("Address")
    except ElementTree.ParseError:
        log.error("Failed to parse xml", exc_info=True)
        return NormalFactory.NORMALIZE_FAILED

    try:
        result = Normalizer.NormalizeResult(
            success=True,
            line1=cls._get_or_none(node, "Address2"),
            line2=cls._get_or_none(node, "Address1"),
            city=cls._get_or_none(node, "City"),
            state=cls._get_or_none(node, "State"),
            postalCode=cls._get_or_none(node, "Zip5"),
            raw=xml)
    except:
        log.error("Failed to parse", exc_info=True)
        result = Normalizer.NormalizeResult(
            success=True,
            line1=None,
            line2=None,
            city=None,
            state=None,
            postalCode=None,
            raw=xml)
    return result
def download_translations(self, source, language, text, unit, user):
    """Download list of possible translations from the service."""
    translations = []
    xp_translated = self.MS_TM_XPATH + 'TranslatedText'
    xp_confidence = self.MS_TM_XPATH + 'ConfidenceLevel'
    xp_original = self.MS_TM_XPATH + 'OriginalText'

    resp = self.soap_req(
        'GetTranslations',
        uuid=uuid4(),
        text=text,
        from_lang=source,
        to_lang=language,
        max_result=20,
    )
    root = ElementTree.fromstring(resp.read())
    results = root.find(self.MS_TM_XPATH + 'GetTranslationsResult')
    if results is not None:
        for translation in results:
            translations.append((
                translation.find(xp_translated).text,
                int(translation.find(xp_confidence).text),
                self.name,
                translation.find(xp_original).text,
            ))
    return translations
def character(char_ids):
    """
    :param char_ids: [character_id, ...]
    :return:
    """
    missing_names = []
    for char_id in char_ids:
        db_character = g.mongo.db.characters.find_one({"_id": char_id})
        if not db_character:
            missing_names.append(char_id)

    db_characters_cache = g.mongo.db.caches.find_one({"_id": "characters"})
    bulk_op = g.mongo.db.characters.initialize_unordered_bulk_op()
    bulk_run = False
    if missing_names or not db_characters_cache or db_characters_cache["cached_until"] < time.time():
        if db_characters_cache and db_characters_cache["cached_until"] > time.time():
            character_payload = {"ids": ",".join([str(x) for x in missing_names])}
        else:
            character_payload = {"ids": ",".join([str(x) for x in char_ids])}
        xml_character_response = requests.get(
            "https://api.eveonline.com/eve/CharacterAffiliation.xml.aspx",
            data=character_payload, headers=xml_headers
        )
        # XML Parse
        try:
            xml_character_tree = ElementTree.fromstring(xml_character_response.text)
        except ElementTree.ParseError:
            print(xml_character_response.text)
            return None

        xml_time_pattern = "%Y-%m-%d %H:%M:%S"
        g.mongo.db.caches.update(
            {"_id": "characters"},
            {"cached_until": int(calendar.timegm(
                time.strptime(xml_character_tree[2].text, xml_time_pattern)))},
            upsert=True,
        )
        if xml_character_tree[1].tag == "error":
            print(xml_character_tree[1].attrib["code"], xml_character_tree[1].text)
        else:
            for name in xml_character_tree[1][0]:
                bulk_run = True
                bulk_op.find({"_id": int(name.attrib["characterID"])}).upsert().update(
                    {"$set": {
                        "name": name.attrib["characterName"],
                        "corporation_id": int(name.attrib["corporationID"]),
                        "corporation_name": name.attrib["corporationName"],
                        "alliance_id": int(name.attrib["allianceID"]),
                        "alliance_name": name.attrib["allianceName"],
                    }}
                )
    if bulk_run:
        bulk_op.execute()
def validate_svg(svg_bytes):
    """Quick checks that a file is an svg. Returns the file as an xml tree"""
    try:
        tree = ElementTree.fromstring(svg_bytes)
        if tree.tag.upper().endswith('SVG'):  # endswith b/c namespaces
            return tree
    except ElementTree.ParseError:
        pass
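# Usage sketch (inputs invented): a namespaced root tag like
# '{http://www.w3.org/2000/svg}svg' still satisfies the endswith('SVG') check.
svg = b'<svg xmlns="http://www.w3.org/2000/svg"><rect width="1" height="1"/></svg>'
assert validate_svg(svg) is not None
assert validate_svg(b'<html></html>') is None  # parses, but the root tag is not svg
assert validate_svg(b'not xml') is None        # ParseError is swallowed, returns None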
def __init__(self, saveFile, read_data=False):
    self.saveFile = saveFile
    if not read_data:
        root = parse(saveFile).getroot()
    else:
        root = ElementTree.fromstring(saveFile)
    self.root = root
def read_metadata_from_string(xml):
    root = ET.fromstring(xml)
    result = Metadata()
    for child in root:
        if child.tag.endswith('metadata'):
            for meta_element in child:
                _parse_meta_element(meta_element, result)
    return result
def test_to_dict(self):
    element_tree = ElementTree.fromstring(CHECKSTYLE_SAMPLE_XML)
    dictionary1 = element_tree_to_dict(element_tree, filter_keys=[])
    dictionary2 = element_tree_to_dict(element_tree, filter_keys=None)
    for dictionary in [dictionary1, dictionary2]:
        self.assertTrue(isinstance(dictionary, dict))
        self.assertEqual(dictionary["tag"], "checkstyle")
        self.assertTrue(len(dictionary["children"]) > 0)
        self.assertEqual(dictionary["attrib"]["version"], "8.9")
def parseSafeNetXML(xml):
    """
    This function parses XML data of an Aladdin/SafeNet XML file for eToken PASS.

    It returns a dictionary of serial : { otpkey, counter, type }
    """
    TOKENS = {}
    elem_tokencontainer = etree.fromstring(xml)

    if getTagName(elem_tokencontainer) != "Tokens":
        raise ImportException("No toplevel element Tokens")

    for elem_token in list(elem_tokencontainer):
        SERIAL = None
        COUNTER = None
        HMAC = None
        DESCRIPTION = None
        if getTagName(elem_token) == "Token":
            SERIAL = elem_token.get("serial")
            log.debug("Found token with serial {0!s}".format(SERIAL))
            for elem_tdata in list(elem_token):
                tag = getTagName(elem_tdata)
                if "ProductName" == tag:
                    DESCRIPTION = elem_tdata.text
                    log.debug("The Token with the serial %s has the "
                              "productname %s" % (SERIAL, DESCRIPTION))
                if "Applications" == tag:
                    for elem_apps in elem_tdata:
                        if getTagName(elem_apps) == "Application":
                            for elem_app in elem_apps:
                                tag = getTagName(elem_app)
                                if "Seed" == tag:
                                    HMAC = elem_app.text
                                if "MovingFactor" == tag:
                                    COUNTER = elem_app.text
            if not SERIAL:
                log.error("Found token without a serial")
            else:
                if HMAC:
                    hashlib = "sha1"
                    if len(HMAC) == 64:
                        hashlib = "sha256"
                    TOKENS[SERIAL] = {'otpkey': HMAC,
                                      'counter': COUNTER,
                                      'type': 'hotp',
                                      'hashlib': hashlib}
                else:
                    log.error("Found token {0!s} without an element 'Seed'".format(SERIAL))
    return TOKENS
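# A hypothetical eToken PASS file (serial and seed invented) laid out the way
# the parser above walks it: Tokens > Token > ProductName / Applications >
# Application > Seed + MovingFactor. A 40-hex-char seed selects sha1.
SAMPLE = """<Tokens>
  <Token serial="F1234567">
    <ProductName>eToken PASS</ProductName>
    <Applications>
      <Application>
        <Seed>3132333435363738393031323334353637383930</Seed>
        <MovingFactor>0</MovingFactor>
      </Application>
    </Applications>
  </Token>
</Tokens>"""

# Expected shape: {'F1234567': {'otpkey': '3132...', 'counter': '0',
#                               'type': 'hotp', 'hashlib': 'sha1'}}
print(parseSafeNetXML(SAMPLE))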
def openURL(url_base, data=None, method='Get', cookies=None,
            username=None, password=None,
            timeout=config_loader(dataset="WMS_request_timeout"),
            headers=None, verify=None, cert=None, auth=None, proxies=None):
    # (mss) added proxies
    # (mss) timeout default of 30secs set by the config_loader
    """
    Function to open URLs.

    Uses requests library but with additional checks for OGC service
    exceptions and url formatting. Also handles cookies and simple user
    password authentication.
    """
    headers = headers if headers is not None else {}
    rkwargs = {}

    rkwargs['timeout'] = timeout

    if auth:
        if username:
            auth.username = username
        if password:
            auth.password = password
        if cert:
            auth.cert = cert
        verify = verify and auth.verify
    else:
        auth = Authentication(username, password, cert, verify)
    if auth.username and auth.password:
        rkwargs['auth'] = (auth.username, auth.password)
    rkwargs['cert'] = auth.cert
    rkwargs['verify'] = verify

    # FIXUP for WFS in particular, remove xml style namespace
    # @TODO does this belong here?
    method = method.split("}")[-1]

    if method.lower() == 'post':
        try:
            etree.fromstring(data)
            headers['Content-Type'] = 'text/xml'
        except (ParseError, UnicodeEncodeError) as error:
            # (mss)
            logging.debug("ParseError, UnicodeEncodeError %s", error)
        rkwargs['data'] = data
    elif method.lower() == 'get':
        rkwargs['params'] = data
    else:
        raise ValueError(f"Unknown method ('{method}'), expected 'get' or 'post'")

    if cookies is not None:
        rkwargs['cookies'] = cookies

    req = requests.request(method.upper(), url_base, headers=headers,
                           # MSS
                           proxies=proxies, **rkwargs)

    if req.status_code in [400, 401]:
        raise ServiceException(req.text)

    if req.status_code in [404, 500, 502, 503, 504]:  # add more if needed
        req.raise_for_status()

    # check for service exceptions without the http header set
    if 'Content-Type' in req.headers and req.headers['Content-Type'] in [
            'text/xml', 'application/xml',
            'application/vnd.ogc.se_xml', 'application/vnd.ogc.wms_xml']:
        # just in case 400 headers were not set, going to have to read the xml
        # to see if it's an exception report.
        se_tree = etree.fromstring(req.content)

        # to handle the variety of namespaces and terms across services
        # and versions, especially for "legacy" responses like WMS 1.3.0
        possible_errors = [
            '{http://www.opengis.net/ows}Exception',
            '{http://www.opengis.net/ows/1.1}Exception',
            '{http://www.opengis.net/ogc}ServiceException',
            'ServiceException'
        ]

        for possible_error in possible_errors:
            serviceException = se_tree.find(possible_error)
            if serviceException is not None:
                # and we need to deal with some message nesting
                raise ServiceException('\n'.join(
                    [str(t).strip() for t in serviceException.itertext() if t.strip()]))

    return ResponseWrapper(req)
def nlcd_helper() -> Dict[str, Any]:
    """Get legends and properties of the NLCD cover dataset.

    Notes
    -----
    The following references have been used:
        - https://github.com/jzmiller1/nlcd
        - https://www.mrlc.gov/data-services-page
        - https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend

    Returns
    -------
    dict
        Years where data is available and cover classes and categories,
        and roughness estimations.
    """
    url = ("https://www.mrlc.gov/downloads/sciweb1/shared/mrlc/metadata/"
           + "NLCD_2016_Land_Cover_Science_product_L48.xml")
    r = RetrySession().get(url)
    root = etree.fromstring(r.content)

    clist = root[4][1][1].text.split("\n")[2:]
    _colors = [i.split() for i in clist]
    colors = {int(c): (float(r), float(g), float(b)) for c, r, g, b in _colors}

    classes = {
        root[4][0][3][i][0][0].text:
        root[4][0][3][i][0][1].text.split("-")[0].strip()
        for i in range(3, len(root[4][0][3]))
    }

    nlcd_meta = {
        "impervious_years": [2016, 2011, 2006, 2001],
        "canopy_years": [2016, 2011],
        "cover_years": [2016, 2013, 2011, 2008, 2006, 2004, 2001],
        "classes": classes,
        "categories": {
            "Unclassified": ("0",),  # one-element tuple; the original ("0") was a plain string
            "Water": ("11", "12"),
            "Developed": ("21", "22", "23", "24"),
            "Barren": ("31",),
            "Forest": ("41", "42", "43", "45", "46"),
            "Shrubland": ("51", "52"),
            "Herbaceous": ("71", "72", "73", "74"),
            "Planted/Cultivated": ("81", "82"),
            "Wetlands": ("90", "95"),
        },
        "roughness": {
            "11": 0.001,
            "12": 0.022,
            "21": 0.0404,
            "22": 0.0678,
            "23": 0.0678,
            "24": 0.0404,
            "31": 0.0113,
            "41": 0.36,
            "42": 0.32,
            "43": 0.4,
            "45": 0.4,
            "46": 0.24,
            "51": 0.24,
            "52": 0.4,
            "71": 0.368,
            "72": np.nan,
            "81": 0.325,
            "82": 0.16,
            "90": 0.086,
            "95": 0.1825,
        },
        "colors": colors,
    }

    return nlcd_meta
def __init__(self, paste_xml):
    paste_xml = "\n".join(paste_xml.strip().split('\r\n')) + "\n</paste>"
    root = ET.fromstring(paste_xml)
    # getchildren() was removed in Python 3.9; iterating the element directly is equivalent
    for child in root:
        setattr(self, child.tag.split('_')[1], child.text)
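# Illustration only (class name Paste and field values assumed): Pastebin-style
# API listings prefix every field with "paste_", hence split('_')[1], and the
# raw fragment lacks a closing tag, hence the appended "</paste>".
RAW = "<paste>\r\n<paste_key>abc123</paste_key>\r\n<paste_title>demo</paste_title>\r\n"
p = Paste(RAW)
print(p.key, p.title)  # abc123 demo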
def read_event_file(eventxml):
    """Read event.xml file from disk, returning a dictionary of attributes.

    Input XML format looks like this (all elements are required unless
    explicitly labeled optional):

    .. code-block:: xml

        <earthquake
            id="2008ryan"
            netid="us"
            network="USGS National Network" (required but may be empty string)
            lat="30.9858"
            lon="103.3639"
            mag="7.9"
            depth="19.0"
            time="YYYY-mm-ddTHH:MM:SS.ffffffZ" (omitting fractional seconds
                                                is also supported)
            locstring="EASTERN SICHUAN, CHINA"
            mech='SS' | 'NM' | 'RS' | 'ALL' (optional)
            reference="Smith et al. 2016" (optional)
            productcode='us2008ryan' (optional)
        />

    Args:
        eventxml (str): Path to event XML file OR file-like object.

    Returns:
        dict: Dictionary with keys:

            - id: Origin network and origin code (i.e., us2017abcd).
            - netid: Origin network ("us").
            - network: (A long-form description of the network)
            - lat: Origin latitude
            - lon: Origin longitude
            - mag: Origin magnitude
            - depth: Origin depth
            - time: Origin time as an HistoricTime object.
            - locstring: Location string
            - mech: (optional) Moment mechanism, one of:
              - 'RS' (Reverse)
              - 'SS' (Strike-Slip)
              - 'NM' (Normal)
              - 'ALL' (Undetermined)
            - reference: (optional) A description of the source of the data.
            - productcode: (optional) This product source's unique code for
              this particular ShakeMap.

    Raises:
        ValueError: If the time string cannot be parsed into a datetime object.
        KeyError: If any of the required attributes are missing from event.xml.
    """
    if isinstance(eventxml, str):
        tree = dET.parse(eventxml)
        root = tree.getroot()
    else:
        data = eventxml.read()
        root = dET.fromstring(data)

    # Turn XML content into dictionary
    if root.tag == 'earthquake':
        xmldict = dict(root.items())
    else:
        eq = root.find('earthquake')
        xmldict = dict(eq.items())

    eqdict = {}

    #########################################################
    # A Short Primer on PDL-style Identifiers
    # Because Everybody (Including The Author) Forgets It.
    #
    # In PDL, there are 4 identifiers that fully specify a product:
    # - source           The network that generated the *product* (us, ci, etc.).
    # - code             The unique ID string that identifies this product,
    #                    usually prepended by *source* (us2008abcd).
    # - eventsource      The network that created the *origin* (us, ci, etc.)
    # - eventsourcecode  The code within that network that uniquely
    #                    identifies the event (2008abcd).
    #
    # For our purposes, we're storing *source* and *code* as
    # *productsource* and *productcode* respectively in the
    # container, in an effort to reduce confusion about their
    # meaning. Time will tell.
    #########################################################

    # read in the id fields
    eqdict['id'] = xmldict['id']

    # This isn't optional, but maybe it isn't in some old files
    if 'network' in xmldict:
        eqdict['network'] = xmldict['network']
    else:
        eqdict['network'] = ""

    eqdict['netid'] = xmldict['netid']

    # look for the productcode attribute in the xml,
    # otherwise use the event id
    if 'productcode' in xmldict:
        eqdict['productcode'] = xmldict['productcode']
    elif isinstance(eventxml, str):
        eqdict['productcode'] = eqdict['id']
    else:
        # It's up to the user of this data how to construct the product code
        pass

    # Support old event file date/times
    if 'time' in xmldict:
        try:
            eqdict['time'] = HistoricTime.strptime(xmldict['time'],
                                                   constants.TIMEFMT)
        except ValueError:
            try:
                eqdict['time'] = HistoricTime.strptime(xmldict['time'],
                                                       constants.ALT_TIMEFMT)
            except ValueError:
                raise ValueError("Couldn't convert %s to HistoricTime" %
                                 xmldict['time'])
    else:
        if 'year' not in xmldict or 'month' not in xmldict or \
           'day' not in xmldict or 'hour' not in xmldict or \
           'minute' not in xmldict or 'second' not in xmldict:
            raise ValueError("Missing date/time elements in event file.")
        eqdict['time'] = HistoricTime.datetime(xmldict['year'],
                                               xmldict['month'],
                                               xmldict['day'],
                                               xmldict['hour'],
                                               xmldict['minute'],
                                               xmldict['second'])

    eqdict['lat'] = float(xmldict['lat'])
    eqdict['lon'] = float(xmldict['lon'])
    eqdict['depth'] = float(xmldict['depth'])
    eqdict['mag'] = float(xmldict['mag'])
    eqdict['locstring'] = xmldict['locstring']

    if 'mech' in xmldict:
        eqdict['mech'] = xmldict['mech']
    # Older files may have "type" instead of "mech"
    if 'type' in xmldict:
        eqdict['type'] = xmldict['type']
    if 'reference' in xmldict:
        eqdict['reference'] = xmldict['reference']

    return eqdict
def excheck_table():
    try:
        from os import listdir
        from pathlib import PurePath, Path
        from datetime import datetime
        from flask import request

        if request.method == "GET":
            jsondata = {"ExCheck": {'Templates': [], 'Checklists': []}}
            from FreeTAKServer.controllers.ExCheckControllers.templateToJsonSerializer import templateSerializer
            excheckTemplates = DatabaseController().query_ExCheck()
            for template in excheckTemplates:
                templateData = template.data
                templatejson = {
                    "filename": templateData.filename,
                    "name": templateData.keywords.name,
                    "submissionTime": templateData.submissionTime,
                    "submitter": str(dbController.query_user(
                        query=f'uid == "{template.creatorUid}"',
                        column=['callsign'])),
                    "uid": templateData.uid,
                    "hash": templateData.hash,
                    "size": templateData.size,
                    "description": templateData.keywords.description
                }
                jsondata["ExCheck"]['Templates'].append(templatejson)
            excheckChecklists = DatabaseController().query_ExCheckChecklist()
            for checklist in excheckChecklists:
                try:
                    templatename = checklist.template.data.name
                except AttributeError:
                    templatename = "template removed"
                checklistjson = {
                    "filename": checklist.filename,
                    "name": checklist.name,
                    "startTime": datetime.strftime(checklist.startTime,
                                                   "%Y-%m-%dT%H:%M:%S.%fZ"),
                    "submitter": checklist.callsign,
                    "uid": checklist.uid,
                    "description": checklist.description,
                    "template": templatename
                }
                jsondata["ExCheck"]['Checklists'].append(checklistjson)
            return json.dumps(jsondata), 200

        elif request.method == "DELETE":
            jsondata = request.data
            ExCheckArray = json.loads(jsondata)["ExCheck"]
            for item in ExCheckArray["Templates"]:
                templateitem = DatabaseController().query_ExCheck(
                    f'ExCheckData.uid == "{item["uid"]}"', verbose=True)[0]
                os.remove(str(PurePath(Path(MainConfig.ExCheckFilePath),
                                       Path(templateitem.data.filename))))
                DatabaseController().remove_ExCheck(
                    f'PrimaryKey == "{templateitem.PrimaryKey}"')
            for item in ExCheckArray["Checklists"]:
                checklistitem = DatabaseController().query_ExCheckChecklist(
                    f'uid == "{item["uid"]}"')[0]
                os.remove(str(PurePath(Path(MainConfig.ExCheckChecklistFilePath),
                                       Path(checklistitem.filename))))
                DatabaseController().remove_ExCheckChecklist(f'uid == "{item["uid"]}"')
            return 'success', 200

        elif request.method == "POST":
            try:
                import uuid
                from FreeTAKServer.controllers.ExCheckControllers.templateToJsonSerializer import templateSerializer
                xmlstring = f'<?xml version="1.0"?><event version="2.0" uid="{uuid.uuid4()}" type="t-x-m-c" time="2020-11-28T17:45:51.000Z" start="2020-11-28T17:45:51.000Z" stale="2020-11-28T17:46:11.000Z" how="h-g-i-g-o"><point lat="0.00000000" lon="0.00000000" hae="0.00000000" ce="9999999" le="9999999" /><detail><mission type="CHANGE" tool="ExCheck" name="exchecktemplates" authorUid="S-1-5-21-2720623347-3037847324-4167270909-1002"><MissionChanges><MissionChange><contentResource><filename>61b01475-ad44-4300-addc-a9474ebf67b0.xml</filename><hash>018cd5786bd6c2e603beef30d6a59987b72944a60de9e11562297c35ebdb7fd6</hash><keywords>test init</keywords><keywords>dessc init</keywords><keywords>FEATHER</keywords><mimeType>application/xml</mimeType><name>61b01475-ad44-4300-addc-a9474ebf67b0</name><size>1522</size><submissionTime>2020-11-28T17:45:47.980Z</submissionTime><submitter>wintak</submitter><tool>ExCheck</tool><uid>61b01475-ad44-4300-addc-a9474ebf67b0</uid></contentResource><creatorUid>S-1-5-21-2720623347-3037847324-4167270909-1002</creatorUid><missionName>exchecktemplates</missionName><timestamp>2020-11-28T17:45:47.983Z</timestamp><type>ADD_CONTENT</type></MissionChange></MissionChanges></mission></detail></event>'
                # this is where the client will post the xmi of a template
                from datetime import datetime
                from defusedxml import ElementTree as etree
                import hashlib

                # possibly the uid of the client submitting the template
                authoruid = request.args.get('clientUid')
                if not authoruid:
                    authoruid = 'server-uid'
                XMI = request.data.decode()
                serializer = templateSerializer(XMI)
                object = serializer.convert_template_to_object()
                object.timestamp = datetime.strptime(object.timestamp,
                                                     "%Y-%m-%dT%H:%M:%S.%fZ")
                serializer.create_DB_object(object)
                xml = etree.fromstring(XMI)
                path = str(PurePath(Path(MainConfig.ExCheckFilePath),
                                    Path(f'{object.data.uid}.xml')))
                with open(path, 'w+') as file:
                    file.write(XMI)
                    file.close()
                uid = object.data.uid

                temp = etree.fromstring(XMI)
                cot = etree.fromstring(xmlstring)
                cot.find('detail').find('mission').set("authorUid", authoruid)
                resources = cot.find('detail').find('mission').find(
                    'MissionChanges').find('MissionChange').find('contentResource')
                resources.find('filename').text = temp.find(
                    'checklistDetails').find('uid').text + '.xml'
                resources.findall('keywords')[0].text = temp.find(
                    'checklistDetails').find('name').text
                resources.findall('keywords')[1].text = temp.find(
                    'checklistDetails').find('description').text
                resources.findall('keywords')[2].text = temp.find(
                    'checklistDetails').find('creatorCallsign').text
                resources.find('uid').text = temp.find(
                    'checklistDetails').find('uid').text
                resources.find('name').text = temp.find(
                    'checklistDetails').find('uid').text
                resources.find('size').text = str(len(XMI))
                resources.find('hash').text = str(
                    hashlib.sha256(str(XMI).encode()).hexdigest())
                z = etree.tostring(cot)
                from FreeTAKServer.model.testobj import testobj
                object = testobj()
                object.xmlString = z
                APIPipe.put(object)
                return str(uid), 200
            except Exception as e:
                print(str(e))
    except Exception as e:
        return str(e), 500
def _parse_xml(self, xml_str: str) -> Any:
    try:
        xml = ET.fromstring(xml_str)
        return xml
    except ET.ParseError:
        raise PcsNoStatusException('Broken XML was given')
def auth_crest(code, refresh=False, discord_roles=True):
    # Code is CharacterOwnerHash on refresh and actual authorization code on non-refresh

    # SSO Authentication
    auth_headers = {
        "Authorization": "Basic " + str(base64.b64encode(
            bytes(secrets["client_id"] + ":" + secrets["secret_key"], "utf8")))[2:-1],
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "login.eveonline.com"
    }
    if not refresh:
        given_user = None
        auth_payload = {"grant_type": "authorization_code", "code": code}
    else:
        given_user = g.mongo.db.users.find_one({"_id": code})
        if given_user and given_user.get("refresh_token"):
            auth_payload = {
                "grant_type": "refresh_token",
                "refresh_token": given_user["refresh_token"]
            }
        elif given_user and not given_user.get("refresh_token"):
            # Resets users with invalid refresh tokens.
            # Note to remove from code if refresh tokens become stale too often.
            g.mongo.db.users.update({"_id": code}, {"$set": {
                "corporation_id": 0,
                "corporation_name": "",
                "alliance_id": 0,
                "alliance_name": "",
                "cached_until": 0,
                "refresh_token": ""
            }})
            return None, None
        else:
            return None, None

    auth_response = requests.post("https://login.eveonline.com/oauth/token",
                                  data=auth_payload, headers=auth_headers)

    # Abort on EVE API server errors
    try:
        auth_token = auth_response.json()
        if not auth_token.get("access_token"):
            print(auth_token)
            g.mongo.db.users.update({"_id": code}, {"$set": {
                "corporation_id": 0,
                "corporation_name": "",
                "alliance_id": 0,
                "alliance_name": "",
                "cached_until": 0,
                "refresh_token": ""
            }})
            if given_user:
                if given_user.get("discord_id"):
                    g.redis.publish("titdev-auth", "#" + given_user["discord_id"] + " None")
                if given_user.get("email"):
                    forum_edit(given_user, "log_out")
            return None, None
    except ValueError:
        auth_token = None
        if not refresh:
            abort(400)
        else:
            g.mongo.db.users.update({"_id": code}, {"$set": {
                "corporation_id": 0,
                "corporation_name": "",
                "alliance_id": 0,
                "alliance_name": "",
                "cached_until": 0,
                "refresh_token": ""
            }})
            if given_user:
                if given_user.get("discord_id"):
                    g.redis.publish("titdev-auth", "#" + given_user["discord_id"] + " None")
                if given_user.get("email"):
                    forum_edit(given_user, "log_out")
            return None, None

    # CREST Authentication
    character_headers = {
        "User-Agent": user_agent,
        "Authorization": "Bearer " + auth_token["access_token"],
        "Host": "login.eveonline.com"
    }
    crest_char_response = requests.get("https://login.eveonline.com/oauth/verify",
                                       headers=character_headers)
    crest_char = crest_char_response.json()

    # Check user cache
    db_user = g.mongo.db.users.find_one({"_id": crest_char["CharacterOwnerHash"]})

    # Update character info if cache has finished or character doesn't exist.
    if not db_user or db_user["cached_until"] < time.time():
        # XML Character
        xml_char_payload = {"characterID": crest_char["CharacterID"]}
        xml_char_headers = {"User-Agent": user_agent}
        xml_char_response = requests.get(
            "https://api.eveonline.com/eve/CharacterInfo.xml.aspx",
            data=xml_char_payload, headers=xml_char_headers)
        # XML Parse
        xml_tree = ElementTree.fromstring(xml_char_response.text)

        # Update Database
        xml_time_pattern = "%Y-%m-%d %H:%M:%S"
        if refresh:
            g.mongo.db.users.update(
                {"_id": crest_char["CharacterOwnerHash"]},
                {"$set": {
                    "character_id": crest_char["CharacterID"],
                    "character_name": crest_char["CharacterName"],
                    "corporation_id": int(xml_tree[1][7].text),
                    "corporation_name": xml_tree[1][8].text.strip(),
                    "alliance_id": int(float(xml_tree[1][10].text)),
                    "alliance_name": xml_tree[1][11].text.strip(),
                    "refresh_token": auth_token["refresh_token"],
                    "cached_until": int(calendar.timegm(
                        time.strptime(xml_tree[2].text, xml_time_pattern)))
                }},
                upsert=True)
        else:
            g.mongo.db.users.update(
                {"_id": crest_char["CharacterOwnerHash"]},
                {"$set": {
                    "character_id": crest_char["CharacterID"],
                    "character_name": crest_char["CharacterName"],
                    "corporation_id": int(xml_tree[1][7].text),
                    "corporation_name": xml_tree[1][8].text.strip(),
                    "alliance_id": int(float(xml_tree[1][10].text)),
                    "alliance_name": xml_tree[1][11].text.strip(),
                    "refresh_token": auth_token["refresh_token"],
                    "last_sign_on": int(time.time()),
                    "cached_until": int(calendar.timegm(
                        time.strptime(xml_tree[2].text, xml_time_pattern)))
                }},
                upsert=True)

        # Refresh current user
        db_user = g.mongo.db.users.find_one({"_id": crest_char["CharacterOwnerHash"]})

        # Check only on refresh due to rate limits
        if db_user and db_user.get("discord_id"):
            auth_discord(crest_char["CharacterOwnerHash"], sync=discord_roles)
        if db_user.get("forum_id"):
            forum_edit(db_user, "log_out")

    return db_user, crest_char
def _decode(self, data, **kwargs):
    return ET.fromstring(data)
def zap_result_save(self, all_vuln, project_id, un_scanid, username, target_url):
    """
    The function saves all data in the Archery database.

    :param all_vuln:
    :param project_id:
    :param un_scanid:
    :return:
    """
    date_time = datetime.now()
    zap_enabled = False
    all_zap = zap_settings_db.objects.filter(username=username)
    for zap in all_zap:
        zap_enabled = zap.enabled

    if zap_enabled is False:
        root_xml = ET.fromstring(all_vuln)
        en_root_xml = ET.tostring(root_xml, encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        try:
            zap_xml_parser.xml_parser(username=username,
                                      project_id=project_id,
                                      scan_id=un_scanid,
                                      root=root_xml_en)
            self.zap.core.delete_all_alerts()
        except Exception as e:
            print(e)
    else:
        # 'reference' appeared twice in the original global list; listed once here
        global name, attack, wascid, description, reference, sourceid, \
            solution, param, method, url, messageId, alert, pluginId, other, \
            evidence, cweid, risk, vul_col, false_positive
        for data in all_vuln:
            for key, value in data.items():
                if key == 'name':
                    name = value
                if key == 'attack':
                    attack = value
                if key == 'wascid':
                    wascid = value
                if key == 'description':
                    description = value
                if key == 'reference':
                    reference = value
                if key == 'sourceid':
                    sourceid = value
                if key == 'solution':
                    solution = value
                if key == 'param':
                    param = value
                if key == 'method':
                    method = value
                if key == 'url':
                    url = value
                if key == 'pluginId':
                    pluginId = value
                if key == 'other':
                    other = value
                if key == 'alert':
                    alert = value
                if key == 'messageId':
                    messageId = value
                if key == 'evidence':
                    evidence = value
                if key == 'cweid':
                    cweid = value
                if key == 'risk':
                    risk = value

            if risk == "High":
                vul_col = "danger"
                risk = "High"
            elif risk == 'Medium':
                vul_col = "warning"
                risk = "Medium"
            elif risk == 'info':
                vul_col = "info"
                risk = "Low"
            else:
                vul_col = "info"
                risk = "Low"

            dup_data = name + risk + target_url
            duplicate_hash = hashlib.sha256(dup_data.encode('utf-8')).hexdigest()
            match_dup = zap_scan_results_db.objects.filter(
                dup_hash=duplicate_hash).values('dup_hash').distinct()
            lenth_match = len(match_dup)
            vuln_id = uuid.uuid4()

            if lenth_match == 0:
                duplicate_vuln = 'No'
                dump_data = zap_scan_results_db(vuln_id=vuln_id,
                                                vuln_color=vul_col,
                                                scan_id=un_scanid,
                                                project_id=project_id,
                                                confidence=confidence,
                                                wascid=wascid,
                                                risk=risk,
                                                reference=reference,
                                                url=url,
                                                name=name,
                                                solution=solution,
                                                param=url,
                                                sourceid=sourceid,
                                                pluginId=pluginId,
                                                alert=alert,
                                                description=description,
                                                false_positive='No',
                                                rescan='No',
                                                vuln_status='Open',
                                                dup_hash=duplicate_hash,
                                                vuln_duplicate=duplicate_vuln,
                                                evidence=evidence,
                                                username=username)
                dump_data.save()
            else:
                duplicate_vuln = 'Yes'
                dump_data = zap_scan_results_db(vuln_id=vuln_id,
                                                vuln_color=vul_col,
                                                scan_id=un_scanid,
                                                project_id=project_id,
                                                confidence=confidence,
                                                wascid=wascid,
                                                risk=risk,
                                                reference=reference,
                                                url=url,
                                                name=name,
                                                solution=solution,
                                                param=url,
                                                sourceid=sourceid,
                                                pluginId=pluginId,
                                                alert=alert,
                                                description=description,
                                                false_positive='Duplicate',
                                                rescan='No',
                                                vuln_status='Duplicate',
                                                dup_hash=duplicate_hash,
                                                vuln_duplicate=duplicate_vuln,
                                                evidence=evidence,
                                                username=username)
                dump_data.save()

            false_p = zap_scan_results_db.objects.filter(
                false_positive_hash=duplicate_hash)
            fp_lenth_match = len(false_p)
            if fp_lenth_match == 1:
                false_positive = 'Yes'
            else:
                false_positive = 'No'

            vul_dat = zap_scan_results_db.objects.filter(username=username,
                                                         vuln_id=vuln_id)
            full_data = []
            for data in vul_dat:
                key = 'Evidence'
                value = data.evidence
                instance = key + ': ' + value
                full_data.append(instance)
            removed_list_data = ','.join(full_data)
            zap_scan_results_db.objects.filter(username=username,
                                               vuln_id=vuln_id).update(param=full_data)

    zap_all_vul = zap_scan_results_db.objects.filter(username=username,
                                                     scan_id=un_scanid,
                                                     false_positive='No')
    duplicate_count = zap_scan_results_db.objects.filter(username=username,
                                                         scan_id=un_scanid,
                                                         vuln_duplicate='Yes')

    total_high = len(zap_all_vul.filter(risk="High"))
    total_medium = len(zap_all_vul.filter(risk="Medium"))
    total_low = len(zap_all_vul.filter(risk="Low"))
    total_info = len(zap_all_vul.filter(risk="Informational"))
    total_duplicate = len(duplicate_count.filter(vuln_duplicate='Yes'))
    total_vul = total_high + total_medium + total_low + total_info

    zap_scans_db.objects.filter(username=username, scan_scanid=un_scanid) \
        .update(total_vul=total_vul,
                date_time=date_time,
                high_vul=total_high,
                medium_vul=total_medium,
                low_vul=total_low,
                info_vul=total_info,
                total_dup=total_duplicate,
                scan_url=target_url)

    if total_vul == total_duplicate:
        zap_scans_db.objects.filter(username=username, scan_scanid=un_scanid) \
            .update(total_vul=total_vul,
                    date_time=date_time,
                    high_vul=total_high,
                    medium_vul=total_medium,
                    low_vul=total_low,
                    total_dup=total_duplicate)
def granule_preview(self, dataset_id='', image_variable='', path=''):
    '''The PODAAC Image service renders granules in the \
    PO.DAACs catalog to images such as jpeg and/or png. \
    This image service also utilizes OGC WMS protocol. \
    (http://www.opengeospatial.org/standards/wms). If the \
    granule does not have any data in the given selected \
    bounding box, HTTP 500 will be thrown since there is \
    no data to be imaged. Granule Search service can be used \
    to find level 2 swath data. However, the level 2 \
    spatial search uses coverage footprint polygons \
    generated for each granule, and this footprint can \
    contain no data or gaps. If the selected bounding box \
    resides on no data or gaps, HTTP 500 will be thrown. \
    There are three request methods in this service. They \
    are GetCapabilities, GetLegendGraphic, and GetMap.

    :param dataset_id: dataset persistent ID. dataset_id or \
        short_name is required for a granule search. Example: \
        PODAAC-ASOP2-25X01
    :type dataset_id: :mod:`string`

    :param image_variable: variables of the granule which have \
        'Preview Images'. Image variables can be found \
        from Dataset Variable service. Use "id" from "imgVariable" \
        element.
    :type image_variable: :mod:`string`

    :param path: Destination directory into which the granule \
        needs to be downloaded.
    :type path: :mod:`string`

    :returns: a png image file.
    '''
    try:
        bbox = '-180,-90,180,90'
        if dataset_id == '':
            raise Exception("Required dataset_id")
        image_data = self.granule_search(dataset_id=dataset_id, bbox=bbox)
        root = ET.fromstring(image_data.encode('utf-8'))

        # fetching the [URL Template]
        url_template = ''
        for entry in root.iter('{http://www.w3.org/2005/Atom}entry'):
            for element in entry:
                if element.tag == '{http://www.w3.org/2005/Atom}link':
                    if element.attrib['title'] == "Preview Image":
                        url_template = element.attrib['href']
                        break
        if url_template == '':
            raise Exception("Preview Image not available for this dataset.")

        url = url_template + '/' + image_variable + '.png'
        if path == '':
            path = os.path.join(os.path.dirname(__file__), dataset_id + '.png')
        else:
            path = path + '/' + dataset_id + '.png'
        with open(path, 'wb') as image:
            image.write(urlopen(url).read())
    except Exception as e:
        print(e)
        raise
    return image
def get_from_hbogo(self, url, response_format='json', use_cache=True, retry=0):
    self.log("GET FROM HBO URL: " + url)
    self.log("GET FROM HBO RESPONSE FORMAT: " + response_format)
    if not self.use_cache:
        use_cache = False
    url_hash = Util.hash256_string(url)
    if use_cache:
        self.log("GET FROM HBO USING CACHE...")
        cached_data = self.get_from_cache(url_hash)
        if cached_data is not None and cached_data is not False:
            self.log("GET FROM HBO Serving from cache...")
            if response_format == 'json':
                return json.loads(py2_encode(cached_data))
            elif response_format == 'xml':
                return ET.fromstring(py2_encode(cached_data))
        if cached_data is False:
            self.log("GET FROM HBO, URL on exclude list, cache disabled...")
            use_cache = False
    try:
        self.log("GET FROM HBO, requesting from Hbo Go...")
        r = requests.get(url, headers=self.loggedin_headers)
        self.log("GET FROM HBO STATUS: " + str(r.status_code))
        if int(r.status_code) != 200:
            if retry < self.max_comm_retry:
                self.log("RETURNED STATUS " + str(r.status_code) +
                         " resetting login and retrying request...")
                self.del_login()
                self.login()
                return self.get_from_hbogo(url, response_format, use_cache, retry + 1)
            xbmcgui.Dialog().ok(self.LB_ERROR, self.language(30008) + str(r.status_code))
            return False
        if use_cache:
            try:
                self.log("SAVING URL TO CACHE")
                self.cache(url_hash, r.text)
            except Exception:
                self.log("Caching WARNING: " + traceback.format_exc())
        if response_format == 'json':
            return r.json()
        elif response_format == 'xml':
            return ET.fromstring(py2_encode(r.text))
    except requests.RequestException as e:
        self.log("GET FROM HBO ERROR: " + repr(e))
        xbmcgui.Dialog().ok(self.LB_ERROR, self.language(30005))
        return False
    except Exception:
        self.log("GET TO HBO UNEXPECTED ERROR: " + traceback.format_exc())
        xbmcgui.Dialog().ok(self.LB_ERROR, self.language(30004))
        return False
def launch_arachni_scan(target, project_id, rescan_id, rescan, scan_id, user):
    arachni_hosts = None
    arachni_ports = None
    all_arachni = arachni_settings_db.objects.all()
    for arachni in all_arachni:
        arachni_hosts = arachni.arachni_url
        arachni_ports = arachni.arachni_port

    arachni = PyArachniapi.arachniAPI(arachni_hosts, arachni_ports)
    check = [
        "xss_event", "xss", "xss_script_context", "xss_tag", "xss_path",
        "xss_dom_script_context", "xss_dom", "sql_injection",
        "sql_injection_differential", "sql_injection_timing", "csrf",
        "common_files", "directory_listing",
    ]
    data = {"url": target, "checks": check, "audit": {}}
    d = json.dumps(data)
    scan_launch = arachni.scan_launch(d)
    time.sleep(3)
    try:
        scan_data = scan_launch.data
        for key, value in scan_data.items():
            if key == 'id':
                scan_run_id = value
        notify.send(user, recipient=user, verb='Arachni Scan Started on URL %s' % target)
    except Exception:
        notify.send(user, recipient=user, verb='Arachni Connection Not found')
        print("Arachni Connection Not found")
        return

    date_time = datetime.now()
    try:
        save_all_scan = arachni_scan_db(
            project_id=project_id,
            url=target,
            scan_id=scan_id,
            date_time=date_time,
            rescan_id=rescan_id,
            rescan=rescan,
        )
        save_all_scan.save()
    except Exception as e:
        print(e)

    scan_data = scan_launch.data
    for key, value in scan_data.items():
        if key == 'id':
            scan_run_id = value

    scan_sum = arachni.scan_summary(id=scan_run_id).data
    for key, value in scan_sum.items():
        if key == 'status':
            scan_status = value

    while scan_status != 'done':
        status = '0'
        if scan_sum['statistics']['browser_cluster']['queued_job_count'] and \
                scan_sum['statistics']['browser_cluster']['total_job_time']:
            status = 100 - scan_sum['statistics']['browser_cluster']['queued_job_count'] * 100 / \
                scan_sum['statistics']['browser_cluster']['total_job_time']
        arachni_scan_db.objects.filter(scan_id=scan_id).update(scan_status=status)
        scan_sum = arachni.scan_summary(id=scan_run_id).data
        for key, value in scan_sum.items():
            if key == 'status':
                scan_status = value
        time.sleep(3)

    if scan_status == 'done':
        xml_report = arachni.scan_xml_report(id=scan_run_id).data
        root_xml = ET.fromstring(xml_report)
        arachni_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
        arachni_scan_db.objects.filter(scan_id=scan_id).update(scan_status='100')
        print("Data uploaded !!!!")
        notify.send(user, recipient=user, verb='Arachni Scan Completed on URL %s' % target)
def parse_junit(xml):
    """Generate failed tests as a series of dicts. Ignore skipped tests."""
    # NOTE: this is modified from gubernator/view_build.py
    try:
        tree = ET.fromstring(xml)
    except ET.ParseError:
        print("Malformed xml, skipping")
        yield from []  # return an empty iterator to skip results for this test
        return

    # pylint: disable=redefined-outer-name
    def make_result(name, time, failure_text):
        if failure_text:
            if time is None:
                return {'name': name, 'failed': True, 'failure_text': failure_text}
            return {'name': name, 'time': time, 'failed': True, 'failure_text': failure_text}
        if time is None:
            return {'name': name}
        return {'name': name, 'time': time}

    # Note: skipped tests are ignored because they make rows too large for BigQuery.
    # Knowing that a given build could have ran a test but didn't for some reason
    # isn't very interesting.
    def parse_result(child_node):
        time = float(child_node.attrib.get('time') or 0)  # time val can be ''
        failure_text = None
        for param in child_node.findall('failure'):
            failure_text = param.text or param.attrib.get('message', 'No Failure Message Found')
        skipped = child_node.findall('skipped')
        return time, failure_text, skipped

    if tree.tag == 'testsuite':
        for child in tree.findall('testcase'):
            name = child.attrib.get('name', '<unspecified>')
            time, failure_text, skipped = parse_result(child)
            if skipped:
                continue
            yield make_result(name, time, failure_text)
    elif tree.tag == 'testsuites':
        for testsuite in tree:
            suite_name = testsuite.attrib.get('name', '<unspecified>')
            for child in testsuite.findall('testcase'):
                name = '%s %s' % (suite_name, child.attrib.get('name', '<unspecified>'))
                time, failure_text, skipped = parse_result(child)
                if skipped:
                    continue
                yield make_result(name, time, failure_text)
    else:
        logging.error('unable to find failures, unexpected tag %s', tree.tag)
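# Usage sketch with an invented junit payload: the passing case yields
# {'name': 'ok', 'time': 0.1}, the failing case carries failure_text, and the
# skipped case is dropped entirely.
JUNIT = """<testsuite>
  <testcase name="ok" time="0.1"/>
  <testcase name="boom" time="0.2"><failure>assert 1 == 2</failure></testcase>
  <testcase name="later" time="0"><skipped/></testcase>
</testsuite>"""

for result in parse_junit(JUNIT):
    print(result)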
def get_vt_xml(self, single_vt: Tuple[str, Dict]) -> Element:
    """Gets a single vulnerability test information in XML format.

    Returns:
        Element with single vulnerability test information in XML format.
    """
    if not single_vt or single_vt[1] is None:
        return Element('vt')

    vt_id, vt = single_vt
    name = vt.get('name')
    vt_xml = Element('vt')
    vt_xml.set('id', vt_id)

    for name, value in [('name', name)]:
        elem = SubElement(vt_xml, name)
        elem.text = str(value)

    xml_helper = XmlStringVTHelper()

    if vt.get('vt_params'):
        params_xml_str = xml_helper.get_params_vt_as_xml_str(
            vt_id, vt.get('vt_params'))
        vt_xml.append(secET.fromstring(params_xml_str))

    if vt.get('vt_refs'):
        refs_xml_str = xml_helper.get_refs_vt_as_xml_str(
            vt_id, vt.get('vt_refs'))
        vt_xml.append(secET.fromstring(refs_xml_str))

    if vt.get('vt_dependencies'):
        dependencies = xml_helper.get_dependencies_vt_as_xml_str(
            vt_id, vt.get('vt_dependencies'))
        vt_xml.append(secET.fromstring(dependencies))

    if vt.get('creation_time'):
        vt_ctime = xml_helper.get_creation_time_vt_as_xml_str(
            vt_id, vt.get('creation_time'))
        vt_xml.append(secET.fromstring(vt_ctime))

    if vt.get('modification_time'):
        vt_mtime = xml_helper.get_modification_time_vt_as_xml_str(
            vt_id, vt.get('modification_time'))
        vt_xml.append(secET.fromstring(vt_mtime))

    if vt.get('summary'):
        summary_xml_str = xml_helper.get_summary_vt_as_xml_str(
            vt_id, vt.get('summary'))
        vt_xml.append(secET.fromstring(summary_xml_str))

    if vt.get('impact'):
        impact_xml_str = xml_helper.get_impact_vt_as_xml_str(
            vt_id, vt.get('impact'))
        vt_xml.append(secET.fromstring(impact_xml_str))

    if vt.get('affected'):
        affected_xml_str = xml_helper.get_affected_vt_as_xml_str(
            vt_id, vt.get('affected'))
        vt_xml.append(secET.fromstring(affected_xml_str))

    if vt.get('insight'):
        insight_xml_str = xml_helper.get_insight_vt_as_xml_str(
            vt_id, vt.get('insight'))
        vt_xml.append(secET.fromstring(insight_xml_str))

    if vt.get('solution'):
        solution_xml_str = xml_helper.get_solution_vt_as_xml_str(
            vt_id,
            vt.get('solution'),
            vt.get('solution_type'),
            vt.get('solution_method'),
        )
        vt_xml.append(secET.fromstring(solution_xml_str))

    if vt.get('detection') or vt.get('qod_type') or vt.get('qod'):
        detection_xml_str = xml_helper.get_detection_vt_as_xml_str(
            vt_id, vt.get('detection'), vt.get('qod_type'), vt.get('qod'))
        vt_xml.append(secET.fromstring(detection_xml_str))

    if vt.get('severities'):
        severities_xml_str = xml_helper.get_severities_vt_as_xml_str(
            vt_id, vt.get('severities'))
        vt_xml.append(secET.fromstring(severities_xml_str))

    if vt.get('custom'):
        custom_xml_str = xml_helper.get_custom_vt_as_xml_str(
            vt_id, vt.get('custom'))
        vt_xml.append(secET.fromstring(custom_xml_str))

    return vt_xml
def xml_upload(request):
    """
    Handle uploaded XML scan report files.

    :param request:
    :return:
    """
    all_project = project_db.objects.all()
    if request.method == "POST":
        project_id = request.POST.get("project_id")
        scanner = request.POST.get("scanner")
        xml_file = request.FILES['xmlfile']
        scan_url = request.POST.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        if scanner == "zap_scan":
            date_time = datetime.now()
            scan_dump = zap_scans_db(scan_url=scan_url,
                                     scan_scanid=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     vul_status=scan_status,
                                     rescan='No')
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            en_root_xml = ET.tostring(root_xml,
                                      encoding='utf8').decode('ascii', 'ignore')
            root_xml_en = ET.fromstring(en_root_xml)
            zap_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml_en)
            return HttpResponseRedirect(reverse('zapscanner:zap_scan_list'))
        elif scanner == "burp_scan":
            date_time = datetime.now()
            scan_dump = burp_scan_db(url=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
            # Burp scan XML parser
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            en_root_xml = ET.tostring(root_xml,
                                      encoding='utf8').decode('ascii', 'ignore')
            root_xml_en = ET.fromstring(en_root_xml)
            burp_xml_parser.burp_scan_data(root_xml_en, project_id, scan_id)
            print("Save scan Data")
            return HttpResponseRedirect(reverse('burpscanner:burp_scan_list'))
        elif scanner == "arachni":
            date_time = datetime.now()
            scan_dump = arachni_scan_db(url=scan_url,
                                        scan_id=scan_id,
                                        date_time=date_time,
                                        project_id=project_id,
                                        scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            arachni_xml_parser.xml_parser(project_id=project_id,
                                          scan_id=scan_id,
                                          root=root_xml)
            print("Save scan Data")
            return HttpResponseRedirect(
                reverse('arachniscanner:arachni_scan_list'))
        elif scanner == 'netsparker':
            date_time = datetime.now()
            scan_dump = netsparker_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            netsparker_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return HttpResponseRedirect(
                reverse('netsparkerscanner:netsparker_scan_list'))
        elif scanner == 'webinspect':
            date_time = datetime.now()
            scan_dump = webinspect_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            webinspect_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return HttpResponseRedirect(
                reverse('webinspectscanner:webinspect_scan_list'))
        elif scanner == 'acunetix':
            date_time = datetime.now()
            scan_dump = acunetix_scan_db(url=scan_url,
                                         scan_id=scan_id,
                                         date_time=date_time,
                                         project_id=project_id,
                                         scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            acunetix_xml_parser.xml_parser(project_id=project_id,
                                           scan_id=scan_id,
                                           root=root_xml)
            return HttpResponseRedirect(
                reverse('acunetixscanner:acunetix_scan_list'))
        elif scanner == 'dependencycheck':
            date_time = datetime.now()
            scan_dump = dependencycheck_scan_db(project_name=scan_url,
                                                scan_id=scan_id,
                                                date_time=date_time,
                                                project_id=project_id,
                                                scan_status=scan_status)
            scan_dump.save()
            data = etree.parse(xml_file)
            root = data.getroot()
            dependencycheck_report_parser.xml_parser(project_id=project_id,
                                                     scan_id=scan_id,
                                                     data=root)
            return HttpResponseRedirect(
                reverse('dependencycheck:dependencycheck_list'))
        elif scanner == 'findbugs':
            date_time = datetime.now()
            scan_dump = findbugs_scan_db(project_name=scan_url,
                                         scan_id=scan_id,
                                         date_time=date_time,
                                         project_id=project_id,
                                         scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root = tree.getroot()
            findbugs_report_parser.xml_parser(project_id=project_id,
                                              scan_id=scan_id,
                                              root=root)
            return HttpResponseRedirect(reverse('findbugs:findbugs_list'))
        elif scanner == 'nikto':
            date_time = datetime.now()
            scan_dump = nikto_result_db(
                date_time=date_time,
                scan_url=scan_url,
                scan_id=scan_id,
                project_id=project_id,
            )
            scan_dump.save()
            nikto_html_parser(xml_file, project_id, scan_id)
            return HttpResponseRedirect(reverse('tools:nikto'))
    return render(request, 'upload_xml.html', {'all_project': all_project})
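# Several branches above serialize the parsed tree back out and decode it with
# errors='ignore' before re-parsing, which silently strips any non-ASCII bytes
# some report generators emit. A standalone sketch of that roundtrip:
import xml.etree.ElementTree as ET

raw = '<report><issue name="caf\u00e9 injection"/></report>'
root = ET.fromstring(raw)
ascii_xml = ET.tostring(root, encoding='utf8').decode('ascii', 'ignore')
clean_root = ET.fromstring(ascii_xml)
print(clean_root[0].get('name'))  # 'caf injection' -- the e-acute is dropped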
async def processor(self, msg):
    podAllowed = False
    mention = ""
    mention_len = ""
    try:
        firstname = msg['user']['firstName']
    except KeyError:
        firstname = "N/A"
    try:
        lastname = msg['user']['lastName']
    except KeyError:
        lastname = "N/A"
    displayName = msg['user']['displayName']
    email = msg['user']['email']
    userID = msg['user']['userId']
    try:
        username = msg['user']['username']
    except KeyError:
        username = "N/A"
    streamID = msg['stream']['streamId']
    streamType = msg['stream']['streamType']
    userFromid = UserClient.get_user_from_id(self, userID)
    userCompany = userFromid['company']
    logging.debug("--> User ID: " + str(userID) + " & full name: "
                  + str(firstname) + " " + str(lastname))
    try:
        logging.debug("--> User email: " + str(email) + " & username: "
                      + str(username) + " display Name: " + str(displayName))
    except Exception:
        logging.debug("--> User email: " + str(email) + " & displayName: "
                      + str(displayName))
    logging.debug("--> Stream Type: " + str(streamType) + " with stream ID: "
                  + str(streamID))
    logging.debug("--> User is from: \"" + userCompany + "\" pod")

    ## Normal message in the chat - no @mention, #hashtag nor $cashtag
    msg_xml = msg['message']
    msg_root = ET.fromstring(msg_xml)
    msg_text = msg_root[0].text
    logging.debug(msg_text)

    try:
        ## Get the command sent and check its length
        message_raw = self.sym_message_parser.get_text(msg)
        list_len = int(len(message_raw))
        ## Join the items into one string
        var_raw = ""
        for l in range(list_len):
            var_raw += str(message_raw[l]) + " "
        message_reader = str(var_raw).replace("[", "").replace("'", "").replace("]", "")
        logging.debug("message_reader: " + str(message_reader))
    except Exception:
        return await Help.help(self, msg)

    ## Getting @mention details
    try:
        mention_raw = self.sym_message_parser.get_mentions(msg)
        mention = str(mention_raw).replace("['", "").replace("', '", ", ").replace("']", "")
        logging.debug("mentions, hashtags, cashtags: " + str(mention))
        mention_split = str(mention).split(",")
        mention_len = len(str(mention_split[0]))
        firstMention = mention_split[0]
        logging.debug("firstMention: " + str(firstMention))
    except Exception:
        firstMention = mention
        logging.debug("No @mention", exc_info=True)

    """
    This is to make sure the user is from the allowed pod(s)
    """
    if userCompany in _config['allowedPod']:
        logging.debug("Inside allowed Pod(s), True")
        podAllowed = True
    else:
        podAllowed = False
        logging.debug("Outside allowed Pod(s), False")

    try:
        ## If within allowed Pod
        if podAllowed:
            ## Make sure the bot @mention is used and matches before responding
            if str(firstMention) == str(_config['bot@Mention']):
                logging.debug("mention: " + str(mention))
                commandName = str(message_reader)[int(mention_len) + 1:]
                logging.debug("commandName: " + str(commandName))
                try:
                    if "/all" in str(commandName):
                        logging.info("Calling /all by " + str(displayName))
                        if audit_stream != "":
                            self.botaudit = dict(
                                message="""<messageML>Function /all called by <b>"""
                                + str(displayName) + """</b> in """ + str(streamID)
                                + """ (""" + str(streamType) + """)</messageML>""")
                            self.bot_client.get_message_client().send_msg(
                                audit_stream, self.botaudit)
                        return await AtRoom.atRoom(self, msg)
                except Exception:
                    logging.error("/all is not working")
                    traceback.print_exc()
                    self.botaudit = dict(
                        message="""<messageML>ERROR - Function /all called by <b>"""
                        + str(displayName) + """</b> in """ + str(streamID)
                        + """ (""" + str(streamType) + """)</messageML>""")
                    self.bot_client.get_message_client().send_msg(
                        audit_stream, self.botaudit)
                    return logging.debug("/all is not working", exc_info=True)
                try:
                    if "/whois" in str(commandName):
                        logging.info("Calling /whois by " + str(displayName))
                        if audit_stream != "":
                            self.botaudit = dict(
                                message="""<messageML>Function /whois called by <b>"""
                                + str(displayName) + """</b> in """ + str(streamID)
                                + """ (""" + str(streamType) + """)</messageML>""")
                            self.bot_client.get_message_client().send_msg(
                                audit_stream, self.botaudit)
                        msg_mentions = self.sym_message_parser.get_mention_ids(msg)
                        return await Whois.whois(self, msg_mentions, msg)
                except Exception:
                    logging.error("/whois is not working")
                    traceback.print_exc()
                    if audit_stream != "":
                        self.botaudit = dict(
                            message="""<messageML>ERROR: Function /whois called by <b>"""
                            + str(displayName) + """</b> in """ + str(streamID)
                            + """ (""" + str(streamType) + """)</messageML>""")
                        self.bot_client.get_message_client().send_msg(
                            audit_stream, self.botaudit)
                    return logging.debug("/whois is not working", exc_info=True)
                try:
                    ## Help command when called via @mention /help
                    if "/help" in str(commandName):
                        logging.info("Calling /help by " + str(displayName))
                        if audit_stream != "":
                            self.botaudit = dict(
                                message="""<messageML>Function /help called by <b>"""
                                + str(displayName) + """</b> in """ + str(streamID)
                                + """ (""" + str(streamType) + """)</messageML>""")
                            self.bot_client.get_message_client().send_msg(
                                audit_stream, self.botaudit)
                        return await Help.help(self, msg)
                except Exception:
                    logging.error("/help is not working")
                    traceback.print_exc()
                    if audit_stream != "":
                        self.botaudit = dict(
                            message="""<messageML>ERROR: Function /help called by <b>"""
                            + str(displayName) + """</b> in """ + str(streamID)
                            + """ (""" + str(streamType) + """)</messageML>""")
                        self.bot_client.get_message_client().send_msg(
                            audit_stream, self.botaudit)
                    return logging.debug("Help is not working", exc_info=True)
            else:
                return logging.debug(
                    "bot @mentioned does not match expected, or not calling bot command")
        else:
            return logging.debug("User is not from the allowed Pod(s)")
    except Exception:
        traceback.print_exc()
        return logging.debug("bot @mentioned was not used", exc_info=True)
def test_get_latest_completed_build(self):
    """
    Test the get_latest_completed_build function
    """
    # Succeed when calling the get_latest_completed_build function with a
    # valid results_api and the http_get method returns an
    # ElementTree.Element which contains the provided app_id
    with patch.object(
            ResultsAPI,
            "http_get",
            return_value=test_constants.
            VALID_RESULTS_API_GETAPPBUILDS_RESPONSE_XML_PASSING_POLICY_COMPLIANCE_STATUS[
                "Element"],
    ):
        with patch("veracode.check_compliance.element_contains_error",
                   return_value=False):
            with patch("veracode.api.get_app_id", return_value="1337"):
                results_api = ResultsAPI(app_name="TestApp")
                output = check_compliance.get_latest_completed_build(
                    results_api=results_api)
                expected = ElementTree.fromstring(
                    b'<ns0:application xmlns:ns0="https://analysiscenter.veracode.com/schema/2.0/applicationbuilds" app_name="TestApp" app_id="1337" industry_vertical="Manufacturing" assurance_level="Very High" business_criticality="Very High" origin="Not Specified" modified_date="2019-08-13T14:00:10-04:00" cots="false" business_unit="Not Specified" tags="">\n <ns0:customfield name="Custom 1" value="" />\n <ns0:customfield name="Custom 2" value="" />\n <ns0:customfield name="Custom 3" value="" />\n <ns0:customfield name="Custom 4" value="" />\n <ns0:customfield name="Custom 5" value="" />\n <ns0:customfield name="Custom 6" value="" />\n <ns0:customfield name="Custom 7" value="" />\n <ns0:customfield name="Custom 8" value="" />\n <ns0:customfield name="Custom 9" value="" />\n <ns0:customfield name="Custom 10" value="" />\n <ns0:build version="2019-10 Testing" build_id="1234321" submitter="Jon Zeolla" platform="Not Specified" lifecycle_stage="Deployed (In production and actively developed)" results_ready="true" policy_name="Veracode Recommended Medium" policy_version="1" policy_compliance_status="Pass" rules_status="Pass" grace_period_expired="false" scan_overdue="false">\n <ns0:analysis_unit analysis_type="Static" published_date="2019-10-13T16:20:30-04:00" published_date_sec="1570998030" status="Results Ready" />\n </ns0:build>\n </ns0:application>\n'
                )
                self.assertEqual([output.tag, output.attrib],
                                 [expected.tag, expected.attrib])

        # However, return False when the element_contains_error function
        # returns True
        with patch("veracode.check_compliance.element_contains_error",
                   return_value=True):
            self.assertFalse(
                check_compliance.get_latest_completed_build(
                    results_api=results_api))

    # Return False when calling the get_latest_completed_build function
    # with a valid results_api and the http_get method returns an
    # ElementTree.Element which doesn't contain the provided app_id
    with patch.object(
            ResultsAPI,
            "http_get",
            return_value=test_constants.
            VALID_RESULTS_API_GETAPPBUILDS_RESPONSE_XML_PASSING_POLICY_COMPLIANCE_STATUS[
                "Element"],
    ):
        with patch("veracode.check_compliance.element_contains_error",
                   return_value=False):
            with patch("veracode.api.get_app_id", return_value="31337"):
                results_api = ResultsAPI(app_name="TestApp")
                output = check_compliance.get_latest_completed_build(
                    results_api=results_api)
                self.assertFalse(output)

    # Return False when calling the get_latest_completed_build function
    # with a valid results_api and the http_get method raises one of a
    # series of exceptions
    with patch("veracode.api.get_app_id", return_value="1337"):
        results_api = ResultsAPI(app_name="TestApp")
        for err in [
                HTTPError,
                ConnectionError,
                Timeout,
                TooManyRedirects,
                RequestException,
        ]:
            with patch(
                    "veracode.check_compliance.element_contains_error",
                    return_value=False,
            ):
                with patch.object(ResultsAPI, "http_get", side_effect=err):
                    output = check_compliance.get_latest_completed_build(
                        results_api=results_api)
                    self.assertFalse(output)
def parse_assembly_meta(accession):
    """Return dict of metadata values for an assembly."""
    LOGGER.info("Fetching assembly metadata")
    meta = {
        "assembly": {"accession": accession},
        "busco": {"lineages": []},
        "reads": {"paired": [], "single": []},
        "revision": 0,
        "settings": {
            "tmp": "/tmp",
            "blast_chunk": 100000,
            "blast_max_chunks": 10,
            "blast_overlap": 0,
            "blast_min_length": 1000,
            "stats_chunk": 1000,
            "stats_windows": [0.1, 0.01, 100000, 1000000],
        },
        "similarity": {
            "defaults": {
                "evalue": 1.0e-10,
                "import_evalue": 1.0e-25,
                "max_target_seqs": 10,
                "taxrule": "buscogenes",
            },
            "diamond_blastx": {"name": "reference_proteomes"},
            "diamond_blastp": {
                "name": "reference_proteomes",
                "import_max_target_seqs": 100000,
                "taxrule": "blastp=buscogenes",
            },
            "blastn": {"name": "nt"},
        },
        "taxon": {},
        "version": 1,
    }
    xml = fetch_assembly_meta_xml(accession)
    root = ET.fromstring(xml)
    asm = root.find("ASSEMBLY")
    meta["assembly"]["bioproject"] = deep_find_text(
        asm, ("STUDY_REF", "IDENTIFIERS", "PRIMARY_ID"))
    meta["assembly"]["biosample"] = deep_find_text(
        asm, ("SAMPLE_REF", "IDENTIFIERS", "PRIMARY_ID"))
    meta["taxon"]["taxid"] = deep_find_text(asm, ("TAXON", "TAXON_ID"))
    meta["taxon"]["name"] = deep_find_text(asm, ("TAXON", "SCIENTIFIC_NAME"))
    meta["assembly"]["level"] = asm.find("ASSEMBLY_LEVEL").text
    meta["assembly"]["alias"] = asm.attrib["alias"]
    wgs_prefix = deep_find_text(asm, ("WGS_SET", "PREFIX"))
    wgs_version = deep_find_text(asm, ("WGS_SET", "VERSION"))
    if wgs_prefix and wgs_version:
        meta["assembly"]["prefix"] = "%s%s" % (wgs_prefix, wgs_version.zfill(2))
    elif " " not in meta["assembly"]["alias"]:
        meta["assembly"]["prefix"] = meta["assembly"]["alias"].replace(".", "_")
    else:
        meta["assembly"]["prefix"] = meta["assembly"]["accession"].replace(".", "_")
    attributes = asm.find("ASSEMBLY_ATTRIBUTES")
    for attribute in attributes.findall("ASSEMBLY_ATTRIBUTE"):
        if attribute.find("TAG").text == "total-length":
            meta["assembly"]["span"] = int(attribute.find("VALUE").text)
        elif attribute.find("TAG").text == "scaffold-count":
            meta["assembly"]["scaffold-count"] = int(attribute.find("VALUE").text)
    return meta
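# deep_find_text is defined elsewhere in the source; a plausible minimal
# implementation, assuming it follows a chain of child tags and returns the
# final element's text (or None as soon as any step is missing):
def deep_find_text(node, tags):
    """Follow a chain of child tags and return the last element's text, or None."""
    for tag in tags:
        if node is None:
            return None
        node = node.find(tag)
    return node.text if node is not None else None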
def decorated_function(*args, **kwargs):
    # Check session hash
    if session.get("CharacterOwnerHash", None):
        db_user = g.mongo.db.users.find_one(
            {"_id": session["CharacterOwnerHash"]})
    else:
        # Redirect if not logged in
        if "forum" in roles:
            session["redirect"] = "forum"
            session["client_id"] = request.args.get("client_id")
            session["redirect_uri"] = request.args.get("redirect_uri")
            session["response_type"] = request.args.get("response_type")
            session["scope"] = request.args.get("scope")
            session["state"] = request.args.get("state")
        else:
            session["redirect"] = request.path
        return redirect(url_for("auth.sso_redirect"))

    # Check cache
    if not db_user:
        # Redirect if user doesn't exist
        if "forum" in roles:
            session["redirect"] = "forum"
        else:
            session["redirect"] = request.path
        return redirect(url_for("auth.sso_redirect"))
    elif db_user["cached_until"] < time.time():
        # Refresh character if cache expires.
        # XML Character
        xml_char_payload = {"characterID": db_user["character_id"]}
        xml_char_headers = {"User-Agent": user_agent}
        xml_char_response = requests.get(
            "https://api.eveonline.com/eve/CharacterInfo.xml.aspx",
            data=xml_char_payload, headers=xml_char_headers)
        # XML Parse
        xml_tree = ElementTree.fromstring(xml_char_response.text)
        # Update Database
        xml_time_pattern = "%Y-%m-%d %H:%M:%S"
        g.mongo.db.users.update({"_id": db_user["_id"]}, {
            "$set": {
                "corporation_id": int(xml_tree[1][7].text),
                "corporation_name": xml_tree[1][8].text,
                "alliance_id": int(float(xml_tree[1][10].text)),
                "alliance_name": xml_tree[1][11].text,
                "last_sign_on": int(time.time()),
                "cached_until": int(calendar.timegm(
                    time.strptime(xml_tree[2].text, xml_time_pattern)))
            }
        })
        # Refresh db_user
        db_user = g.mongo.db.users.find_one(
            {"_id": session["CharacterOwnerHash"]})

    # Update UI
    # !!Warning: Use these variables for UI ONLY. Not to be used for page auth!!
    with open("configs/base.json", "r") as base_config_file:
        base_config = json.load(base_config_file)
    if db_user["corporation_id"] == base_config["corporation_id"]:
        session["UI_Corporation"] = True
    else:
        session["UI_Corporation"] = False
        forum_edit(db_user, "log_out")
    if db_user["alliance_id"] == base_config["alliance_id"]:
        session["UI_Alliance"] = True
    else:
        session["UI_Alliance"] = False

    # Update UI after cache check
    session["UI_Roles"] = []
    for role_ui in g.mongo.db.eve_auth.find():
        if session["CharacterOwnerHash"] in role_ui["users"]:
            session["UI_Roles"].append(role_ui["_id"])

    # Super User
    db_super_admins = g.mongo.db.eve_auth.find_one({"_id": "super_admin"})
    if db_super_admins and session["CharacterOwnerHash"] in db_super_admins["users"]:
        session["UI_Roles"] = []
        for role_ui in g.mongo.db.eve_auth.find():
            session["UI_Roles"].append(role_ui["_id"])

    # Auth check after checking if user exists and updating cache if necessary
    if not any([auth_check(x) for x in roles]) and "forum" not in roles:
        abort(403)
    return function(*args, **kwargs)
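# The positional lookups above (xml_tree[1][7], xml_tree[2]) depend on the
# exact child order of the eveapi response. A more defensive sketch using tag
# names instead of positions; the tag names here follow the EVE XML API's
# documented CharacterInfo layout and should be verified against a live
# response before use.
import xml.etree.ElementTree as ElementTree

def parse_character_info(xml_text):
    """Extract corporation fields from a CharacterInfo response by tag name."""
    tree = ElementTree.fromstring(xml_text)
    result = tree.find("result")
    return {
        "corporation_id": int(result.findtext("corporationID")),
        "corporation_name": result.findtext("corporation"),
        "cached_until": tree.findtext("cachedUntil"),
    }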
def zap_result_save(self, all_vuln, project_id, un_scanid):
    """
    Save all ZAP scan data into the Archery database.

    :param all_vuln:
    :param project_id:
    :param un_scanid:
    :return:
    """
    # Field-by-field alert extraction, duplicate/false-positive detection and
    # the per-scan vulnerability totals are all handled by zap_xml_parser
    # below; the report is first re-encoded to strip any non-ASCII bytes.
    root_xml = ET.fromstring(all_vuln)
    en_root_xml = ET.tostring(root_xml,
                              encoding='utf8').decode('ascii', 'ignore')
    root_xml_en = ET.fromstring(en_root_xml)
    zap_xml_parser.xml_parser(project_id=project_id,
                              scan_id=un_scanid,
                              root=root_xml_en)
    self.zap.core.delete_all_alerts()
def hbci_transactions(event, conf):
    try:
        from defusedxml import ElementTree
    except ImportError:
        from xml.etree import ElementTree
    log = []
    data = []
    accname = event.id + '_' + str(int(time.time()))
    try:
        try:
            subprocess.call([
                'aqhbci-tool4', 'deluser', '-a', '--all',
                '-b', conf['hbci_blz'], '-u', conf['hbci_userid']
            ])
        except subprocess.CalledProcessError:
            pass
        aqhbci_params = [
            'aqhbci-tool4', 'adduser', '-N', accname,
            '-b', conf['hbci_blz'], '-s', conf['hbci_server'],
            '-t', conf['hbci_tokentype'], '-u', conf['hbci_userid']
        ]
        if conf['hbci_customerid']:
            aqhbci_params += ['-c', conf['hbci_customerid']]
        if conf['hbci_tokenname']:
            aqhbci_params += ['-n', conf['hbci_tokenname']]
        if conf['hbci_version']:
            aqhbci_params += ['--hbciversion=' + str(conf['hbci_version'])]
        aqhbci_add = subprocess.check_output(aqhbci_params)
        log.append("$ " + " ".join(aqhbci_params))
        log.append(aqhbci_add.decode("utf-8"))
        with tempfile.NamedTemporaryFile() as f, tempfile.NamedTemporaryFile() as g:
            f.write(('PIN_%s_%s = "%s"\n' % (
                conf['hbci_blz'],
                conf['hbci_userid'],
                conf['pin'],
            )).encode("utf-8"))
            f.flush()
            aqhbci_params = ['aqhbci-tool4', '-P', f.name, '-n', '-A', 'getsysid']
            aqhbci_test = subprocess.check_output(aqhbci_params)
            log.append("$ " + " ".join(aqhbci_params))
            log.append(aqhbci_test.decode("utf-8"))
            aqbanking_params = [
                'aqbanking-cli', '-P', f.name, '-A', '-n',
                'request', '--transactions', '-c', g.name
            ]
            aqbanking_trans = subprocess.check_output(aqbanking_params)
            log.append("$ " + " ".join(aqbanking_params))
            log.append(aqbanking_trans.decode("utf-8"))
            aqbanking_params = [
                'aqbanking-cli', 'listtrans', '-c', g.name, '--exporter=xmldb',
            ]
            aqbanking_conv = subprocess.check_output(aqbanking_params)
            log.append("$ " + " ".join(aqbanking_params))
            root = ElementTree.fromstring(aqbanking_conv)
            trans_list = root.find('accountInfoList').find(
                'accountInfo').find('transactionList')
            for trans in trans_list.findall('transaction'):
                payer = []
                for child in trans:
                    if child.tag.startswith('remote'):
                        payer.append(child.find('value').text)
                date = '%s-%02d-%02d' % (
                    trans.find('date').find('date').find('year').find('value').text,
                    int(trans.find('date').find('date').find('month').find('value').text),
                    int(trans.find('date').find('date').find('day').find('value').text))
                value = trans.find('value').find('value').find('value').text
                if "/" in value:
                    parts = value.split("/")
                    num = int(parts[0])
                    denom = int(parts[1])
                    value = Decimal(num) / Decimal(denom)
                    value = str(value.quantize(Decimal('.01')))
                data.append({
                    'payer': "\n".join(payer),
                    'reference': trans.find('purpose').find('value').text,
                    'amount': value,
                    'date': date
                })
    except subprocess.CalledProcessError as e:
        log.append("Command %s failed with %d and output:" % (e.cmd, e.returncode))
        log.append(e.output.decode("utf-8"))
    except Exception as e:
        log.append(str(e))
    finally:
        subprocess.call(['aqhbci-tool4', 'deluser', '-a', '-N', accname])
    log = "\n".join(log)
    return data, log
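# aqbanking's xmldb export can encode amounts as rationals ("12345/100");
# the loop above normalizes those to two decimal places. The same conversion
# as a standalone helper:
from decimal import Decimal

def normalize_amount(value):
    """Convert an aqbanking value like '12345/100' to a two-decimal string."""
    if "/" in value:
        num, denom = value.split("/")
        value = str((Decimal(int(num)) / Decimal(int(denom))).quantize(Decimal('.01')))
    return value

print(normalize_amount("12345/100"))  # 123.45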
def test_bad_char_rejection(trivial):
    with trivial.open_metadata() as xmp:
        xmp['dc:description'] = 'Bad characters \x00 \x01 \x02'
        xmp['dc:creator'] = ['\ue001bad', '\ufff0bad']
    ET.fromstring(str(xmp))
def store_blobs(  # noqa: C901
        destination_path, blob_data, content_type, should_apply_pii_filter):
    attribute_option, schema = None, None
    allowed_attributes = []
    if content_type == "application/json":
        url, method = (
            request.base_url.replace(request.host_url, "/"),
            request.method.lower(),
        )
        json_schema = None
        try:
            json_schema = current_app.paths[url][method]["requestBody"][
                "content"]["application/json"]["schema"]["$ref"]
        except KeyError:
            pass
        if json_schema and current_app.schemas:
            schema = current_app.schemas[json_schema.split("/")[-1]]
            attribute_option = schema.get("x-strict-attributes")
        if schema and attribute_option == "strict":
            def _get_schema_keys(dictionary):
                for key, value in dictionary.items():
                    if type(value) is dict and key != "properties":
                        allowed_attributes.append(key)
                    if type(value) is dict:
                        _get_schema_keys(value)
            _get_schema_keys(schema)
    if content_type in [
            "text/xml",
            "application/xml",
            "application/xml-external-parsed-entity",
            "text/xml-external-parsed-entity",
            "application/xml-dtd",
    ]:
        # Run blob_data through defusedxml's ElementTree first to mitigate
        # exposure from XML attacks
        safe_xml_tree = defusedxml_ET.fromstring(blob_data)
        # Run safe_xml_tree through lxml's ElementTree second to process
        xml_tree = ET.fromstring(defusedxml_ET.tostring(safe_xml_tree))  # nosec
        # Run through elements and select the local name for each tag to
        # clean data of extra exposed namespaces
        for elem in xml_tree.getiterator():
            elem.tag = ET.QName(elem).localname
        blob_data = ET.tostring(xml_tree)
    if attribute_option == "strict" and allowed_attributes:
        logging.info("Running with strict attribute rules")
        if type(blob_data) != list and type(blob_data) != dict:
            blob_data = json.loads(blob_data)
        blob_data = json.dumps(
            clear_keys_from_list(blob_data, allowed_attributes))
    if should_apply_pii_filter and (not attribute_option or
                                    attribute_option in ["filter", "strict"]):
        if type(blob_data) != list and type(blob_data) != dict:
            blob_data_to_filter = json.loads(blob_data)
        else:
            blob_data_to_filter = blob_data
        blob_data_pii = json.dumps(
            apply_pii_filter(blob_data_to_filter,
                             current_app.__pii_filter_def__))
    else:
        blob_data_pii = blob_data
    if type(blob_data) == list or type(blob_data) == dict:
        blob_data = json.dumps(blob_data)
    logging.info("Storing blob data on path: {}".format(destination_path))
    for cs in current_app.cloudstorage:
        cs.storeBlob(destination_path, blob_data_pii, content_type)
    for cs in current_app.cloudlogstorage:
        cs.storeBlob(destination_path, blob_data, content_type)
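# The XML branch above parses untrusted input with defusedxml first (guarding
# against entity-expansion attacks), then rebuilds it with lxml and rewrites
# each tag to its local name to drop namespace prefixes. A standalone sketch
# of that sanitization, assuming both libraries are installed:
from defusedxml import ElementTree as defusedxml_ET
from lxml import etree as ET

def sanitize_xml(blob):
    """Parse untrusted XML safely, then strip namespace prefixes from tags."""
    safe_tree = defusedxml_ET.fromstring(blob)  # rejects entity bombs etc.
    tree = ET.fromstring(defusedxml_ET.tostring(safe_tree))
    for elem in tree.iter():
        elem.tag = ET.QName(elem).localname  # '{urn:x}tag' -> 'tag'
    return ET.tostring(tree)

result = sanitize_xml(b'<a:root xmlns:a="urn:x"><a:child/></a:root>')
# Tags are now plain 'root' and 'child'; note lxml may still serialize the
# now-unused xmlns declaration on the root element.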
def get_bugzilla_bug(bugzilla_url, bug_id):
    bug_xml = _fetch_bug_content(bugzilla_url, bug_id)
    tree = ElementTree.fromstring(bug_xml)
    return tree.find("bug")
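# _fetch_bug_content is defined elsewhere; a plausible sketch using requests
# against Bugzilla's standard XML export (show_bug.cgi?ctype=xml). Treat the
# exact URL handling as an assumption, not the project's actual helper.
import requests

def _fetch_bug_content(bugzilla_url, bug_id):
    response = requests.get(
        "%s/show_bug.cgi" % bugzilla_url.rstrip("/"),
        params={"ctype": "xml", "id": bug_id},
        timeout=30,
    )
    response.raise_for_status()
    return response.content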
def post(self, request, format=None):
    project_id = request.data.get("project_id")
    scanner = request.data.get("scanner")
    file = request.data.get("filename")
    scan_url = request.data.get("scan_url")
    scan_id = uuid.uuid4()
    scan_status = "100"
    if scanner == "zap_scan":
        date_time = datetime.datetime.now()
        scan_dump = zap_scans_db(scan_url=scan_url,
                                 scan_scanid=scan_id,
                                 date_time=date_time,
                                 project_id=project_id,
                                 vul_status=scan_status,
                                 rescan='No')
        scan_dump.save()
        root_xml = ET.fromstring(file)
        en_root_xml = ET.tostring(root_xml,
                                  encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        zap_xml_parser.xml_parser(project_id=project_id,
                                  scan_id=scan_id,
                                  root=root_xml_en,
                                  source='parser')
        return Response({
            "message": "ZAP Scan Data Uploaded",
            "scanner": scanner,
            "project_id": project_id,
            "scan_id": scan_id
        })
    elif scanner == "burp_scan":
        date_time = datetime.datetime.now()
        scan_dump = burp_scan_db(url=scan_url,
                                 scan_id=scan_id,
                                 date_time=date_time,
                                 project_id=project_id,
                                 scan_status=scan_status)
        scan_dump.save()
        # Burp scan XML parser
        root_xml = ET.fromstring(file)
        en_root_xml = ET.tostring(root_xml,
                                  encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        burp_xml_parser.burp_scan_data(root_xml_en, project_id, scan_id)
        return Response({
            "message": "Burp Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == "arachni":
        date_time = datetime.datetime.now()
        scan_dump = arachni_scan_db(url=scan_url,
                                    scan_id=scan_id,
                                    date_time=date_time,
                                    project_id=project_id,
                                    scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        arachni_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == "acunetix":
        date_time = datetime.datetime.now()
        scan_dump = acunetix_scan_db(url=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        en_root_xml = ET.tostring(root_xml,
                                  encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        acunetix_xml_parser.xml_parser(project_id=project_id,
                                       scan_id=scan_id,
                                       root=root_xml_en)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'netsparker':
        date_time = datetime.datetime.now()
        scan_dump = netsparker_scan_db(url=scan_url,
                                       scan_id=scan_id,
                                       date_time=date_time,
                                       project_id=project_id,
                                       scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        netsparker_xml_parser.xml_parser(project_id=project_id,
                                         scan_id=scan_id,
                                         root=root_xml)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'webinspect':
        date_time = datetime.datetime.now()
        scan_dump = webinspect_scan_db(url=scan_url,
                                       scan_id=scan_id,
                                       date_time=date_time,
                                       project_id=project_id,
                                       scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        webinspect_xml_parser.xml_parser(project_id=project_id,
                                         scan_id=scan_id,
                                         root=root_xml)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'banditscan':
        date_time = datetime.datetime.now()
        scan_dump = bandit_scan_db(project_name=scan_url,
                                   scan_id=scan_id,
                                   date_time=date_time,
                                   project_id=project_id,
                                   scan_status=scan_status)
        scan_dump.save()
        data = json.loads(file)
        bandit_report_json(data=data, project_id=project_id, scan_id=scan_id)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'dependencycheck':
        date_time = datetime.datetime.now()
        scan_dump = dependencycheck_scan_db(project_name=scan_url,
                                            scan_id=scan_id,
                                            date_time=date_time,
                                            project_id=project_id,
                                            scan_status=scan_status)
        scan_dump.save()
        xml_dat = bytes(bytearray(file, encoding='utf-8'))
        data = etree.XML(xml_dat)
        dependencycheck_report_parser.xml_parser(project_id=project_id,
                                                 scan_id=scan_id,
                                                 data=data)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'findbugs':
        date_time = datetime.datetime.now()
        scan_dump = findbugs_scan_db(project_name=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        findbugs_report_parser.xml_parser(project_id=project_id,
                                          scan_id=scan_id,
                                          root=root_xml)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'clair':
        date_time = datetime.datetime.now()
        scan_dump = clair_scan_db(project_name=scan_url,
                                  scan_id=scan_id,
                                  date_time=date_time,
                                  project_id=project_id,
                                  scan_status=scan_status)
        scan_dump.save()
        data = json.loads(file)
        clair_json_report_parser.clair_report_json(project_id=project_id,
                                                   scan_id=scan_id,
                                                   data=data)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'trivy':
        date_time = datetime.datetime.now()
        scan_dump = trivy_scan_db(project_name=scan_url,
                                  scan_id=scan_id,
                                  date_time=date_time,
                                  project_id=project_id,
                                  scan_status=scan_status)
        scan_dump.save()
        data = json.loads(file)
        trivy_json_report_parser.trivy_report_json(project_id=project_id,
                                                   scan_id=scan_id,
                                                   data=data)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'inspec':
        date_time = datetime.datetime.now()
        scan_dump = inspec_scan_db(project_name=scan_url,
                                   scan_id=scan_id,
                                   date_time=date_time,
                                   project_id=project_id,
                                   scan_status=scan_status)
        scan_dump.save()
        data = json.loads(file)
        inspec_json_parser.inspec_report_json(project_id=project_id,
                                              scan_id=scan_id,
                                              data=data)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'nessus':
        date_time = datetime.datetime.now()
        scan_dump = nessus_scan_db(scan_ip=scan_url,
                                   scan_id=scan_id,
                                   date_time=date_time,
                                   project_id=project_id,
                                   scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        en_root_xml = ET.tostring(root_xml,
                                  encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        Nessus_Parser.nessus_parser(
            root=root_xml_en,
            scan_id=scan_id,
            project_id=project_id,
        )
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'openvas':
        date_time = datetime.datetime.now()
        scan_dump = scan_save_db(scan_ip=scan_url,
                                 scan_id=scan_id,
                                 date_time=date_time,
                                 project_id=project_id,
                                 scan_status=scan_status)
        scan_dump.save()
        root_xml = ET.fromstring(file)
        en_root_xml = ET.tostring(root_xml,
                                  encoding='utf8').decode('ascii', 'ignore')
        root_xml_en = ET.fromstring(en_root_xml)
        OpenVas_Parser.xml_parser(project_id=project_id,
                                  scan_id=scan_id,
                                  root=root_xml_en)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    elif scanner == 'nikto':
        date_time = datetime.datetime.now()
        scan_dump = nikto_result_db(
            date_time=date_time,
            scan_url=scan_url,
            scan_id=scan_id,
            project_id=project_id,
        )
        scan_dump.save()
        nikto_html_parser(file, project_id, scan_id)
        return Response({
            "message": "Scan Data Uploaded",
            "project_id": project_id,
            "scan_id": scan_id,
            "scanner": scanner
        })
    return Response({"message": "Scan Data Uploaded"})
async def _find_around_me_handle_requet(
        self,
        oef_message: OefSearchMessage,
        oef_search_dialogue: OefSearchDialogue,
        radius: float,
        params: Dict[str, List[str]],
) -> None:
    """
    Find agents around me.

    :param oef_message: OefSearchMessage
    :param oef_search_dialogue: OefSearchDialogue
    :param radius: the radius in which to search
    :param params: the parameters for the query
    :return: None
    """
    assert self.in_queue is not None, "Inqueue not set!"
    self.logger.debug("Searching in radius={} of myself".format(radius))
    response_text = await self._generic_oef_command(
        "find_around_me", {"range_in_km": [str(radius)], **params})
    root = ET.fromstring(response_text)
    agents = {
        key: {} for key in self.SUPPORTED_CHAIN_IDENTIFIERS
    }  # type: Dict[str, Dict[str, str]]
    agents_l = []  # type: List[str]
    for agent in root.findall(path=".//agent"):
        chain_identifier = ""
        for identities in agent.findall("identities"):
            for identity in identities.findall("identity"):
                for (
                        chain_identifier_key,
                        chain_identifier_name,
                ) in identity.items():
                    if chain_identifier_key == "chain_identifier":
                        chain_identifier = chain_identifier_name
                        agent_address = identity.text
        agent_distance = agent.find("range_in_km").text
        if chain_identifier in agents:
            agents[chain_identifier][agent_address] = agent_distance
            agents_l.append(agent_address)
    message = OefSearchMessage(
        performative=OefSearchMessage.Performative.SEARCH_RESULT,
        dialogue_reference=oef_search_dialogue.dialogue_label.dialogue_reference,
        agents=tuple(agents_l),
        target=oef_message.message_id,
        message_id=oef_message.message_id + 1,
    )
    message.counterparty = oef_message.counterparty
    assert oef_search_dialogue.update(message)
    envelope = Envelope(
        to=message.counterparty,
        sender=SOEFConnection.connection_id.latest,
        protocol_id=message.protocol_id,
        message=message,
    )
    await self.in_queue.put(envelope)
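# A minimal sketch of the response shape the loop above expects, with a
# fabricated document, showing where the chain identifier, agent address and
# distance come from:
import xml.etree.ElementTree as ET

response_text = """
<response>
  <agent>
    <identities>
      <identity chain_identifier="fetchai">agent_address_1</identity>
    </identities>
    <range_in_km>3.2</range_in_km>
  </agent>
</response>
"""
root = ET.fromstring(response_text)
for agent in root.findall(".//agent"):
    for identity in agent.findall("identities/identity"):
        print(identity.get("chain_identifier"),  # fetchai
              identity.text,                     # agent_address_1
              agent.find("range_in_km").text)    # 3.2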
def from_xml(cls, text):
    """Initialise JRD with XML plaintext.

    args:
    text - XML text to parse. Must be a string.
    """
    XMLNSMAP = {"XRD": 'http://docs.oasis-open.org/ns/xri/xrd-1.0'}

    def parse_properties(node):
        ret = {}
        properties = node.findall("XRD:Property", XMLNSMAP)
        if not properties:
            return ret
        for property in properties:
            if "type" not in property.attrib:
                raise WebFingerJRDError("type is required with property")
            key = property.attrib["type"]
            has_nil = property.attrib.get("xsi:nil", "").lower()
            if has_nil and has_nil == "true":
                value = None
            else:
                value = property.text
            ret[key] = value
        return ret

    try:
        root = DefusedElementTree.fromstring(text)
    except Exception as e:
        raise WebFingerXRDError("error parsing XRD XML") from e

    subject = root.find("XRD:Subject", XMLNSMAP)
    if subject is None:
        raise WebFingerXRDError("subject is required")
    jrd = {"subject": subject.text}

    aliases = root.findall("XRD:Alias", XMLNSMAP)
    if aliases:
        aliases_jrd = jrd["aliases"] = []
        for alias in aliases:
            if not alias.text:
                raise WebFingerXRDError("alias had no content")
            aliases_jrd.append(alias.text)

    properties = parse_properties(root)
    if properties:
        jrd["properties"] = properties

    links = root.findall("XRD:Link", XMLNSMAP)
    if links:
        links_jrd = jrd["links"] = []
        for link in links:
            link_jrd = {}

            # Retrieve basic attributes
            for attrib, value in link.attrib.items():
                link_jrd[attrib] = value

            # Properties
            properties = parse_properties(link)
            if properties:
                link_jrd["properties"] = properties

            # Titles (stored on the link, not the top-level JRD)
            titles = link.findall("XRD:Title", XMLNSMAP)
            if titles:
                titles_jrd = link_jrd["titles"] = {}
                for title in titles:
                    lang = title.attrib.get("xml:lang", "und")
                    title = title.text
                    titles_jrd[title] = lang

            links_jrd.append(link_jrd)

    # TODO - any other elements

    return cls(jrd)
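# A quick usage sketch with a minimal XRD document (subject, alias, one link).
# The class name WebFingerJRD and the document content are assumptions for
# illustration; only the element names come from the parser above.
xrd = """<?xml version="1.0"?>
<XRD xmlns="http://docs.oasis-open.org/ns/xri/xrd-1.0">
  <Subject>acct:carol@example.com</Subject>
  <Alias>https://example.com/~carol</Alias>
  <Link rel="http://webfinger.net/rel/profile-page"
        href="https://example.com/~carol"/>
</XRD>
"""
jrd = WebFingerJRD.from_xml(xrd)
# Wraps {'subject': 'acct:carol@example.com',
#        'aliases': ['https://example.com/~carol'],
#        'links': [{'rel': 'http://webfinger.net/rel/profile-page',
#                   'href': 'https://example.com/~carol'}]}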
def updatetemplate(checklistid, taskid):
    from flask import request
    from defusedxml import ElementTree as etree
    from FreeTAKServer.controllers.SpecificCoTControllers.SendExcheckUpdateController import SendExcheckUpdateController
    from FreeTAKServer.controllers.XMLCoTController import XMLCoTController
    from FreeTAKServer.model.FTSModel.Event import Event
    from FreeTAKServer.model.RawCoT import RawCoT
    import uuid
    import hashlib

    data = request.data
    xml = etree.parse(
        str(PurePath(Path(MainConfig.ExCheckChecklistFilePath),
                     Path(checklistid + '.xml')))).getroot()
    updatedTask = etree.fromstring(data)
    tasks = xml.find('checklistTasks')
    for task in tasks:
        uid = task.find('uid')
        if uid.text == taskid:
            tasks.replace(task, updatedTask)
        else:
            pass
    with open(
            str(PurePath(Path(MainConfig.ExCheckChecklistFilePath),
                         Path(checklistid + '.xml'))), 'w+') as file:
        file.write(etree.tostring(xml).decode())

    # Create Object to send to client
    object = Event.ExcheckUpdate()
    object.setuid(str(uuid.uuid4()))
    object.setversion('2.0')
    object.detail.mission.settype("CHANGE")
    object.detail.mission.settool("ExCheck")
    object.detail.mission.setname(checklistid)
    object.detail.mission.setauthorUid(request.args.get("clientUid"))
    object.detail.mission.MissionChanges.MissionChange.creatorUid.setINTAG(
        request.args.get("clientUid"))
    object.detail.mission.MissionChanges.MissionChange.missionName.setINTAG(
        checklistid)
    object.detail.mission.MissionChanges.MissionChange.type.setINTAG(
        "ADD_CONTENT")
    object.detail.mission.MissionChanges.MissionChange.contentResource.filename.setINTAG(
        taskid + '.xml')
    object.detail.mission.MissionChanges.MissionChange.contentResource.hash.setINTAG(
        str(hashlib.sha256(
            open(MainConfig.ExCheckChecklistFilePath + '/' + checklistid + '.xml',
                 'r').read().encode()).hexdigest()))
    object.detail.mission.MissionChanges.MissionChange.contentResource.keywords.setINTAG(
        'Task')
    object.detail.mission.MissionChanges.MissionChange.contentResource.name.setINTAG(
        taskid)
    object.detail.mission.MissionChanges.MissionChange.contentResource.size.setINTAG(
        str(len(data)))  # TODO: change this value
    object.detail.mission.MissionChanges.MissionChange.contentResource.submitter.setINTAG(
        'atak')
    object.detail.mission.MissionChanges.MissionChange.contentResource.uid.setINTAG(
        taskid)
    '''object = etree.fromstring(templateex)
    object.uid = uuid.uuid4()
    object.find('detail').find('mission').type = "CHANGE"
    object.find('detail').find('mission').name = taskid
    object.find('detail').find('mission').Uid = request.args.get("clientUid")
    object.find('detail').find('mission').find('MissionChanges').find('MissionChange').find('creatorUid').text = request.args.get("clientUid")
    object.find('detail').find('mission').find('MissionChanges').find('MissionChange').find('missionName').text = taskid
    object.find('detail').find('mission').find('MissionChanges').find('MissionChange').find('filename').text = checklistid + '.xml'
    object.detail.mission.MissionChanges.MissionChange.contentResource.hash.setINTAG(str(hashlib.sha256(str(data).encode()).hexdigest()))
    object.detail.mission.MissionChanges.MissionChange.contentResource.keywords.setINTAG('Task')
    object.detail.mission.MissionChanges.MissionChange.contentResource.name.setINTAG(checklistid)
    object.detail.mission.MissionChanges.MissionChange.contentResource.size.setINTAG(str(len(data)))  # TODO: change this value
    object.detail.mission.MissionChanges.MissionChange.contentResource.submitter.setINTAG('test')
    object.detail.mission.MissionChanges.MissionChange.contentResource.uid.setINTAG(checklistid)'''
    rawcot = RawCoT()
    xml = XMLCoTController().serialize_model_to_CoT(object)
    rawcot.xmlString = xml
    PIPE.put(rawcot)
    # PIPE.send()
    return '', 200
                returnvalue = self.dest[self.__index]
                return returnvalue
            else:
                self.__index = 0
                return self.dest[self.__index]
        except IndexError:
            self.__index = 0
            return self.dest[self.__index]

    def setdest(self, Dest=None):
        if not Dest:
            Dest = DestObject.geochat()
        self.dest.append(DestObject.geochat())
        self.__index += 1


if __name__ == "__main__":
    from FreeTAKServer.controllers.XMLCoTController import XMLCoTController
    from defusedxml import ElementTree as etree

    a = Marti.other()
    b = etree.fromstring(b'<marti><dest callsign = "bbbb"/></marti>')
    x = XMLCoTController().serialize_CoT_to_model(a, b)
    y = x.getdest().callsign
    print(a.__dict__)
    M = DestObject()
    M.setcallsign('13243432w')
    a.setdest(M)
    for x in a.dest:
        f = a.getdest()
    print('done')
def parse(string):
    root = ET.fromstring(string)
    conference_element = root.find('conference')
    conference = Conference(
        title=conference_element.find('title').text,
        start=datetime.strptime(conference_element.find('start').text, '%Y-%m-%d'),
        end=datetime.strptime(conference_element.find('end').text, '%Y-%m-%d'),
        days=int(conference_element.find('days').text),
        day_change=conference_element.find('day_change').text,
        city=conference_element.find('city').text,
        timeslot_duration=conference_element.find('timeslot_duration').text,
        venue=conference_element.find('venue').text,
    )
    for day_element in root.findall('day'):
        day = Day(date=datetime.strptime(day_element.get('date'), '%Y-%m-%d'),
                  index=int(day_element.get('index')))
        for room_element in day_element.findall('room'):
            room = Room(name=room_element.get('name'))
            for event_element in room_element.findall('event'):
                event = Event(id=int(event_element.get('id')),
                              date=day.date,
                              start=get_text(event_element, 'start'),
                              duration=get_text(event_element, 'duration'),
                              track=get_text(event_element, 'track'),
                              abstract=get_text(event_element, 'abstract'),
                              title=get_text(event_element, 'title'),
                              type=get_text(event_element, 'type'),
                              description=get_text(event_element, 'description'),
                              conf_url=get_text(event_element, 'conf_url'),
                              full_conf_url=get_text(event_element, 'conf_url'),
                              level=get_text(event_element, 'level'))
                persons_element = event_element.find('persons')
                for person_element in persons_element.findall('person'):
                    person = Person(
                        id=int(person_element.get('id')),
                        name=person_element.text,
                    )
                    event.add_person(person)
                links_element = event_element.find('links')
                if links_element:
                    for link_element in links_element.findall('link'):
                        link_url = link_element.get('href')
                        if not event.video_url and (
                                'mp4' in link_url or 'webm' in link_url
                                or 'youtube' in link_url or 'avi' in link_url):
                            event.video_url = link_url
                        if not event.audio_url and (
                                'mp3' in link_url or 'wav' in link_url
                                or 'soundcloud' in link_url):
                            event.audio_url = link_url
                        if not event.slides_url and (
                                'ppt' in link_url or 'pptx' in link_url
                                or 'slide' in link_url):
                            event.slides_url = link_url
                room.add_event(event)
            day.add_room(room)
        conference.add_day(day)
    return conference
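# get_text is assumed to be a small helper used throughout parse(); a
# plausible sketch that returns a child element's text, or None when the tag
# is missing or empty:
def get_text(element, tag):
    """Return the stripped text of element/<tag>, or None if absent."""
    child = element.find(tag)
    if child is None or child.text is None:
        return None
    return child.text.strip()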