Example #1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/5 16:09
# @Author : chenxin
# @Site : 
# @File : xml创建.py
# @Software: PyCharm

import xml.etree.ElementTree as ET

new_xml = ET.Element("personinfolist")
personinfo = ET.SubElement(new_xml, "personinfo", attrib={"enrolled": "yes"})
name = ET.SubElement(personinfo, "name", attrib={"checked": "no"})
name.text = 'alex'

age = ET.SubElement(personinfo, "age", attrib={"checked": "no"})
sex = ET.SubElement(personinfo, "sex")
age.text = '33'
personinfo2 = ET.SubElement(new_xml, "personinfo", attrib={"enrolled": "no"})
name = ET.SubElement(personinfo2, "name")
name.text = 'oldboy'
age = ET.SubElement(personinfo2,'age')
age.text = '19'

et = ET.ElementTree(new_xml)  # build the document object
et.write("test.txt.xml", encoding="utf-8", xml_declaration=True)

ET.dump(new_xml)  # print the generated XML
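
Note: ET.dump() only prints the tree to stdout, without indentation. On Python 3.9+ the tree can be pretty-printed in place with ET.indent() before writing. A minimal sketch, reusing the new_xml element built above:

# Pretty-printing sketch (requires Python 3.9+); modifies the tree in place
ET.indent(new_xml, space="    ")
ET.ElementTree(new_xml).write("test_pretty.xml", encoding="utf-8", xml_declaration=True)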

Example #2
def get(id) -> dict:
    '''
        Connects to ARES, receives the XML response and parses the useful data.
        Returns a dict {'name': subject_name, 'id': subject_id, 'town': subject_town,
                'street': subject_street, 'house_no': subject_house_no,
                'zipcode': subject_zipcode, 'type': subject_type}
        or {"Not found": exception} when the lookup fails.
        '''
    response = requests.get(
        f"https://wwwinfo.mfcr.cz/cgi-bin/ares/darv_std.cgi?ico={id}")
    xml = ET.fromstring(response.content.decode('utf-8'))
    tree = ET.ElementTree(xml)
    root = tree.getroot()

    subject_type: str = None
    subject_name: str = None
    subject_id: str = None
    subject_town: str = None
    subject_town_part: str = None
    subject_street: str = None
    subject_house_no: str = None
    subject_zipcode: str = None

    try:
        for tp in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}Kod_PF'
        ):
            subject_type = gst.get_subject_type(tp.text)
        for name in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_answer/v_1.0.1}Obchodni_firma'
        ):
            subject_name = name.text
        for ico in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_answer/v_1.0.1}ICO'
        ):
            subject_id = ico.text
        for town in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}Nazev_obce'
        ):
            subject_town = town.text
        for town_part in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}Nazev_casti_obce'
        ):
            subject_town_part = town_part.text
        for street in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}Nazev_ulice'
        ):
            subject_street = street.text
        for house_no in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}Cislo_domovni'
        ):
            subject_house_no = house_no.text
        for zipcode in root.iter(
                '{http://wwwinfo.mfcr.cz/ares/xml_doc/schemas/ares/ares_datatypes/v_1.0.4}PSC'
        ):
            subject_zipcode = zipcode.text

        if not subject_street:
            subject_street = subject_town_part

        return {
            'name': subject_name,
            'id': subject_id,
            'town': subject_town,
            'street': subject_street,
            'house_no': subject_house_no,
            'zipcode': subject_zipcode,
            'type': subject_type
        }
    except Exception as e:
        return {"Not found": e}
Example #3
import sys
import xml.etree.ElementTree as etree

# https://www.hackerrank.com/challenges/xml-1-find-the-score

count = int(sys.stdin.readline().strip())
data = ""
for x in range(count):
    data += sys.stdin.readline().strip()

tree = etree.ElementTree(etree.fromstring(data))

score = 0

for elem in tree.iter():
    score += len(elem.attrib)

print(score)
Example #4
def passwords_xml():
    """
    To add network credentials to Kodi's password xml
    """
    path = path_ops.translate_path('special://userdata/')
    xmlpath = "%spasswords.xml" % path
    try:
        xmlparse = defused_etree.parse(xmlpath)
    except IOError:
        # Document is blank or missing
        root = etree.Element('passwords')
        skip_find = True
    except etree.ParseError:
        LOG.error('Error parsing %s', xmlpath)
        # "Kodi cannot parse {0}. PKC will not function correctly. Please visit
        # {1} and correct your file!"
        messageDialog(
            lang(29999),
            lang(39716).format('passwords.xml', 'http://forum.kodi.tv/'))
        return
    else:
        root = xmlparse.getroot()
        skip_find = False

    credentials = settings('networkCreds')
    if credentials:
        # Present user with options
        option = dialog('select', "Modify/Remove network credentials",
                        ["Modify", "Remove"])

        if option < 0:
            # User cancelled dialog
            return

        elif option == 1:
            # User selected remove
            success = False
            for paths in root.getiterator('passwords'):
                for path in paths:
                    if path.find('.//from').text == "smb://%s/" % credentials:
                        paths.remove(path)
                        LOG.info("Successfully removed credentials for: %s",
                                 credentials)
                        etree.ElementTree(root).write(xmlpath,
                                                      encoding="UTF-8")
                        success = True
            if not success:
                LOG.error("Failed to find saved server: %s in passwords.xml",
                          credentials)
                dialog('notification',
                       heading='{plex}',
                       message="%s not found" % credentials,
                       icon='{warning}',
                       sound=False)
                return
            settings('networkCreds', value="")
            dialog('notification',
                   heading='{plex}',
                   message="%s removed from passwords.xml" % credentials,
                   icon='{plex}',
                   sound=False)
            return

        elif option == 0:
            # User selected to modify
            server = dialog('input', "Modify the computer name or ip address",
                            credentials)
            if not server:
                return
    else:
        # No credentials added
        messageDialog(
            "Network credentials",
            'Input the server name or IP address as indicated in your plex '
            'library paths. For example, the server name: '
            '\\\\SERVER-PC\\path\\ or smb://SERVER-PC/path is SERVER-PC')
        server = dialog('input', "Enter the server name or IP address")
        if not server:
            return
        server = quote_plus(server)

    # Network username
    user = dialog('input', "Enter the network username")
    if not user:
        return
    user = quote_plus(user)
    # Network password
    password = dialog(
        'input',
        "Enter the network password",
        '',  # Default input
        type='{alphanum}',
        option='{hide}')
    # Need to url-encode the password
    password = quote_plus(password)
    # Add elements. Annoying etree bug where findall hangs forever
    if skip_find is False:
        skip_find = True
        for path in root.findall('.//path'):
            if path.find(
                    './/from').text.lower() == "smb://%s/" % server.lower():
                # Found the server, rewrite credentials
                path.find('.//to').text = ("smb://%s:%s@%s/" %
                                           (user, password, server))
                skip_find = False
                break
    if skip_find:
        # Server not found, add it.
        path = etree.SubElement(root, 'path')
        etree.SubElement(path, 'from', attrib={'pathversion': "1"}).text = \
            "smb://%s/" % server
        topath = "smb://%s:%s@%s/" % (user, password, server)
        etree.SubElement(path, 'to', attrib={'pathversion': "1"}).text = topath

    # Add credentials
    settings('networkCreds', value="%s" % server)
    LOG.info("Added server: %s to passwords.xml", server)
    # Prettify and write to file
    indent(root)
    etree.ElementTree(root).write(xmlpath, encoding="UTF-8")
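
Note: Element.getiterator(), used in the removal branch above, was removed from ElementTree in Python 3.9; on current Python the equivalent call is Element.iter(). A minimal drop-in sketch:

# Python 3.9+ replacement for root.getiterator('passwords')
for paths in root.iter('passwords'):
    for path in paths:
        pass  # same body as in the loop above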
Example #5
    def print_xml_results(self, filename):
        root = ET.Element("report")
        hdr = ET.SubElement(root, "header")
        title = ET.SubElement(hdr, "title")
        title.text = "XSSer Security Report: " + str(datetime.datetime.now())
        abstract = ET.SubElement(root, "abstract")
        total_injections = len(self.instance.hash_found) + len(
            self.instance.hash_notfound)

        if len(self.instance.hash_found) + len(
                self.instance.hash_notfound) == 0:
            pass
        injections = ET.SubElement(abstract, "injections")
        total_inj = ET.SubElement(injections, "total")
        failed_inj = ET.SubElement(injections, "failed")
        success_inj = ET.SubElement(injections, "successful")
        accur_inj = ET.SubElement(injections, "accur")

        total_inj_i = len(self.instance.hash_found) + len(
            self.instance.hash_notfound)

        total_inj.text = str(total_inj_i)
        failed_inj.text = str(len(self.instance.hash_notfound))
        success_inj.text = str(len(self.instance.hash_found))
        try:
            accur_inj.text = "%s %%" % (str(
                (len(self.instance.hash_found) * 100) / total_inj_i), )
        except ZeroDivisionError:
            accur_inj.text = "0 %"

        if self.instance.options.statistics:
            stats = ET.SubElement(root, "stats")
            test_time = datetime.datetime.now() - self.instance.time
            time_ = ET.SubElement(stats, "duration")
            time_.text = str(test_time)
            total_connections = self.instance.success_connection + self.instance.not_connection + self.instance.forwarded_connection + self.instance.other_connection
            con = ET.SubElement(stats, "connections")
            tcon = ET.SubElement(con, "total")
            tcon.text = str(total_connections)
            okcon = ET.SubElement(con, "ok")
            okcon.text = str(self.instance.success_connection)
            notfound = ET.SubElement(con, "notfound")
            notfound.text = str(self.instance.not_connection)
            forbidden = ET.SubElement(con, "forbidden")
            forbidden.text = str(self.instance.forwarded_connection)
            othercon = ET.SubElement(con, "other")
            othercon.text = str(self.instance.other_connection)
            st_accur = ET.SubElement(con, "accur")
            try:
                st_accur.text = "%s %%" % (str(
                    ((len(str((self.instance.success_connection) * 100))) /
                     total_connections)), )
            except ZeroDivisionError:
                st_accur.text = "0 %"
            st_inj = ET.SubElement(stats, "injections")
            st_inj_total = ET.SubElement(st_inj, "total")
            st_inj_total.text = str(total_injections)
            st_success = ET.SubElement(st_inj, "successful")
            st_success.text = str(len(self.instance.hash_found))
            st_failed = ET.SubElement(st_inj, "failed")
            st_failed.text = str(len(self.instance.hash_notfound))
            st_accur = ET.SubElement(st_inj, "accur")
            try:
                st_accur.text = "%s %%" % (str(
                    ((len(self.instance.hash_found) * 100) /
                     total_injections)), )
            except ZeroDivisionError:
                st_accur.text = "0 %"
        results = ET.SubElement(root, "results")
        for line in self.instance.hash_found:
            attack = ET.SubElement(results, "attack")
            url_ = ET.SubElement(attack, "injection")
            url_.text = line[0]
            attack_url = self.instance.apply_postprocessing(
                line[0], line[1], line[2], line[3], line[4], line[5], line[6])
            if self.instance.options.onm or self.instance.options.ifr or self.instance.options.b64 or self.instance.options.dos or self.instance.options.doss or self.instance.options.finalremote or self.instance.options.finalpayload:
                aurl = ET.SubElement(attack, "finalattack")
            else:
                aurl = None
            if line[2] == "xsr":
                self.xsr_founded = self.xsr_founded + 1
                xsr_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if xsr_vulnerable_host[0]["payload"] == line[
                        4] and xsr_vulnerable_host[0]["target"] == line[
                            6] and self.xsr_founded > 1:
                    pass
                else:
                    aurl.text = "Cross Site Referer Scripting!! " + str(
                        line[6]) + "/" + str(line[4])
            elif line[2] == "xsa":
                self.xsa_founded = self.xsa_founded + 1
                xsa_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if xsa_vulnerable_host[0]["payload"] == line[
                        4] and xsa_vulnerable_host[0]["target"] == line[
                            6] and self.xsa_founded > 1:
                    pass
                else:
                    aurl.text = "Cross Site Agent Scripting!! " + str(
                        line[6]) + "/" + str(line[4])
            elif line[2] == "coo":
                self.coo_founded = self.coo_founded + 1
                coo_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if coo_vulnerable_host[0]["payload"] == line[
                        4] and coo_vulnerable_host[0]["target"] == line[
                            6] and self.coo_founded > 1:
                    pass
                else:
                    aurl.text = "Cross Site Cookie Scripting!! " + str(
                        line[6]) + "/" + str(line[4])
            elif line[2] == "dcp":
                self.dcp_founded = self.dcp_founded + 1
                dcp_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if dcp_vulnerable_host[0]["payload"] == line[
                        4] and dcp_vulnerable_host[0]["target"] == line[
                            6] and self.dcp_founded > 1:
                    pass
                else:
                    aurl.text = "Data Control Protocol injections!! " + str(
                        line[6]) + "/" + str(line[4])
            elif line[2] == "dom":
                self.dom_founded = self.dom_founded + 1
                dom_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if dom_vulnerable_host[0]["payload"] == line[
                        4] and dom_vulnerable_host[0]["target"] == line[
                            6] and self.dom_founded > 1:
                    pass
                else:
                    aurl.text = "Document Object Model injections!! " + str(
                        line[6]) + "/" + str(line[4])
            elif line[2] == "ind":
                self.ind_founded = self.ind_founded + 1
                ind_vulnerable_host = [{
                    "payload": str(line[4]),
                    "target": str(line[6])
                }]
                if ind_vulnerable_host[0]["payload"] == line[
                        4] and ind_vulnerable_host[0]["target"] == line[
                            6] and self.ind_founded > 1:
                    pass
                else:
                    aurl.text = "HTTP Response Splitting Induced code!! " + str(
                        line[6]) + "/" + str(line[4])
            else:
                if aurl is None:
                    pass
                else:
                    aurl.text = attack_url
            if line[2] not in ["xsr", "xsa", "coo", "dcp", "dom", "ind"]:
                browsers = ET.SubElement(attack, "browsers")
                browsers.text = line[1]
                method = ET.SubElement(attack, "method")
                method.text = line[2]

        if not self.instance.hash_found:
            msg = ET.SubElement(results, "message")
            msg.text = "Failed injection(s): " + str(''.join(
                [u[0] for u in self.instance.hash_notfound]))
        tree = ET.ElementTree(root)
        tree.write(filename)
Example #6
    def save_xml(self, save_as):
        tree = ET.ElementTree(self.ASAP_Annotations)
        tree.write(save_as, xml_declaration=True)
Example #7
    def set_project_folder(self,
                           path,
                           ask_for_new_project=True,
                           close_all=True):
        if self.project_file is not None or close_all:
            # Close existing project (if any) or existing files if requested
            self.main_controller.close_all_files()
        FileOperator.RECENT_PATH = path
        util.PROJECT_PATH = path
        self.project_path = path
        self.project_file = os.path.join(self.project_path,
                                         constants.PROJECT_FILE)
        collapse_project_tabs = False
        if not os.path.isfile(self.project_file):
            if ask_for_new_project:
                reply = QMessageBox.question(
                    self.main_controller, "Project File",
                    "Do you want to create a Project File for this folder?\n"
                    "If you chose No, you can do it later via File->Convert Folder to Project.",
                    QMessageBox.Yes | QMessageBox.No)

                if reply == QMessageBox.Yes:
                    self.main_controller.show_project_settings()
                else:
                    self.project_file = None

            if self.project_file is not None:
                root = ET.Element("UniversalRadioHackerProject")
                tree = ET.ElementTree(root)
                tree.write(self.project_file)
                self.modulation_was_edited = False
        else:
            tree = ET.parse(self.project_file)
            root = tree.getroot()

            collapse_project_tabs = bool(
                int(root.get("collapse_project_tabs", 0)))
            self.modulation_was_edited = bool(
                int(root.get("modulation_was_edited", 0)))
            cfc = self.main_controller.compare_frame_controller
            self.read_parameters(root)
            self.participants[:] = Participant.read_participants_from_xml_tag(
                xml_tag=root.find("protocol"))
            self.main_controller.add_files(self.read_opened_filenames())
            self.read_compare_frame_groups(root)
            self.decodings = Encoding.read_decoders_from_xml_tag(
                root.find("protocol"))

            cfc.proto_analyzer.message_types[:] = self.read_message_types()
            cfc.message_type_table_model.update()
            cfc.proto_analyzer.from_xml_tag(root=root.find("protocol"),
                                            participants=self.participants,
                                            decodings=cfc.decodings)

            cfc.updateUI()

            try:
                for message_type in cfc.proto_analyzer.message_types:
                    for lbl in filter(lambda x: not x.show, message_type):
                        cfc.set_protocol_label_visibility(lbl)
            except Exception as e:
                logger.exception(e)

            self.modulators = self.read_modulators_from_project_file()
            self.main_controller.simulator_tab_controller.load_config_from_xml_tag(
                root.find("simulator_config"))

        if len(self.project_path) > 0 and self.project_file is None:
            self.main_controller.ui.actionConvert_Folder_to_Project.setEnabled(
                True)
        else:
            self.main_controller.ui.actionConvert_Folder_to_Project.setEnabled(
                False)

        self.main_controller.adjust_for_current_file(path)
        self.main_controller.filemodel.setRootPath(path)
        self.main_controller.ui.fileTree.setRootIndex(
            self.main_controller.file_proxy_model.mapFromSource(
                self.main_controller.filemodel.index(path)))
        self.main_controller.ui.fileTree.setToolTip(path)
        self.main_controller.ui.splitter.setSizes([1, 1])
        if collapse_project_tabs:
            self.main_controller.collapse_project_tab_bar()
        else:
            self.main_controller.expand_project_tab_bar()

        self.main_controller.setWindowTitle("Universal Radio Hacker [" + path +
                                            "]")

        self.project_loaded_status_changed.emit(self.project_loaded)
        self.project_updated.emit()
Example #8
def convert_poi(input, output, map_category=(), map_icon=(), ignore_tags=()):
    """Convert PoI from GARMIN to Mercedes-Benz GPX extensions.

    Parameters::

        :param input: input document.
        :type input: string with filename or file-like object.
        :param output: output document.
        :type output: string with filename or file-like object.
        :param map_category: list of link href and associated category.
        :type map_category: list or tuple with (key, value) pairs.
        :param map_icon: list of link href and associated icon.
        :type map_icon: list or tuple with (key, value) pairs.
        :param ignore_tags: list of tags to ignore (may include namespace).
        :type ignore_tags: list or tuple with tags to ignore.

    Mappings (map_icon and map_category) provide (key, value) tuples,
    where each key is an fnmatch() expression.

    They are used to infer the category/icon values from ``<link href="LINKHREF">``
    when these are not provided directly (i.e. GARMIN specifies ``<Categories>``).
    """

    # some tools write v2, others v3. Use RegExp to find which
    rx = re.compile("xmlns:([^= ]+) *=['\"]([^'\"]+/GpxExtensions/[^'\"]+)")
    m = None
    for line in input:
        m = rx.search(line)
        if m:
            break

    if m:
        gpxx_ns = m.group(1)
        gpxx_url = m.group(2)
        logging.info("Found XML Namespace %s=%s", gpxx_ns, gpxx_url)
    else:
        gpxx_ns = "gpxx"
        gpxx_url = "http://www.garmin.com/xmlschemas/GpxExtensions/v3"
        logging.warning("No XML Namespace for GpxExtensions! Using %s=%s",
                        gpxx_ns, gpxx_url)

    ns = {
        "xsi": "http://www.w3.org/2001/XMLSchema-instance",
        "gpx": "http://www.topografix.com/GPX/1/1",
        "gpxd": "http://www.daimler.com/DaimlerGPXExtensions/V2.7.2",
        gpxx_ns: gpxx_url
    }
    for k, v in ns.items():
        ET.register_namespace(k, v)

    logging.info("Parsing %s", input)
    input.seek(0)
    indoc = ET.parse(input)
    outroot = ET.Element(
        "{http://www.topografix.com/GPX/1/1}gpx",
        attrib={
            "version":
            "1.1",
            "creator":
            indoc.getroot().get("creator", "gpx-poi-garmin-to-mercedesbenz"),
            "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation":
            "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd",
        })

    it = []
    for i in ignore_tags:
        if i.startswith("{"):
            it.append(i)
        else:
            i = i.split(':', 1)
            if len(i) == 1:
                i = ('gpx', i[0])
            it_ns = ns.get(i[0], i[0])
            it_tag = i[1]
            it.append("{%s}%s" % (it_ns, it_tag))

    ignore_tags = it

    logging.info("Converting...")
    for wpt in indoc.findall("gpx:wpt", ns):
        wpt_out = ET.Element(wpt.tag, attrib=wpt.attrib)
        link_href = ""
        for cel in wpt:
            if cel.tag == "{http://www.topografix.com/GPX/1/1}link":
                link_href = cel.get("href", "")

            for i in ignore_tags:
                if cel.tag == i:
                    break
            else:
                if cel.tag == "{http://www.topografix.com/GPX/1/1}extensions":
                    _convert_wpt_extension(cel, wpt_out, ns, link_href,
                                           map_category, map_icon, ignore_tags)
                else:
                    _copy_gpx_tags(cel, wpt_out)

        if len(wpt_out):
            outroot.append(wpt_out)

    if len(outroot) > 30000:
        logging.warning("Writing %d entries (>30000!) to %s", len(outroot),
                        output)
    else:
        logging.info("Writing %d entries to %s", len(outroot), output)
    ET.ElementTree(outroot).write(output,
                                  encoding="utf-8",
                                  xml_declaration=True)
    logging.info("Finished")
Example #9
EntityOutput = set()

# Renovator Entities
tree = ET.parse("Resources/Entities.xml")
entityRenoXML = tree.getroot()

# Isaac Entities
isaacResourceFolder = (
    "/Users/Chronometrics/Dropbox/Basement Renovator/Afterbirth Rooms/resources/"
)

tree = ET.parse(isaacResourceFolder + "entities2.xml")
entityXML = tree.getroot()
root = ET.Element("data")
ETout = ET.ElementTree(root)

openMap()

from operator import itemgetter

for e in sorted(EntityOutput, key=itemgetter(0, 3, 2)):
    new = ET.SubElement(root, "entity")
    new.set("ID", str(e[0]))
    new.set("Name", str(e[1]))
    new.set("Subtype", str(e[2]))
    new.set("Variant", str(e[3]))
    new.set("Image", e[4])
    new.set("Group", e[5])

    if len(e) > 6:
Example #10
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as read
tree = read.ElementTree(file = 'sinhvien.xml')
root = tree.getroot()

# all items data
print('Student list:')
for elem in root:
    i = 0
    
    print('********************Student ' + str(elem.get('id')) + '********************** \n')
    print("")

    for subelem in elem:

        print(subelem.text)
    # if(elem.tag == 'sinhvien'):
    #     print('ID: ' + elem.get('id'))
    #     for i in elem:
    #         print(i.text)
    #         print('*****************')

        
Example #11
    def generate_files(self):
        self.root = ET.Element(
            'Wix', {'xmlns': 'http://schemas.microsoft.com/wix/2006/wi'})
        product = ET.SubElement(
            self.root, 'Product', {
                'Name': self.product_name,
                'Manufacturer': 'The Meson Development Team',
                'Id': self.guid,
                'UpgradeCode': self.update_guid,
                'Language': '1033',
                'Codepage': '1252',
                'Version': self.version,
            })

        package = ET.SubElement(
            product, 'Package', {
                'Id': '*',
                'Keywords': 'Installer',
                'Description': 'Meson %s installer' % self.version,
                'Comments': 'Meson is a high performance build system',
                'Manufacturer': 'The Meson Development Team',
                'InstallerVersion': '500',
                'Languages': '1033',
                'Compressed': 'yes',
                'SummaryCodepage': '1252',
            })

        if self.bytesize == 64:
            package.set('Platform', 'x64')
        ET.SubElement(product, 'Media', {
            'Id': '1',
            'Cabinet': 'meson.cab',
            'EmbedCab': 'yes',
        })
        targetdir = ET.SubElement(product, 'Directory', {
            'Id': 'TARGETDIR',
            'Name': 'SourceDir',
        })
        progfiledir = ET.SubElement(targetdir, 'Directory', {
            'Id': self.progfile_dir,
        })
        installdir = ET.SubElement(progfiledir, 'Directory', {
            'Id': 'INSTALLDIR',
            'Name': 'Meson',
        })
        ET.SubElement(
            installdir, 'Merge', {
                'Id': 'VCRedist',
                'SourceFile': self.redist_path,
                'DiskId': '1',
                'Language': '0',
            })

        ET.SubElement(product, 'Property', {
            'Id': 'WIXUI_INSTALLDIR',
            'Value': 'INSTALLDIR',
        })
        ET.SubElement(product, 'UIRef', {
            'Id': 'WixUI_FeatureTree',
        })
        for sd in self.staging_dirs:
            assert (os.path.isdir(sd))
        top_feature = ET.SubElement(
            product, 'Feature', {
                'Id': 'Complete',
                'Title': 'Meson ' + self.version,
                'Description': 'The complete package',
                'Display': 'expand',
                'Level': '1',
                'ConfigurableDirectory': 'INSTALLDIR',
            })
        for sd in self.staging_dirs:
            nodes = {}
            for root, dirs, files in os.walk(sd):
                cur_node = Node(dirs, files)
                nodes[root] = cur_node
            self.create_xml(nodes, sd, installdir, sd)
            self.build_features(nodes, top_feature, sd)
        vcredist_feature = ET.SubElement(
            top_feature, 'Feature', {
                'Id': 'VCRedist',
                'Title': 'Visual C++ runtime',
                'AllowAdvertise': 'no',
                'Display': 'hidden',
                'Level': '1',
            })
        ET.SubElement(vcredist_feature, 'MergeRef', {'Id': 'VCRedist'})
        ET.ElementTree(self.root).write(self.main_xml,
                                        encoding='utf-8',
                                        xml_declaration=True)
        # ElementTree can not do prettyprinting so do it manually
        import xml.dom.minidom
        doc = xml.dom.minidom.parse(self.main_xml)
        with open(self.main_xml, 'w') as of:
            of.write(doc.toprettyxml())
Example #12
    def data_call(self):
        global final_df1

        def divide_chunks(l, n):
            print("running data_call")

            # looping till length l
            for i in range(0, len(l), n):
                yield l[i:i + n]

        # How many elements each
        # list should have
        n = 100

        chunks = list(divide_chunks(data, n))
        final_df1 = pd.DataFrame()
        for chunk in chunks:
            line = ""
            for aa in chunk:
                line = line + "<dat:JavaLangstring>" + aa[0] + "/" + aa[
                    1] + "/ALL</dat:JavaLangstring>"
            print(line)
            final_df = pd.DataFrame()

            # datetimeObject = datetime.strptime(start_date,"%d-%m-%Y")
            # new_start_date =datetimeObject.strftime("%Y-%m-%d")
            # datetimeObject = datetime.strptime(end_date,"%d-%m-%Y")
            # new_end_date =datetimeObject.strftime("%Y-%m-%d")
            # print(new_start_date,new_end_date)
            # a=pd.DatetimeIndex(start=new_start_date,end=new_end_date, freq=BDay())

            url = "http://10.0.9.61:80/gdm/DataActionsService"
            headers = {'content-type': 'application/soap+xml'}
            # headers = {'content-type': 'text/xml'}
            body1 = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:dat="http://www.datagenicgroup.com">
            <soapenv:Header>
                  <wsse:Security xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
                     <wsse:UsernameToken wsu:Id="UsernameToken-902788241" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
                        <wsse:Username>adminuser@domain</wsse:Username>
                        <wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">adminuser@domain</wsse:Password>
                     </wsse:UsernameToken>
                  </wsse:Security>
                  <ns1:Client soapenv:actor="http://schemas.xmlsoap.org/soap/actor/next" soapenv:mustUnderstand="0" xmlns:ns1="gdm:http://www.datagenicgroup.com">
                     <ns1:ApplicationType>JavaClient</ns1:ApplicationType>
                  </ns1:Client>
               </soapenv:Header>
               <soapenv:Body>
                  <dat:getGenicDatas>
                     <dat:uris>"""

            body3 = """</dat:uris>
                     <dat:rangeUri>range://default/default</dat:rangeUri>
                  </dat:getGenicDatas>
               </soapenv:Body>
            </soapenv:Envelope>
            """
            body2 = line
            body = body1 + body2 + body3
            response = requests.post(
                url, data=body, headers=headers
            )  # ,username="******",password="******",)
            r = response.text
            # print(r)

            with open('GDMResponse.xml', 'w') as f:
                f.write(r)
            tree = et.ElementTree(et.fromstring(response.text))
            root = tree.getroot()
            for element in root:
                if element.tag == "{http://schemas.xmlsoap.org/soap/envelope/}Body":
                    for ns4 in element:
                        if ns4.tag == "{http://www.datagenicgroup.com}getGenicDatasResponse":
                            for ns4_temp in ns4:
                                if ns4_temp.tag == "{http://www.datagenicgroup.com}return":
                                    for ns4_temp_2 in ns4_temp:
                                        if ns4_temp_2.tag == "{http://www.datagenicgroup.com}GenicData":
                                            FullRangeUri = ""
                                            temp_model_data = []
                                            for genicData in ns4_temp_2:

                                                if genicData.tag == "{java:com.datagenicgroup.data}NumericSeries":
                                                    for properties in genicData:

                                                        for property in properties:
                                                            # print(property.text)
                                                            if property.text == "FullRangeUri":
                                                                for property in properties:
                                                                    # print(property.tag)
                                                                    if property.tag == "{java:com.datagenicgroup.data}Value":
                                                                        # print(property.text)
                                                                        FullRangeUri = property.text
                                                            if property.text == "ModelName":
                                                                for property in properties:
                                                                    if property.tag == "{java:com.datagenicgroup.data}Value":
                                                                        ModelName = property.text
                                                            if property.text == "ModelDescription":
                                                                for property in properties:
                                                                    if property.tag == "{java:com.datagenicgroup.data}Value":
                                                                        ModelDescription = property.text
                                                            if property.text == "ModelUri":
                                                                for property in properties:
                                                                    if property.tag == "{java:com.datagenicgroup.data}Value":
                                                                        ModelUri = property.text
                                                                        # print(ModelUri)
                                                        if properties.tag == "{java:com.datagenicgroup.data}Values":
                                                            temp_model_data.append(
                                                                properties.text
                                                            )
                                                            # print(properties.text)
                                            print(temp_model_data, ModelUri,
                                                  len(str(ModelUri)))
                                            if len(temp_model_data
                                                   ) > 0 and len(
                                                       str(ModelUri)) > 0:
                                                if ModelUri.split(
                                                        "/")[3].endswith("_M"):

                                                    start_date = "01-" + FullRangeUri.split(
                                                        "/")[-2]
                                                    end_date = "01-" + FullRangeUri.split(
                                                        "/")[-1]
                                                    datetimeObject = datetime.strptime(
                                                        start_date, "%d-%m-%Y")
                                                    new_start_date = datetimeObject.strftime(
                                                        "%Y-%m-%d")
                                                    datetimeObject = datetime.strptime(
                                                        end_date, "%d-%m-%Y")
                                                    new_end_date = datetimeObject.strftime(
                                                        "%Y-%m-%d")
                                                    a = pd.DatetimeIndex(
                                                        start=new_start_date,
                                                        end=new_end_date,
                                                        freq=MonthBegin())

                                                    temp_datelist = []
                                                    for temp_date in a:
                                                        temp_datelist.append(
                                                            temp_date)

                                                if ModelUri.split(
                                                        "/")[3].endswith("_A"):
                                                    start_date = "01-" + FullRangeUri.split(
                                                        "/")[-2]
                                                    end_date = "01-" + FullRangeUri.split(
                                                        "/")[-1]
                                                    datetimeObject = datetime.strptime(
                                                        start_date, "%d-%m-%Y")
                                                    new_start_date = datetimeObject.strftime(
                                                        "%Y-%m-%d")
                                                    datetimeObject = datetime.strptime(
                                                        end_date, "%d-%m-%Y")
                                                    new_end_date = datetimeObject.strftime(
                                                        "%Y-%m-%d")
                                                    a = pd.DatetimeIndex(
                                                        start=new_start_date,
                                                        end=new_end_date,
                                                        freq=YearBegin())
                                                    # print(a)
                                                    temp_datelist = []
                                                    for temp_date in a:
                                                        temp_datelist.append(
                                                            temp_date)
                                            print(ModelUri)
                                            print(FullRangeUri)
                                            print(len(temp_datelist))
                                            print(len(temp_model_data))
                                            # print("######################33")
                                            df = pd.DataFrame()
                                            #
                                            # print(len(temp_datelist))
                                            # print(len(temp_model_data))
                                            temp_datelist = temp_datelist
                                            temp_model_data = temp_model_data
                                            if len(temp_model_data) == len(
                                                    temp_datelist):
                                                df["Date"] = temp_datelist
                                                df["value"] = temp_model_data
                                                df["Model Code"] = ModelUri
                                                df["Model check code"] = ModelUri.split(
                                                    "/")[3].split(
                                                        ".")[-1].replace(
                                                            "_", ".")
                                                df.sort_values('Date',
                                                               ascending=False)
                                                # df.append(pd.DataFrame(temp_datelist))
                                                # df.append(pd.DataFrame(temp_model_data))
                                                # print(df)
                                                final_df = final_df.append(df)
                                                # print(final_df.head(10))
                                                # print("###################################################################################################################################")

            final_df1 = final_df1.append(final_df)
        final_df1.to_csv(path + "EIA_MODEL_DATA.csv", index=False)
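
Note: the pd.DatetimeIndex(start=..., end=..., freq=MonthBegin()/YearBegin()) constructor used above was removed in later pandas releases; on current pandas the same ranges can be built with pd.date_range. A minimal sketch with placeholder dates:

import pandas as pd

monthly = pd.date_range(start="2020-01-01", end="2020-12-01", freq="MS")  # month begin
yearly = pd.date_range(start="2015-01-01", end="2020-01-01", freq="YS")   # year begin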
Example #13
def makePretrainDataTrainVal(howMany, dataType, trainVal):

    dataDir = '../data/rawdata/Coco'
    annFile = '{}/instances_{}.json'.format(dataDir, dataType)

    if not os.path.exists(dataDir):
        os.mkdir(dataDir)

    if not os.path.exists(dataDir + '/Annot'):
        os.mkdir(dataDir + '/Annot')

    if not os.path.exists(dataDir + '/Xml'):
        os.mkdir(dataDir + '/Xml')

    if not os.path.exists(dataDir + '/Input'):
        os.mkdir(dataDir + '/Input')

    coco = COCO(annFile)

    catIds = coco.getCatIds(catNms=['person'])
    imgIds = coco.getImgIds(catIds=catIds)
    outputFileName = '../data/metadata/Coco.csv'

    cols = ['Image.Title', 'Pano.File', 'Xml.File', 'Annot.File', 'Train.Val']
    rows = []

    # if exists, append
    if os.path.isfile(outputFileName):
        inputDf = pd.read_csv(outputFileName)
        for idx, row in inputDf.iterrows():
            rows.append([row['Image.Title'], row['Pano.File'], row['Xml.File'], row['Annot.File'], row['Train.Val']])

    howMany = len(imgIds) if howMany < 0 else howMany

    for i in range(howMany):

        imgId = 0
        inputImgName = ''

        while True:
            imgId = imgIds[np.random.randint(0, len(imgIds))]
            inputImgName = dataDir + '/Input/InputImg-' + str(imgId) + '.jpg'
            if not os.path.isfile(inputImgName):
                break

        img = coco.loadImgs(imgId)[0]
        print('img: {}'.format(img))
        inputImg = io.imread(img['coco_url'])
        io.imsave(inputImgName, inputImg)

        xmlName = dataDir + '/Xml/Xml-' + str(imgId) + '.xml'
        xmlRoot = ET.Element("root")
        xmlToothList = ET.SubElement(xmlRoot, "ToothList")

        annotDir = dataDir + '/Annot/Annot-' + str(imgId) + '/'
        os.mkdir(annotDir)

        pngs = cocoSegmentationToPng(coco, imgId)

        for j in range(len(pngs)):

            png = pngs[j]
            annotImg = np.array(png)
            annotName = dataDir + '/Annot/Annot-' + str(imgId) + '/TargetImg-' + str(j) + '.jpg'
            cv2.imwrite(annotName, annotImg)
            xmlTooth = ET.SubElement(xmlToothList, "Tooth", Number=str(j))
            coords = makeRandomBoundingBox(annotImg)
            for k in range(len(coords)):
                (x, y) = coords[k]
                ET.SubElement(xmlTooth, 'P' + str(k), Y=str(y), X=str(x))

        xmlTree = ET.ElementTree(xmlRoot)
        xmlTree.write(xmlName)

        row = [imgId, inputImgName, xmlName, annotDir, trainVal]
        rows.append(row)

        # write every time so that there will be result to use even after an abrupt error
        outputDf = pd.DataFrame(rows, columns=cols)
        outputDf.to_csv(outputFileName)

    return
Example #14
def extract_abstract_using_openAPI(start, end):
    # i: Excel file index
    print('API start')
    for i in range(start, end):
        rb = xlrd.open_workbook(DOWNLAD_PATH + "논문검색리스트Excel (" + str(i) +
                                ").xls")
        rb_sheet = rb.sheet_by_index(0)
        nrows = rb_sheet.nrows
        ncols = rb_sheet.ncols

        result = []
        words = [
            "title_kor", "title_eng", "main_author", "sub_author",
            "journal_kor", "journal_eng", "issuer_kor", "issuer_eng",
            "issue_year", "book_num", "page_num", "keyword_kor", "keyword_eng",
            "subject", "quote", "direct_urls", "doi", "abstract"
        ]

        # every row in the Excel sheet
        for j in range(1, nrows):
            papers = {}
            idx = 0
            papers_kci_id = ""

            for col in range(ncols):
                # paper ID
                if col == 1:
                    papers_kci_id = rb_sheet.cell_value(j, col)
                    continue
                if col == 0 or col == 10:
                    continue

                papers[words[idx]] = "NaN"
                papers[words[idx]] = rb_sheet.cell_value(j, col)
                idx += 1

            # print(papers_kci_id)
            url = "https://open.kci.go.kr/po/openapi/openApiSearch.kci?key=19192000&apiCode=articleDetail&id=" + papers_kci_id
            response = requests.get(url=url)
            if (response.status_code == 200):
                xml = response.text
            else:
                continue

            # xml parsing
            tree = ET.ElementTree(ET.fromstring(xml))
            root = tree.getroot()

            # keep only up to the mid-level subject category
            categories = root.find('outputData').find('record').find(
                'articleInfo').find('article-categories').text
            if categories is None:
                papers["subject"] = "NaN"
            else:
                categories_list = categories.split('>')
                if len(categories_list) > 1:
                    papers["subject"] = categories_list[1].strip()

            abstract = root.find('outputData').find('record').find(
                'articleInfo').find('abstract')
            if abstract is None:
                papers["abstract"] = "NaN"
            else:
                papers["abstract"] = abstract.text
            # print(abstract.text)

            result.append(papers)

        print("finished Excel file " + str(i))
        # print(result)
        for item in result:
            Summary_report(
                title_kor=item['title_kor'],
                title_eng=item['title_eng'],
                main_author=item['main_author'],
                sub_author=item['sub_author'],
                # journal name
                journal_kor=item['journal_kor'],
                journal_eng=item['journal_eng'],
                # publisher
                issuer_kor=item['issuer_kor'],
                issuer_eng=item['issuer_eng'],
                issue_year=item['issue_year'],
                book_num=item['book_num'],
                keyword_kor=item['keyword_kor'],
                keyword_eng=item['keyword_eng'],
                subject=item['subject'],
                quote=item['quote'],
                direct_urls=item['direct_urls'],
                doi=item['doi'],
                abstract=item['abstract'],
                page_num=item['page_num']).save()
Example #15
def New_Save(dEntities):
    """This will setup a new saved game directory along with the player's
    xml data. Along with this system there should be other functions
    that will move the chunk data into this directory and also
    fetch the entity xml data for the beginning level."""

    #Create a new saved game directory (non-optional directory name, whatever isn't already taken.)
    #Directory will be added to config.Game_Directory + "\\SavedGames"
    os.chdir(config.Game_Directory + "\\SavedGames")

    lyst = os.listdir(os.getcwd())

    counter = 0

    #Iterate through the lyst counting the saved games.
    for i in lyst:
        #Checks if the current item
        #   is a Saved Game dir.
        if (i[0:4] == "Save"):
            counter += 1

    #This is so that the counter is one more than the total
    #   amount of saves.
    counter += 1

    #This is the new saved game's folder
    os.mkdir(os.getcwd() + "\\Save" + str(counter))

    os.chdir(os.getcwd() + "\\Save" + str(counter))

    #Returns an Element object that can be modified and
    #   saved as an xml file. This will be the player's
    #   saved data. (XML tag names cannot contain spaces.)
    playerStats = ET.Element("PlayerStats")

    #This adds an attribute
    playerStats.set("name", "Player1")

    playerStats.append(ET.Element("Class", {"sub-class": "Fighter"}))
    playerStats.find("Class").text = "Hero"

    playerStats.append(ET.Element("CurHp"))
    playerStats.find("CurHp").text = 20
    playerStats.append(ET.Element("MaxHp"))
    playerStats.find("MaxHp").text = 20

    playerStats.append(ET.Element("CurMp"))
    playerStats.find("CurMp").text = 20
    playerStats.append(ET.Element("MaxMp"))
    playerStats.find("MaxMp").text = 20

    playerStats.append(ET.Element("Strength"))
    playerStats.find("Strength").text = 10
    playerStats.append(ET.Element("Intelligence"))
    playerStats.find("Intelligence").text = 10
    playerStats.append(ET.Element("Dexterity"))
    playerStats.find("Dexterity").text = 10
    playerStats.append(ET.Element("Agility"))
    playerStats.find("Agility").text = 10

    #This should save the xml we just created
    #   into the new saved directory
    ET.ElementTree(playerStats).write("Save" + str(counter))

    #Select this new saved game.
    config.Saved_Game_Directory = os.getcwd() + "\\Save" + str(counter)
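
A minimal read-back sketch for the stats file written above (hypothetical helper; assumes the string values and space-free tag name from the fixed listing):

def Load_Stats(save_path):
    #Parse the file written by New_Save and convert numeric fields back to int
    stats = ET.parse(save_path).getroot()
    return {"class": stats.find("Class").text,
            "max_hp": int(stats.find("MaxHp").text)}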
Example #16
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement

root = Element("movie")
title = SubElement(root, "title")
title.text = "장사리"
genere = SubElement(root, "genre")
genere.text = "액션, 역사"
rating = SubElement(root, "rating")
rating.text = "5"

ET.ElementTree(root).write("movie.xml", encoding="utf8", xml_declaration=True)
Example #17
        closest_box_1_rotation = ET.SubElement(scenario,"closest_box_1_rotation")
        closest_box_1_rotation.text = str(disRotate(closest_box_1.rz))
        closest_box_1_type = ET.SubElement(scenario,"closest_box_1_type")
        closest_box_1_type.text = str(closest_box_1.type)
        closest_box_1_occlusion = ET.SubElement(scenario,"closest_box_1_occlusion")
        closest_box_1_occlusion.text = str(closest_box_1.occ)

    if closest_box_2 is None:
        closest_box_2_existence = ET.SubElement(scenario,"closest_box_2_existence")
        closest_box_2_existence.text = str(False)
        closest_box_2_rotation = ET.SubElement(scenario,"closest_box_2_rotation")
        closest_box_2_rotation.text = str(0)
        closest_box_2_type = ET.SubElement(scenario,"closest_box_2_type")
        closest_box_2_type.text = str("Misc")
        closest_box_2_occlusion = ET.SubElement(scenario,"closest_box_2_occlusion")
        closest_box_2_occlusion.text = str(box.occ)
    else:
        closest_box_2_existence = ET.SubElement(scenario,"closest_box_2_existence")
        closest_box_2_existence.text = str(True)
        closest_box_2_rotation = ET.SubElement(scenario,"closest_box_2_rotation")
        closest_box_2_rotation.text = str(disRotate(closest_box_2.rz))
        closest_box_2_type = ET.SubElement(scenario,"closest_box_2_type")
        closest_box_2_type.text = str(closest_box_2.type)
        closest_box_2_occlusion = ET.SubElement(scenario,"closest_box_2_occlusion")
        closest_box_2_occlusion.text = str(closest_box_2.occ)
    i += 1

tree = ET.ElementTree(scenarios)
tree.write(resultXML)
Example #18
def module_xml2po(module, po_file, language):
    # type: (UMC_Module, str, str) -> None
    """
	Create a PO file from the |XML| definition of an |UMC| module.

	:param module: |UMC| module.
	:param po_file: File name of the textual message catalog.
	:param language: 2-letter language code.
	"""
    message_po = '%s/messages.po' % (os.path.dirname(po_file) or '.')

    po = polib.POFile(check_for_duplicates=True)
    po.header = PO_HEADER
    po.metadata = copy.copy(PO_METADATA)
    po.metadata['Project-Id-Version'] = module.package
    po.metadata['POT-Creation-Date'] = formatdate(localtime=True)
    po.metadata['Language'] = language

    def _append_po_entry(xml_entry):
        """Helper function to access text property of XML elements and to find the
		corresponding po-entry."""
        if xml_entry is not None and xml_entry.text is not None:  # important to use "xml_entry is not None"!
            entry = polib.POEntry(msgid=xml_entry.text, msgstr='')
            try:
                po.append(entry)
            except ValueError as exc:  # Entry "..." already exists
                print('Warning: Appending %r to po file failed: %s' %
                      (xml_entry.text, exc),
                      file=sys.stderr)

    if module.xml_definition and os.path.isfile(module.xml_definition):
        tree = ET.ElementTree(file=module.xml_definition)
        _append_po_entry(tree.find('module/name'))
        _append_po_entry(tree.find('module/description'))
        _append_po_entry(tree.find('module/keywords'))
        for flavor in tree.findall('module/flavor'):
            _append_po_entry(flavor.find('name'))
            _append_po_entry(flavor.find('description'))
            _append_po_entry(flavor.find('keywords'))
        _append_po_entry(tree.find('link/name'))
        _append_po_entry(tree.find('link/description'))
        _append_po_entry(tree.find('link/url'))

    if module.xml_categories and os.path.isfile(module.xml_categories):
        tree = ET.ElementTree(file=module.xml_categories)
        for cat in tree.findall('categories/category'):
            _append_po_entry(cat.find('name'))

    po.save(message_po)
    if os.path.isfile(po_file):
        try:
            if helper.call('msgmerge', '--update', '--sort-output', po_file,
                           message_po):
                raise Error('Failed to merge module translations into %s.' %
                            (po_file, ))
            backup_file = '{}~'.format(po_file)
            if os.path.isfile(backup_file):
                os.unlink(backup_file)
        finally:
            if os.path.isfile(message_po):
                os.unlink(message_po)
    else:
        helper.call('mv', message_po, po_file)
Example #19
import xml.etree.ElementTree as et

# Create an empty document in memory

etree = et.ElementTree()

e = et.Element('Student')

etree._setroot(e)

e_name = et.SubElement(e, 'Name')
e_name.text = "hahahah"

etree.write('v06.xml')
Example #20
    def write(self, root, output_stream):
        tree = ET.ElementTree(root)
        tree.write(output_stream, encoding=str('UTF-8'), xml_declaration=True)
Example #21
    def save_project(self, simulator_config=None):
        if self.project_file is None or not os.path.isfile(self.project_file):
            return

        # Recreate file
        open(self.project_file, 'w').close()
        root = ET.Element("UniversalRadioHackerProject")
        tree = ET.ElementTree(root)
        tree.write(self.project_file)

        # self.write_labels(self.maincontroller.compare_frame_controller.proto_analyzer)
        self.write_modulators_to_project_file(tree=tree)

        tree = ET.parse(self.project_file)
        root = tree.getroot()
        root.append(
            self.__device_conf_dict_to_xml("device_conf", self.device_conf))
        root.append(self.simulator_rx_conf_to_xml())
        root.append(self.simulator_tx_conf_to_xml())
        root.set("description",
                 str(self.description).replace("\n", self.NEWLINE_CODE))
        root.set(
            "collapse_project_tabs",
            str(int(not self.main_controller.ui.tabParticipants.isVisible())))
        root.set("modulation_was_edited", str(int(self.modulation_was_edited)))
        root.set("broadcast_address_hex", str(self.broadcast_address_hex))

        open_files = []
        for i, sf in enumerate(
                self.main_controller.signal_tab_controller.signal_frames):
            self.write_signal_information_to_project_file(sf.signal, tree=tree)
            try:
                pf = self.main_controller.signal_protocol_dict[sf]
                filename = pf.filename

                if filename in FileOperator.archives.keys():
                    open_filename = FileOperator.archives[filename]
                else:
                    open_filename = filename

                if not open_filename or open_filename in open_files:
                    continue
                open_files.append(open_filename)

                file_tag = ET.SubElement(root, "open_file")
                file_tag.set("name",
                             os.path.relpath(open_filename, self.project_path))
                file_tag.set("position", str(i))
            except Exception:
                pass

        for group_tag in root.findall("group"):
            root.remove(group_tag)

        cfc = self.main_controller.compare_frame_controller

        for i, group in enumerate(cfc.groups):
            group_tag = ET.SubElement(root, "group")
            group_tag.set("name", str(group.name))
            group_tag.set("id", str(i))

            for proto_frame in cfc.protocols[i]:
                if proto_frame.filename:
                    proto_tag = ET.SubElement(group_tag, "cf_protocol")
                    proto_tag.set(
                        "filename",
                        os.path.relpath(proto_frame.filename,
                                        self.project_path))

        root.append(
            cfc.proto_analyzer.to_xml_tag(
                decodings=cfc.decodings,
                participants=self.participants,
                messages=[
                    msg for proto in cfc.full_protocol_list
                    for msg in proto.messages
                ]))

        if simulator_config is not None:
            root.append(simulator_config.save_to_xml())

        util.write_xml_to_file(root, self.project_file)
Ejemplo n.º 22
0
os.mkdir("sentence_alignment")

info_file = open("corpus_to_align/output_data_aligned/info.txt")
for doc_id, group in itertools.groupby(reader(info_file),
                                       operator.itemgetter(0)):
    assert doc_id.endswith(".txt")
    doc_id = doc_id[:-4]
    root = et.Element("alignments")
    root.attrib['source_id'] = doc_id
    try:
        root.attrib['translation_id'] = files[doc_id][3]
    except KeyError:
        sys.stderr.write("warning: skipping {}\n".format(doc_id))
        continue
    tree = et.ElementTree(root)
    for _, source_ids, target_ids in group:
        alignment = et.Element("alignment")

        if (files[doc_id][0], files[doc_id][2]) == (source_lang, target_lang):
            pass
        elif (files[doc_id][0], files[doc_id][2]) == (target_lang,
                                                      source_lang):
            source_ids, target_ids = target_ids, source_ids

        source = et.Element("source")
        source.attrib['segments'] = " ".join("segment-{}".format(i - 1)
                                             for i in source_ids)
        trans = et.Element("translation")
        trans.attrib['segments'] = " ".join("segment-{}".format(i - 1)
                                            for i in target_ids)
Ejemplo n.º 23
0
Archivo: _vtu.py Proyecto: vn-os/meshio
def write(filename, mesh, binary=True, compression="zlib", header_type=None):
    # Writing XML with an etree requires first transforming the (potentially large)
    # arrays into strings, which are much larger in memory still. This makes this writer
    # very memory hungry. See <https://stackoverflow.com/q/59272477/353337>.
    from .._cxml import etree as ET

    # Check if the mesh contains polyhedral cells, this will require special treatment
    # in certain places.
    is_polyhedron_grid = False
    for c in mesh.cells:
        if c.type[:10] == "polyhedron":
            is_polyhedron_grid = True
            break
    # The current implementation cannot mix polyhedral cells with other cell types.
    # To write such meshes, represent all cells as polyhedra.
    if is_polyhedron_grid:
        for c in mesh.cells:
            if c.type[:10] != "polyhedron":
                raise ValueError(
                    "VTU export cannot mix polyhedral cells with other cell types"
                )

    if not binary:
        logging.warning("VTU ASCII files are only meant for debugging.")

    if mesh.points.shape[1] == 2:
        logging.warning("VTU requires 3D points, but 2D points given. "
                        "Appending 0 third component.")
        mesh.points = np.column_stack([
            mesh.points[:, 0], mesh.points[:, 1],
            np.zeros(mesh.points.shape[0])
        ])

    vtk_file = ET.Element(
        "VTKFile",
        type="UnstructuredGrid",
        version="0.1",
        # Use the native endianness. Not strictly necessary, but this simplifies things
        # a bit.
        byte_order=("LittleEndian"
                    if sys.byteorder == "little" else "BigEndian"),
    )
    header_type = ("UInt32" if header_type is None else vtk_file.set(
        "header_type", header_type))
    assert header_type is not None

    if binary and compression:
        # TODO lz4, lzma <https://vtk.org/doc/nightly/html/classvtkDataCompressor.html>
        compressions = {
            "lzma": "vtkLZMADataCompressor",
            "zlib": "vtkZLibDataCompressor",
        }
        assert compression in compressions
        vtk_file.set("compressor", compressions[compression])

    # swap the data to match the system byteorder
    # Don't use byteswap to make sure that the dtype is changed; see
    # <https://github.com/numpy/numpy/issues/10372>.
    points = mesh.points.astype(mesh.points.dtype.newbyteorder("="),
                                copy=False)
    for k, (cell_type, data) in enumerate(mesh.cells):
        # Treatment of polyhedra is different from other types
        if is_polyhedron_grid:
            new_cell_info = []
            for cell_info in data:
                new_face_info = []
                for face_info in cell_info:
                    face_info = np.asarray(face_info)
                    new_face_info.append(
                        face_info.astype(face_info.dtype.newbyteorder("="),
                                         copy=False))
                new_cell_info.append(new_face_info)
            mesh.cells[k] = CellBlock(cell_type, new_cell_info)
        else:
            mesh.cells[k] = CellBlock(
                cell_type, data.astype(data.dtype.newbyteorder("="),
                                       copy=False))
    for key, data in mesh.point_data.items():
        mesh.point_data[key] = data.astype(data.dtype.newbyteorder("="),
                                           copy=False)

    for data in mesh.cell_data.values():
        for k, dat in enumerate(data):
            data[k] = dat.astype(dat.dtype.newbyteorder("="), copy=False)
    for key, data in mesh.field_data.items():
        mesh.field_data[key] = data.astype(data.dtype.newbyteorder("="),
                                           copy=False)

    def numpy_to_xml_array(parent, name, data):
        vtu_type = numpy_to_vtu_type[data.dtype]
        fmt = "{:.11e}" if vtu_type.startswith("Float") else "{:d}"
        da = ET.SubElement(parent, "DataArray", type=vtu_type, Name=name)
        if len(data.shape) == 2:
            da.set("NumberOfComponents", "{}".format(data.shape[1]))
        if binary:
            da.set("format", "binary")
            if compression:
                # compressed write
                def text_writer(f):
                    max_block_size = 32768
                    data_bytes = data.tobytes()

                    # round up
                    num_blocks = -int(-len(data_bytes) // max_block_size)
                    last_block_size = (len(data_bytes) -
                                       (num_blocks - 1) * max_block_size)

                    # It's too bad that we have to keep all blocks in memory. This is
                    # necessary because the header, written first, needs to know the
                    # lengths of all blocks. Also, the blocks are encoded _after_ having
                    # been concatenated.
                    c = {"lzma": lzma, "zlib": zlib}[compression]
                    compressed_blocks = [
                        # This compress is the slowest part of the writer
                        c.compress(block)
                        for block in _chunk_it(data_bytes, max_block_size)
                    ]

                    # collect header
                    header = np.array(
                        [num_blocks, max_block_size, last_block_size] +
                        [len(b) for b in compressed_blocks],
                        dtype=vtu_to_numpy_type[header_type],
                    )
                    f.write(base64.b64encode(header.tobytes()).decode())
                    f.write(
                        base64.b64encode(b"".join(compressed_blocks)).decode())

            else:
                # uncompressed write
                def text_writer(f):
                    data_bytes = data.tobytes()
                    # collect header
                    header = np.array(len(data_bytes),
                                      dtype=vtu_to_numpy_type[header_type])
                    f.write(
                        base64.b64encode(header.tobytes() +
                                         data_bytes).decode())

        else:
            da.set("format", "ascii")

            def text_writer(f):
                # This write() loop is the bottleneck for the write. Alternatives:
                # savetxt is super slow:
                #   np.savetxt(f, data.reshape(-1), fmt=fmt)
                # joining and writing is a bit faster, but consumes huge amounts of
                # memory:
                #   f.write("\n".join(map(fmt.format, data.reshape(-1))))
                for item in data.reshape(-1):
                    f.write((fmt + "\n").format(item))

        da.text_writer = text_writer

    def _polyhedron_face_cells(face_cells):
        # Define the faces of each cell in the format specified for VTU polyhedron cells.
        # These are defined in Mesh.polyhedron_faces, as block data. The block consists
        # of a nested list (the outer list represents a cell, the inner list its faces),
        # where the items of the inner list are the nodes of a specific face.
        #
        # The output format is specified at https://vtk.org/Wiki/VTK/Polyhedron_Support

        # Initialize array for size of data per cell.
        data_size_per_cell = np.zeros(len(face_cells), dtype=int)

        # The data itself is of unknown size, and cannot be initialized
        data = []
        for ci, cell in enumerate(face_cells):
            # Number of faces for this cell
            data.append(len(cell))
            for face in cell:
                # Number of nodes for this face
                data.append(face.size)
                # The nodes themselves
                data += face.tolist()

            data_size_per_cell[ci] = len(data)

        # The returned data corresponds to the faces and faceoffsets fields in the
        # vtu polyhedron data format
        return data, data_size_per_cell.tolist()
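    # Worked example (illustrative values only): a single tetrahedron given as four
    # triangular faces [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]] flattens to
    #   data               = [4,  3, 0, 1, 2,  3, 0, 1, 3,  3, 0, 2, 3,  3, 1, 2, 3]
    #   data_size_per_cell = [17]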

    comment = ET.Comment(f"This file was created by meshio v{__version__}")
    vtk_file.insert(1, comment)

    grid = ET.SubElement(vtk_file, "UnstructuredGrid")

    total_num_cells = sum([len(c.data) for c in mesh.cells])
    piece = ET.SubElement(
        grid,
        "Piece",
        NumberOfPoints="{}".format(len(points)),
        NumberOfCells=f"{total_num_cells}",
    )

    # points
    if points is not None:
        pts = ET.SubElement(piece, "Points")
        numpy_to_xml_array(pts, "Points", points)

    if mesh.cells is not None and len(mesh.cells) > 0:
        cls = ET.SubElement(piece, "Cells")

        faces = None
        faceoffsets = None

        if is_polyhedron_grid:
            # The VTK polyhedron format requires both Cell-node connectivity, and a
            # definition of faces. The cell-node relation must be recovered from the
            # cell-face-nodes currently in CellBlocks.
            # NOTE: If polyhedral cells are implemented for more mesh types, this code
            # block may be useful for those as well.
            con = []
            num_nodes_per_cell = []
            for block in mesh.cells:
                for cell in block.data:
                    nodes_this_cell = []
                    for face in cell:
                        nodes_this_cell += face.tolist()
                    unique_nodes = np.unique(nodes_this_cell).tolist()

                    con += unique_nodes
                    num_nodes_per_cell.append(len(unique_nodes))

            connectivity = np.array(con)
            # offsets = np.hstack(([0], np.cumsum(num_nodes_per_cell)[:-1]))
            offsets = np.cumsum(num_nodes_per_cell)

            # Initialize data structures for polyhedral cells
            faces = []
            faceoffsets = []

        else:
            # create connectivity, offset, type arrays
            connectivity = np.concatenate([
                v.data[:,
                       _meshio_to_vtk_order(v.type, v.data.shape[1])].reshape(
                           -1) for v in mesh.cells
            ])

            # offset (points to the first element of the next cell)
            offsets = [
                v.data.shape[1] *
                np.arange(1, v.data.shape[0] + 1, dtype=connectivity.dtype)
                for v in mesh.cells
            ]
            for k in range(1, len(offsets)):
                offsets[k] += offsets[k - 1][-1]
            offsets = np.concatenate(offsets)

        # types
        types_array = []
        for k, v in mesh.cells:
            # For polygon and polyhedron grids, the number of nodes is part of the cell
            # type key. This part must be stripped away.
            special_cells = [
                "polygon",
                "polyhedron",
                "VTK_LAGRANGE_CURVE",
                "VTK_LAGRANGE_TRIANGLE",
                "VTK_LAGRANGE_QUADRILATERAL",
                "VTK_LAGRANGE_TETRAHEDRON",
                "VTK_LAGRANGE_HEXAHEDRON",
                "VTK_LAGRANGE_WEDGE",
                "VTK_LAGRANGE_PYRAMID",
            ]
            key_ = None
            for string in special_cells:
                if k.startswith(string):
                    key_ = string

            if key_ is None:
                # No special treatment
                key_ = k

            # further adaptations for polyhedra
            if k.startswith("polyhedron"):
                # Get face-cell relation on the vtu format. See comments in helper
                # function for more information of how to specify this.
                faces_loc, faceoffsets_loc = _polyhedron_face_cells(v)
                # Adjust offsets to global numbering
                assert faceoffsets is not None
                if len(faceoffsets) > 0:
                    faceoffsets_loc = [
                        fi + faceoffsets[-1] for fi in faceoffsets_loc
                    ]

                assert faces is not None
                faces += faces_loc
                faceoffsets += faceoffsets_loc

            types_array.append(np.full(len(v), meshio_to_vtk_type[key_]))

        types = np.concatenate(
            types_array
            # [np.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells]
        )

        numpy_to_xml_array(cls, "connectivity", connectivity)
        numpy_to_xml_array(cls, "offsets", offsets)
        numpy_to_xml_array(cls, "types", types)

        if is_polyhedron_grid:
            # Also store face-node relation
            numpy_to_xml_array(cls, "faces", np.array(faces, dtype=int))
            numpy_to_xml_array(cls, "faceoffsets",
                               np.array(faceoffsets, dtype=int))

    if mesh.point_data:
        pd = ET.SubElement(piece, "PointData")
        for name, data in mesh.point_data.items():
            numpy_to_xml_array(pd, name, data)

    if mesh.cell_data:
        cd = ET.SubElement(piece, "CellData")
        for name, data in raw_from_cell_data(mesh.cell_data).items():
            numpy_to_xml_array(cd, name, data)

    # write_xml(filename, vtk_file, pretty_xml)
    tree = ET.ElementTree(vtk_file)
    tree.write(filename)
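# _chunk_it() is referenced above but not shown in this excerpt.  A minimal sketch,
# consistent with how it is called (yield successive slices of at most
# max_block_size bytes; the last chunk may be shorter):
def _chunk_it(data_bytes, block_size):
    for start in range(0, len(data_bytes), block_size):
        yield data_bytes[start:start + block_size]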
Ejemplo n.º 24
0
    def write(self):
        """ Write the translation file back to the filesystem. """

        # If we are keeping obsolete messages then add them to the updated
        # message elements list.
        for name, message_els in self._contexts.items():
            updated_message_els = None

            for message_el in message_els:
                source = self.pretty(message_el.find('source').text)

                translation_el = message_el.find('translation')
                if translation_el is not None and translation_el.text:
                    if self._no_obsolete:
                        self.progress(
                                "Discarded obsolete message '{0}'".format(
                                        source))
                        self._nr_discarded_obsolete += 1
                    else:
                        translation_el.set('type', 'vanished')

                        if updated_message_els is None:
                            updated_message_els = self._get_updated_message_els(
                                    name)

                        self._add_message_el(message_el, updated_message_els)

                        self.progress(
                                "Kept obsolete message '{0}'".format(source))
                        self._nr_kept_obsolete += 1
                else:
                    self.progress(
                            "Discarded untranslated message '{0}'".format(
                                    source))
                    self._nr_discarded_untranslated += 1

        # Create the sorted context elements.
        for name in sorted(self._updated_contexts.keys()):
            context_el = ElementTree.Element('context')

            name_el = ElementTree.Element('name')
            name_el.text = name
            context_el.append(name_el)

            context_el.extend(self._updated_contexts[name])

            self._root.append(context_el)

        self.progress("Writing {0}...".format(self._ts_file))
        with open(self._ts_file, 'w', encoding='utf-8') as f:
            f.write('<?xml version="1.0" encoding="utf-8"?>\n')
            f.write('<!DOCTYPE TS>\n')

            # Python v3.9 and later.
            if hasattr(ElementTree, 'indent'):
                ElementTree.indent(self._root)

            ElementTree.ElementTree(self._root).write(f, encoding='unicode')
            f.write('\n')

        if not self._no_summary:
            self._summary()
Ejemplo n.º 25
0
import sys
import os
import xml.etree.ElementTree as ET

root = ET.Element("root")
sub1 = ET.SubElement(root, "sub", att='asdf')
ET.SubElement(sub1, "txt", c='a123').text = "hello"
ET.SubElement(sub1, "txt", d='432').text = "world"
sub2 = ET.SubElement(root, "sub", att='sdf')
blar = ET.SubElement(root, "blar", att='asdf')
ET.SubElement(blar, "txt1", gg='a123').text = "xml1"
ET.SubElement(blar, "txt2", d='432').text = "xml2"

tree = ET.ElementTree(root)
tree.write('tmp/tmp.xml', encoding="utf-8", xml_declaration=True)

#%%

tree = ET.parse('tmp/tmp.xml')
root = tree.getroot()

for track in root.findall('sub'):
    print(track.attrib['att'])  # == print(track.get('att'))

for track in root.find('sub').findall('txt'):
    print(track.text, track.attrib)
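# Note: root.find('sub') returns only the first <sub> element; to visit the <txt>
# children of every <sub>, root.findall('sub/txt') or root.iter('txt') could be
# used instead.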

for track in root.iter():
    print(track)
Ejemplo n.º 26
0
    def _write_bdv_xml(self):
        # TODO we have hardcoded the number of channels and time points to 1,
        # but should support more channels
        nt, nc = 1, 1
        key = 't00000/s00/0/cells'
        with file_reader(self.output_path, 'r') as f:
            shape = f[key].shape
            dtype = f[key].dtype
        nz, ny, nx = tuple(shape)

        # format for Tischi's bdv extension
        bdv_dtype = 'bdv.hdf5.ulong' if np.dtype(dtype) == np.dtype(
            'uint64') else 'bdv.hdf5'

        # write top-level data
        root = ET.Element('SpimData')
        root.set('version', '0.2')
        bp = ET.SubElement(root, 'BasePath')
        bp.set('type', 'relative')
        bp.text = '.'

        # read metadata from dict
        unit = self.metadata_dict.get('unit', 'micrometer')
        resolution = self.metadata_dict.get('resolution', (1., 1., 1.))
        dz, dy, dx = resolution
        offsets = self.metadata_dict.get('offsets', (0., 0., 0.))
        oz, oy, ox = offsets

        seqdesc = ET.SubElement(root, 'SequenceDescription')
        imgload = ET.SubElement(seqdesc, 'ImageLoader')
        imgload.set('format', bdv_dtype)
        el = ET.SubElement(imgload, 'hdf5')
        el.set('type', 'relative')
        el.text = os.path.basename(self.output_path)
        viewsets = ET.SubElement(seqdesc, 'ViewSetups')
        attrs = ET.SubElement(viewsets, 'Attributes')
        attrs.set('name', 'channel')
        for c in range(nc):
            vs = ET.SubElement(viewsets, 'ViewSetup')
            ET.SubElement(vs, 'id').text = str(c)
            ET.SubElement(vs, 'name').text = 'channel {}'.format(c + 1)
            ET.SubElement(vs, 'size').text = '{} {} {}'.format(nx, ny, nz)
            vox = ET.SubElement(vs, 'voxelSize')
            ET.SubElement(vox, 'unit').text = unit
            ET.SubElement(vox, 'size').text = '{} {} {}'.format(dx, dy, dz)
            a = ET.SubElement(vs, 'attributes')
            ET.SubElement(a, 'channel').text = str(c + 1)
            chan = ET.SubElement(attrs, 'Channel')
            ET.SubElement(chan, 'id').text = str(c + 1)
            ET.SubElement(chan, 'name').text = str(c + 1)
        tpoints = ET.SubElement(seqdesc, 'Timepoints')
        tpoints.set('type', 'range')
        ET.SubElement(tpoints, 'first').text = str(0)
        ET.SubElement(tpoints, 'last').text = str(nt - 1)

        vregs = ET.SubElement(root, 'ViewRegistrations')
        for t in range(nt):
            for c in range(nc):
                vreg = ET.SubElement(vregs, 'ViewRegistration')
                vreg.set('timepoint', str(t))
                vreg.set('setup', str(c))
                vt = ET.SubElement(vreg, 'ViewTransform')
                vt.set('type', 'affine')
                ET.SubElement(
                    vt, 'affine'
                ).text = '{} 0.0 0.0 {} 0.0 {} 0.0 {} 0.0 0.0 {} {}'.format(
                    dx, ox, dy, oy, dz, oz)

        indent_xml(root)
        tree = ET.ElementTree(root)
        tree.write(os.path.splitext(self.output_path)[0] + ".xml")
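        # indent_xml() is a project helper not shown in this excerpt.  On Python 3.9+
        # the standard library offers an equivalent pretty-printer, so a possible
        # alternative would be:
        #     ET.indent(root)
        #     ET.ElementTree(root).write(...)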
Ejemplo n.º 27
0
def graph(request):
    """Draw a graph.  Time will always be along the x axis.

    The request can have the following parameters:
    width - width of returned image, defaults to 800
    height - height of returned image, defaults to 600
    end - end date and time in ISO format, defaults to now
    start - start date and time in ISO format (yyyy-mm-dd hh:mm:ss),
      or a number of days before end (defaults to end - 30 days)
    title - title of graph
    series - a data series to plot, as follows:
      controller:register:colour
    """
    width = int(request.GET.get('width', 800))
    height = int(request.GET.get('height', 600))
    leftmargin = int(request.GET.get('leftmargin', 100))
    bottommargin = int(request.GET.get('bottommargin', 100))
    floatmin = float(request.GET.get('floatmin', 0.0))
    floatmax = float(request.GET.get('floatmax', 30.0))
    end = None
    if 'end' in request.GET:
        end = parsedatetime(request.GET['end'])
    if end is None:
        end = datetime.datetime.now()
    start = None
    if 'start' in request.GET:
        start = parsedatetime(request.GET['start'])
        if start is None:
            try:
                start = end - datetime.timedelta(
                    days=int(request.GET['start']))
            except ValueError:
                pass
    if start is None:
        start = end - datetime.timedelta(days=30)

    length = (end - start).total_seconds()
    series = request.GET.getlist('series')

    # Origin is in top-left by default.
    svg = ET.Element(
        "svg", {
            'xmlns': 'http://www.w3.org/2000/svg',
            'version': '1.1',
            'width': str(width),
            'height': str(height)
        })
    ET.SubElement(svg, "title").text = "A Graph"
    ET.SubElement(svg, "desc").text = "A longer description of the group"
    # Bounding box
    ET.SubElement(svg,
                  "rect",
                  x="0",
                  y="0",
                  width=str(width),
                  height=str(height),
                  fill="none",
                  stroke="blue")

    # Let's create a group with a transformed coordinate space such
    # that the origin is in the bottom-left
    g = ET.SubElement(svg,
                      "g",
                      transform="translate(%d,%d) scale(1,-1)" %
                      (leftmargin, height - bottommargin))

    # We can now draw axes, etc. from 0 - (width - leftmargin) on X and 0 - (height - bottommargin) on Y
    graphwidth = width - leftmargin
    graphheight = height - bottommargin

    # Draw horizontal scale lines for temperature
    y = 0.0
    while y < floatmax:
        scaley = (y - floatmin) * graphheight / floatmax
        ET.SubElement(g,
                      "line",
                      x1="0",
                      x2=str(width),
                      y1=str(scaley),
                      y2=str(scaley),
                      stroke="lightgrey")
        y = y + 1.0
    y = 0.0
    while y < floatmax:
        scaley = (y - floatmin) * graphheight / floatmax
        ET.SubElement(g,
                      "line",
                      x1="0",
                      x2=str(width),
                      y1=str(scaley),
                      y2=str(scaley),
                      stroke="darkgrey")
        y = y + 5.0

    for s in series:
        controller, register, colour = s.split(":")
        controller = Controller.objects.get(ident=controller)
        register = Register.objects.get(controller=controller, name=register)
        # Retrieve all datapoints between start and end
        dt = DATATYPE_DICT[register.datatype]
        # We want to find the timestamps of the first datapoint before
        # start (if there is one) and the first datapoint after end
        # (if there is one), and adjust start and end to include
        # these.  This will avoid the graph having blank sections at
        # the left and right hand sides.
        sdp = dt.objects.filter(register=register, timestamp__lt=start)\
                        .order_by('-timestamp')[:1]
        s_start = sdp[0].timestamp if len(sdp) == 1 else start
        edp = dt.objects.filter(register=register,timestamp__gt=end)\
                        .order_by('timestamp')[:1]
        s_end = edp[0].timestamp if len(edp) == 1 else end
        datapoints = dt.objects.filter(register=register, timestamp__lte=s_end,
                                       timestamp__gte=s_start)\
                               .order_by('timestamp')
        now_drawing = False
        dl = []
        for datum in datapoints:
            x = (datum.timestamp - start).total_seconds() * graphwidth / length
            if datum.data is None:
                now_drawing = False
            else:
                # Let's assume float for now
                y = (datum.data - floatmin) * graphheight / floatmax
                dl.append("%s %f %f" % ('L' if now_drawing else 'M', x, y))
                now_drawing = True
        ET.SubElement(g, "path", stroke=colour, fill="none", d=" ".join(dl))

    r = HttpResponse(content_type="image/svg+xml")
    ET.ElementTree(svg).write(r, encoding="UTF-8", xml_declaration=True)
    return r
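# Example request (hypothetical URL path, controller and register names), matching
# the parameters documented in the docstring above:
#   /graph?width=800&height=600&start=7&series=boiler:flow_temp:red&series=boiler:return_temp:blue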
Ejemplo n.º 28
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Problem 074

XML 1 - Find the Score

Source : https://www.hackerrank.com/challenges/xml-1-find-the-score/problem
"""
import sys
import xml.etree.ElementTree as etree


def get_attr_number(node):
    return len(node.attrib) + sum([get_attr_number(child) for child in node])


if __name__ == '__main__':
    sys.stdin.readline()
    xml = sys.stdin.read()
    tree = etree.ElementTree(etree.fromstring(xml))
    root = tree.getroot()
    print(get_attr_number(root))
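    # A small self-check with a made-up document (not part of the problem statement):
    # <a x="1"><b y="2" z="3"/><c/></a> carries 1 + 2 + 0 = 3 attributes in total.
    sample = etree.fromstring('<a x="1"><b y="2" z="3"/><c/></a>')
    assert get_attr_number(sample) == 3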
Ejemplo n.º 29
0
    def create(self, name, response, description=None, effectiveWavelength=None,
               origin=None, path=None, url=None, vBandFilter=None, vegaOffset=None,
               verbose=False):
        funcname = self.__class__.__name__ + "." + sys._getframe().f_code.co_name
        F = Filter()
        F.name = name
        F.setTransmission(response["wavelength"], response["response"])
        if effectiveWavelength is not None:
            F.effectiveWavelength = effectiveWavelength
        F.origin = origin
        F.description = description
        F.url = url
        F.write(path=path, verbose=verbose)
        return
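        # NOTE: the code below is unreachable because of the early return above; it
        # is an alternative path that builds the filter XML directly with ElementTree.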

        # Create tree root
        root = ET.Element("filter")
        # Add name and other descriptions
        ET.SubElement(root, "name").text = name
        if description is None:
            description = name
        ET.SubElement(root, "description").text = description
        if origin is None:
            origin = "unknown"
        ET.SubElement(root, "origin").text = origin
        if url is None:
            url = "unknown"
        ET.SubElement(root, "url").text = url
        # Add in response data
        RES = ET.SubElement(root, "response")
        dataSize = len(response["wavelength"])
        for i in range(dataSize):
            wavelength = response["wavelength"][i]
            transmission = response["response"][i]
            datum = "{0:7.3f} {1:9.7f}".format(wavelength, transmission)
            ET.SubElement(RES, "datum").text = datum
        # Compute effective wavelength and Vega offset if needed
        if effectiveWavelength is None:
            wavelength = response["wavelength"]
            transmission = response["response"]
            effectiveWavelength = computeEffectiveWavelength(
                wavelength, transmission)
        ET.SubElement(root,
                      "effectiveWavelength").text = str(effectiveWavelength)
        if vegaOffset is None:
            wavelength = response["wavelength"]
            transmission = response["response"]
            if vBandFilter is None:
                vBandFilter = pkg_resources.resource_filename(
                    __name__, "data/filters/Buser_V.xml")
            VO = VegaOffset(VbandFilterFile=vBandFilter)
            vegaOffset = VO.computeOffset(wavelength, transmission)
        ET.SubElement(root, "vegaOffset").text = str(vegaOffset)
        # Finalise tree and save to file
        tree = ET.ElementTree(root)
        if path is None:
            path = self.filtersDirectory
        path = path + "/" + name + ".xml"
        if verbose:
            print(funcname + "(): writing filter to file: " + path)
        tree.write(path)
        formatFile(path)
        return
    def report(self):
        arc = AssetReportCollectionElement()
        arc.id = 'asset_report_collection_' + uuid.uuid4().hex

        arc.relationships = RelationshipsType()
        arc.report_requests = ReportRequestsType()
        arc.assets = AssetsType()
        arc.reports = ReportsType()

        # TODO arc.extended-infos

        report_request = ReportRequestType()
        arc.report_requests.report_requests.append(report_request)

        report_request.id = 'report-request_' + uuid.uuid4().hex

        #report_request.content = self.checker.content.to_xml()
        report_request.content = ET.Element('stuff')

        for host in self.hosts:
            asset = AssetElement()
            arc.assets.assets.append(asset)

            asset.id = 'asset_' + host.facts['unique_id']

            comp = ComputingDeviceType()
            asset.assets.append(comp)

            # TODO: if root_uuid is unavailable
            # TODO: fallback to mobo guid, eth0 mac address, eth0 ip address, hostname

            for cpe in host.facts['cpe']:
                c = CPEType(cpe.to_uri_string())
                comp.cpes.append(c)

            # TODO multiple FQDNs
            comp.fqdn = FQDNType(host.facts['fqdn'][0])

            comp.hostname = ComputingDeviceHostnameType(host.facts['hostname'])

            try:
                comp.motherboard_guid = MotherboardGUIDType(str(uuid.UUID(host.facts['motherboard_uuid'])))
            except KeyError:
                logger.debug("Couldn't parse motherboard-guid")

            comp.connections = ConnectionsType()

            for dev, net_con in host.facts['network_connections'].items():
                logger.debug('Producing Connection for device ' + dev)
                for address in net_con['network_addresses']:
                    logger.debug('Producing network address: ' + str(address))
                    conn = NetworkInterfaceType()
                    comp.connections.connections.append(conn)

                    conn.mac_address = MACAddressType(host.facts['network_connections'][dev]['mac_address'])

                    conn.ip_address = IPAddressType()
                    if address['type'] == 'ipv4':
                        conn.ip_address.ip_v4 = IPAddressIPv4Type(address['address'])
                        conn.subnet_mask = IPAddressIPv4Type(address['subnet_mask'])
                        if 'default_route' in host.facts['network_connections'][dev]:
                            conn.default_route = IPAddressIPv4Type(host.facts['network_connections'][dev]['default_route'])
                    elif address['type'] == 'ipv6':
                        conn.ip_address.ip_v6 = IPAddressIPv6Type(address['address'])
                        conn.subnet_mask = IPAddressIPv6Type(address['subnet_mask'])
                        if 'default_route' in host.facts['network_connections'][dev]:
                            conn.default_route = IPAddressIPv6Type(host.facts['network_connections'][dev]['default_route'])

            # network services
            for svc in host.facts['network_services']:
                s = ServiceType()
                asset.assets.append(s)

                s.host = HostType()

                # TODO multiple FQDNs
                s.host.fqdn = FQDNType(host.facts['fqdn'][0])

                # TODO fix this to really parse the IP
                s.host.ip_address = IPAddressType()
                if '.' in svc['ip_address']:
                    s.host.ip_address.ip_v4 = IPAddressIPv4Type(svc['ip_address'])
                elif ':' in svc['ip_address']:
                    s.host.ip_address.ip_v6 = IPAddressIPv6Type(svc['ip_address'])

                port = ServicePortType(svc['port'])
                port.source = svc['source']
                port.timestamp = svc['timestamp']
                s.ports.append(port)

                s.protocol = ProtocolType(svc['protocol'])

            report = ReportType()
            report.content = ReportContentElement()
            report.content.append(self.reporter.report(host))
            arc.reports.reports.append(report)
            report.id = 'report_' + uuid.uuid4().hex

            rel = RelationshipType()
            arc.relationships.relationships.append(rel)
            rel.subject = report.id
            rel.type = 'isAbout'
            ref = RefElement(asset.id)
            rel.refs.append(ref)

            # TODO 'retrievedFrom' relationship
            # TODO 'createdBy' relationship
            # TODO 'hasSource' relationship
            # TODO 'recordedBy' relationship
            # TODO 'initiatedBy' relationship

            rel = RelationshipType()
            arc.relationships.relationships.append(rel)
            rel.subject = report.id
            rel.type = 'createdFor'
            ref = RefElement(report_request.id)
            rel.refs.append(ref)

            # TODO 'hasMetadata' relationship

        arc_et = ET.ElementTree(element=arc.to_xml())
        return arc_et
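        # Typical follow-up (hypothetical file name, not part of the original method):
        # the caller could serialize the returned tree with, e.g.,
        #     arc_et.write('asset_report_collection.xml',
        #                  encoding='utf-8', xml_declaration=True)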