Example #1
import xml.etree.ElementTree as et  # ElementTree alias used throughout this example

def make_svg_bargraph(labels, heights, categories=None, palette=None,
                      barheight=100, barwidth=12, show_labels=True, file_header=False,
                      data_url=False):
    if palette is None:
        # default_bargraph_palette is assumed to be defined at module level elsewhere
        palette = default_bargraph_palette
    if categories is None:
        categories = [('', len(labels))]
    unitheight = float(barheight) / max(max(heights, default=1), 1)
    textheight = barheight if show_labels else 0
    labelsize = float(barwidth)
    gap = float(barwidth) / 4
    # textsize = barwidth + gap
    textsize = barwidth + gap / 2
    rollup = max(heights, default=1)
    textmargin = float(labelsize) * 2 / 3
    leftmargin = 32
    rightmargin = 8
    svgwidth = len(heights) * (barwidth + gap) + 2 * leftmargin + rightmargin
    svgheight = barheight + textheight

    # create an SVG XML element
    svg = et.Element('svg', width=str(svgwidth), height=str(svgheight),
                     version='1.1', xmlns='http://www.w3.org/2000/svg')

    # Draw the bar graph
    basey = svgheight - textheight
    x = leftmargin
    # Add units scale on left
    if len(heights):
        for h in [1, (max(heights) + 1) // 2, max(heights)]:
            et.SubElement(svg, 'text', x='0', y='0',
                          style=('font-family:sans-serif;font-size:%dpx;' +
                                 'text-anchor:end;alignment-baseline:hanging;' +
                                 'transform:translate(%dpx, %dpx);') %
                          (textsize, x - gap, basey - h * unitheight)).text = str(h)
        et.SubElement(svg, 'text', x='0', y='0',
                      style=('font-family:sans-serif;font-size:%dpx;' +
                             'text-anchor:middle;' +
                             'transform:translate(%dpx, %dpx) rotate(-90deg)') %
                      (textsize, x - gap - textsize, basey - h * unitheight / 2)
                      ).text = 'units'
    # Draw big category background rectangles
    for catindex, (cat, catcount) in enumerate(categories):
        if not catcount:
            continue
        et.SubElement(svg, 'rect', x=str(x), y=str(basey - rollup * unitheight),
                      width=(str((barwidth + gap) * catcount - gap)),
                      height=str(rollup * unitheight),
                      fill=palette[catindex % len(palette)][1])
        x += (barwidth + gap) * catcount
    # Draw small bars as well as 45-degree text labels
    x = leftmargin
    catindex = -1
    catcount = 0
    for label, height in zip(labels, heights):
        while not catcount and catindex <= len(categories):
            catindex += 1
            catcount = categories[catindex][1]
            color = palette[catindex % len(palette)][0]
        et.SubElement(svg, 'rect', x=str(x), y=str(basey - (height * unitheight)),
                      width=str(barwidth), height=str(height * unitheight),
                      fill=color)
        x += barwidth
        if show_labels:
            et.SubElement(svg, 'text', x='0', y='0',
                          style=('font-family:sans-serif;font-size:%dpx;text-anchor:end;' +
                                 'transform:translate(%dpx, %dpx) rotate(-45deg);') %
                          (labelsize, x, basey + textmargin)).text = label
        x += gap
        catcount -= 1
    # Text labels for each category
    x = leftmargin
    for cat, catcount in categories:
        if not catcount:
            continue
        et.SubElement(svg, 'text', x='0', y='0',
                      style=('font-family:sans-serif;font-size:%dpx;text-anchor:end;' +
                             'transform:translate(%dpx, %dpx) rotate(-90deg);') %
                      (textsize, x + (barwidth + gap) * catcount - gap,
                       basey - rollup * unitheight + gap)).text = '%d %s' % (
            catcount, cat + ('s' if catcount != 1 else ''))
        x += (barwidth + gap) * catcount
    # Output - this is the bare svg.
    result = et.tostring(svg).decode('utf-8')
    if file_header or data_url:
        result = ''.join([
            '<?xml version=\"1.0\" standalone=\"no\"?>\n',
            '<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n',
            '\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n',
            result])
    if data_url:
        import base64
        result = 'data:image/svg+xml;base64,' + base64.b64encode(
            result.encode('utf-8')).decode('utf-8')
    return result
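A minimal usage sketch (the labels, heights, categories, and the palette of
(bar color, background color) pairs below are made-up values, not part of the
original example):

palette = [('#4b8bbe', '#e6f0f8'), ('#ffd43b', '#fff7dc')]  # assumed (bar, background) pairs
svg_markup = make_svg_bargraph(
    labels=['cat', 'dog', 'bird'],
    heights=[12, 7, 3],
    categories=[('animal', 3)],  # one category spanning all three bars
    palette=palette)
with open('bargraph.svg', 'w') as f:
    f.write(svg_markup)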
Example #2
def main():

    hosts = []
    backupsDirectory = "/home/user/backups"
    for item in os.listdir(backupsDirectory):
        if item in [".", ".."]:
            continue
        if os.path.isdir(os.path.join(backupsDirectory, item)):
            hosts.append(item)
            print "item: ", item

    if len(hosts) == 0:
        return 0

    backupSessionMap = {}
    for host in hosts:
        print "found host: ", host
        sessionInfoFile = os.path.join(os.path.join(backupsDirectory, host),
                                       "session_list.xml")
        sessionList = veeamlpb.session.CSessionInfoList.FromXmlFile(
            sessionInfoFile)
        backupSessionMap[host] = sessionList

        for sessionInfo in sessionList.List():
            print "Session:", sessionInfo.ToString()

    html = xml.Element("html")
    body = xml.SubElement(html, "body",
                          {"style": "background-color: #00b336;"})

    xml.SubElement(
        body, "h1").text = "Report at " + datetime.datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S")
    xml.SubElement(body, "h2").text = "Statistic:"
    for host in hosts:
        sessionList = backupSessionMap[host]
        success = 0
        warning = 0
        error = 0
        if len(sessionList.List()) == 0:
            continue

        for sessionInfo in sessionList.List():
            if sessionInfo.State() == "Success":
                success += 1
            elif sessionInfo.State() == "Warning":
                warning += 1
            else:
                error += 1

        latestSessionInfo = sessionList.List()[-1]
        attr = {}
        if latestSessionInfo.State() == "Success":
            attr["style"] = "background-color: #005f4b; color: white;"
        elif latestSessionInfo.State() == "Warning":
            attr["style"] = "background-color: #93ea20;"
        else:
            attr["style"] = "background-color: #ba0200; color: white;"

        xml.SubElement(xml.SubElement(body, "p"), "span", attr).text = \
            host + " - " + str(success) + "/" + str(warning) + "/" + str(error) + " Success/Warning/Error"

    for host in hosts:
        sessionList = backupSessionMap[host]

        xml.SubElement(body, "h2").text = host + ":"

        tableStyle = xml.SubElement(body, "style")
        tableStyle.attrib["type"] = "text/css"
        tableStyle.text = "TABLE {border: 1px solid green;} TD{ border: 1px solid green; padding: 4px;}"

        table = xml.SubElement(body, "table")
        thead = xml.SubElement(table, "thead")
        xml.SubElement(thead, "th").text = "Number"
        xml.SubElement(thead, "th").text = "State"
        xml.SubElement(thead, "th").text = "Job name"
        xml.SubElement(thead, "th").text = "Start at"
        xml.SubElement(thead, "th").text = "Complete at"

        tbody = xml.SubElement(table, "tbody")
        inx = 0
        for sessionInfo in reversed(sessionList.List()):
            if inx == 10:
                break
            tr = xml.SubElement(tbody, "tr")
            xml.SubElement(tr, "td").text = str(inx)

            attr = {}
            if sessionInfo.State() == "Success":
                pass
            elif sessionInfo.State() == "Warning":
                attr["style"] = "background-color: #93ea20;"
            else:
                attr["style"] = "background-color: #ba0200; color: white;"
            xml.SubElement(tr, "td", attr).text = sessionInfo.State()

            xml.SubElement(tr, "td").text = sessionInfo.JobName()
            xml.SubElement(tr, "td").text = sessionInfo.StartTime()
            xml.SubElement(tr, "td").text = sessionInfo.FinishTime()

            inx += 1

    xml.ElementTree(html).write("summary.html",
                                encoding='utf-8',
                                method='html')
    return 0
def addTag(parent, tag, xargs={}, contents=None):
    e = ET.SubElement(parent, tag,
                      xargs) if parent is not None else ET.Element(tag, xargs)
    mergecontents(e, contents)
    return e
# serialize
from datetime import datetime  # needed for datetime.now() below
import xml.etree.ElementTree as xml  # this snippet uses 'xml' as the ElementTree alias

metric = {
    'time': datetime.now(),
    'name': 'CPU',
    'value': 3.4,
    'labels': {
        'host': 'prod9',
        'version': '1.4.4',
    },
}


def new_element(tag, text):
    """Helper function to create an element with text"""
    elem = xml.Element(tag)
    elem.text = text
    return elem


root = xml.Element('metric')
root.append(new_element('time', metric['time'].isoformat()))
root.append(new_element('name', metric['name']))
root.append(new_element('value', str(metric['value'])))
labels = xml.Element('labels')
for key, value in metric['labels'].items():
    labels.append(xml.Element('label', key=key, value=value))
root.append(labels)
data = xml.tostring(root)
print('xml:', data.decode('utf-8'))
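For the reverse direction, a small sketch (assuming the same 'xml' alias for
xml.etree.ElementTree as above) that parses the serialized string back into a
plain dict:

parsed = xml.fromstring(data)
restored = {
    'time': parsed.findtext('time'),   # ISO 8601 string, as serialized above
    'name': parsed.findtext('name'),
    'value': float(parsed.findtext('value')),
    'labels': {label.get('key'): label.get('value')
               for label in parsed.find('labels')},
}
print('roundtrip:', restored)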
Example #5
    def _get_macroscopic_xml(self, macroscopic):
        xml_element = ET.Element("macroscopic")
        xml_element.set("name", macroscopic)

        return xml_element
Example #6
def run(args):
    #input_path = "/Users/jamarq_laptop/PAN/Software/pan-2/Data/blogs/es/"
    #model_path = "./Models/"
    #output_path = "./Outputs/"

    input_path = args.input
    output_path = args.output
    model_path = args.model

    if not input_path.endswith("/"):
        input_path += "/"
    if not output_path.endswith("/"):
        output_path += "/"
    if not model_path.endswith("/"):
        model_path += "/"

    files = [f for f in os.listdir(input_path) if f.endswith('.xml')]

    tree = ET.parse(input_path + files[0])
    root = tree.getroot()

    type = root.attrib["type"] + "_" + root.attrib["lang"]

    clf = joblib.load(model_path + type + '.pkl')
    topic_model = joblib.load(model_path + type + '_topic_model.pkl')
    scaler = joblib.load(model_path + type + "_scaler.pkl")

    for file in files:

        features = []
        tree = ET.parse(input_path + file)
        root = tree.getroot()

        aut_id = file.split("_")[0]

        if "." in aut_id:
            aut_id = aut_id.split(".")[0]

        lang = type.split("_")[1]
        doccount = tree.find("documents").attrib["count"]

        features.extend(topic_model.get_sim_unseen(input_path + file))

        features.append(int(doccount))

        xmlstr = clean(ET.tostring(root), root.attrib['lang'].lower(),
                       root.attrib['type'], tree)
        xmlstr = ''.join(filter(lambda x: x in string.printable, xmlstr))  # keep printable characters only
        tokens = word_tokenize(xmlstr)
        nostop_tokens = removeStopWords(tokens, type.split("_")[1])

        features.append(extractRepeatation(nostop_tokens, type.split("_")[1]))

        cap_word, cap_let = extractCapital(nostop_tokens)
        features.append(cap_word)
        features.append(cap_let)

        features.append(len(tokens))

        LIWCDic = readDictionary(getLIWCOnLang(lang))
        features.extend(extractLIWCFeatures(tokens, LIWCDic))

        features.extend(extractReadabilityArray(xmlstr, nostop_tokens))

        features.append(extractEmoticons(xmlstr, nostop_tokens))

        features.extend(extractHTMLTags(xmlstr, nostop_tokens))

        label = clf.predict(scaler.transform(features))

        author = ET.Element('author')
        author.set("id", aut_id)
        author.set("type", type.split("_")[0])
        author.set("lang", type.split("_")[1])
        author.set("age_group", label[0].split(" ")[0])
        author.set("gender", label[0].split(" ")[1])

        ET.ElementTree(author).write(output_path + aut_id + ".xml")

    print "All done!"
    print '\a'
    def setUp(self):
        self.flatten_feed = transitfeed.Loader(
            data_path('flatten_feed')).load()
        self.kmlwriter = kmlwriter.KMLWriter()
        self.kmlwriter.shape_points = True
        self.parent = Et.Element('parent')
Example #8
    def save(self, quick_save: bool = False) -> None:
        # Save the overall state of the party
        xml_root = ET.Element('SaveState')
        xml_root.attrib['name'] = self.hero_party.main_character.name
        xml_root.attrib['map'] = self.get_map_name()
        xml_root.attrib['gp'] = str(self.hero_party.gp)

        # Save state related to light diameter
        if self.hero_party.light_diameter is not None:
            xml_root.attrib['light_diameter'] = str(
                self.hero_party.light_diameter)
        if self.hero_party.light_diameter_decay_steps is not None:
            xml_root.attrib['light_diameter_decay_steps'] = str(
                self.hero_party.light_diameter_decay_steps)
        if self.hero_party.light_diameter_decay_steps_remaining is not None:
            xml_root.attrib['light_diameter_decay_steps_remaining'] =\
                str(self.hero_party.light_diameter_decay_steps_remaining)

        # Save state related to repel monsters
        xml_root.attrib[
            'repel_monsters'] = 'yes' if self.hero_party.repel_monsters else 'no'
        if self.hero_party.repel_monsters_decay_steps_remaining is not None:
            xml_root.attrib['repel_monsters_decay_steps_remaining'] =\
                str(self.hero_party.repel_monsters_decay_steps_remaining)
        if (self.hero_party.repel_monster_fade_dialog is not None and
                isinstance(self.hero_party.repel_monster_fade_dialog, str)):
            xml_root.attrib['repel_monster_fade_dialog'] = str(
                self.hero_party.repel_monster_fade_dialog)

        # Save state related to last outside position
        if '' != self.hero_party.last_outside_map_name:
            xml_root.attrib[
                'last_outside_map'] = self.hero_party.last_outside_map_name
            xml_root.attrib['last_outside_x'] = str(
                self.hero_party.last_outside_pos_dat_tile.x)
            xml_root.attrib['last_outside_y'] = str(
                self.hero_party.last_outside_pos_dat_tile.y)
            xml_root.attrib[
                'last_outside_dir'] = self.hero_party.last_outside_dir.name

        # Save state related to removed decorations
        removed_decorations_element = ET.SubElement(xml_root,
                                                    'RemovedDecorations')
        for map_name in self.removed_decorations_by_map:
            for decoration in self.removed_decorations_by_map[map_name]:
                removed_decoration_element = ET.SubElement(
                    removed_decorations_element, 'RemovedDecoration')
                removed_decoration_element.attrib['map'] = map_name
                removed_decoration_element.attrib['x'] = str(
                    decoration.point.x)
                removed_decoration_element.attrib['y'] = str(
                    decoration.point.y)
                if decoration.type is not None:
                    removed_decoration_element.attrib[
                        'type'] = decoration.type.name

        # Save state for member of the hero party
        for member in self.hero_party.members:
            member_element = ET.SubElement(xml_root, 'PartyMember')
            member_element.attrib['name'] = member.name
            member_element.attrib['type'] = member.character_type.name
            member_element.attrib['x'] = str(member.curr_pos_dat_tile.x)
            member_element.attrib['y'] = str(member.curr_pos_dat_tile.y)
            member_element.attrib['dir'] = member.direction.name
            member_element.attrib['xp'] = str(member.xp)
            member_element.attrib['hp'] = str(member.hp)
            member_element.attrib['mp'] = str(member.mp)
            member_element.attrib[
                'is_combat_character'] = 'yes' if member.is_combat_character else 'no'

            items_element = ET.SubElement(member_element, 'EquippedItems')
            if member.weapon is not None:
                item_element = ET.SubElement(items_element, 'Item')
                item_element.attrib['name'] = member.weapon.name
            if member.armor is not None:
                item_element = ET.SubElement(items_element, 'Item')
                item_element.attrib['name'] = member.armor.name
            if member.shield is not None:
                item_element = ET.SubElement(items_element, 'Item')
                item_element.attrib['name'] = member.shield.name
            for tool in member.other_equipped_items:
                item_element = ET.SubElement(items_element, 'Item')
                item_element.attrib['name'] = tool.name

            items_element = ET.SubElement(member_element, 'UnequippedItems')
            for item, item_count in member.unequipped_items.items():
                if item_count > 0:
                    item_element = ET.SubElement(items_element, 'Item')
                    item_element.attrib['name'] = item.name
                    item_element.attrib['count'] = str(item_count)

        progress_markers_element = ET.SubElement(xml_root, 'ProgressMarkers')
        for progress_marker in self.hero_party.progress_markers:
            progress_marker_element = ET.SubElement(progress_markers_element,
                                                    'ProgressMarker')
            progress_marker_element.attrib['name'] = progress_marker

        # TODO: This should all be captured in game.xml
        if not quick_save:
            dialog_element = ET.SubElement(xml_root, 'Dialog')
            dialog_element.text = '"I am glad thou hast returned.  All our hopes are riding on thee."'
            dialog_element = ET.SubElement(xml_root, 'Dialog')
            dialog_element.text = '"Before reaching thy next level of experience thou must gain [NEXT_LEVEL_XP] ' \
                                  'experience points.  See me again when thy level has increased."'
            dialog_element = ET.SubElement(xml_root, 'Dialog')
            dialog_element.text = '"Goodbye now, [NAME].  Take care and tempt not the Fates."'

        xml_string = xml.dom.minidom.parseString(
            ET.tostring(xml_root)).toprettyxml(indent="   ")

        save_game_file_path = os.path.join(
            self.saves_path, self.hero_party.main_character.name + '.xml')

        # Archive off the old save, if one is present
        self.archive_saved_game_file(save_game_file_path)

        # Save the game
        try:
            if not os.path.isdir(self.saves_path):
                os.makedirs(self.saves_path)
            save_game_file = open(save_game_file_path, 'w')
            save_game_file.write(xml_string)
            save_game_file.close()

            print('Saved game to file', save_game_file_path, flush=True)
        except Exception as exc:
            print(
                'ERROR: Exception encountered while attempting to save game file:',
                exc,
                flush=True)
    def element(self):
        """Return the Component as a valid KGML element."""
        # The root is this Component element
        component = ET.Element("component")
        component.attrib = {"id": str(self._id)}
        return component
Example #10
def get_original_test_set_xml(original_dataset_file, destination_file, destination_path, file_g2go):
    """

    :param original_dataset_file:
    :param destination_file:
    :param destination_path:
    :param file_g2go:
    :return:
    """
    dict_gene_go_id, dict_gene_go_name = go_annotations(file_g2go)

    # for test
    # dataset = open(original_dataset_file, encoding='utf-8')
    # dataset_reader = csv.reader(dataset, delimiter='\t')
    #
    # line_count = 0
    # line_saves = []
    #
    # for row in dataset_reader:
    #     if line_count == 0:
    #         line_saves.append(row[:-1])
    #     else:
    #         if row[11] == 'C':
    #             line_saves.append(row[:-1])
    #         elif row[11] == 'I' and row[10] == 'TRUE':
    #             line_saves.append(row[:-2] + ['FALSE'])
    #         elif row[11] == 'I' and row[10] == 'FALSE':
    #             line_saves.append(row[:-2] + ['TRUE'])
    #
    #     line_count += 1
    #
    # output_file = open(destination_file, 'w', encoding='utf-8')
    # output_file_writer = csv.writer(output_file, delimiter='\t')
    #
    # for row in line_saves:
    #     output_file_writer.writerow(row)
    # end for test

    #dict_id_sentence = get_pubmed_id_sentences(destination_file)  # id_pubmed : [sentence1, sentence2, etc.]
    dict_id_sentence = get_pubmed_id_sentences_expert(destination_file)  # id_pubmed : [sentence1, sentence2, etc.]

    for key, items in dict_id_sentence.items():
        root = ET.Element('document', id=key)
        sentence_number = 0

        for item in items:
            entity_number = 0
            #entities_sentence = get_entities_sentence(destination_file, item)
            entities_sentence = get_entities_sentence_expert(destination_file, item)  # for expert

            doc = ET.SubElement(root, 'sentence', id=key + '.s' + str(sentence_number), text=item)
            save_entities = []
            save_pairs = []

            for pair in entities_sentence:  # sentence : [[entity1, id, char1, char2], [entity2, id, char3, char4],
                # relation], [[entity1, id, char1, char2], [entity2, id, char3, char4],
                # relation], etc.]

                pair_list = []

                entity_1 = pair[0]
                entity_2 = pair[1]

                if entity_1 not in save_entities:
                    save_entities.append(entity_1)

                if entity_2 not in save_entities:
                    save_entities.append(entity_2)

                pair_list.append(entity_1)
                pair_list.append(entity_2)

                pair_list_sorted = sorted(pair_list, key=lambda x: int(x[2]))
                pair_list_sorted.append(pair[2])

                save_pairs.append(pair_list_sorted)

            save_entities_sorted = sorted(save_entities, key=lambda x: int(x[2]))
            associated_entity_number = {}

            for entity in save_entities_sorted:

                if entity[1].startswith('HP'):

                    ET.SubElement(doc, 'entity', id=key + '.s' + str(sentence_number) + '.e' + str(entity_number),
                                  charOffset=entity[2] + '-' + entity[3], type='HP', text=entity[0],
                                  ontology_id=entity[1])

                else:

                    ET.SubElement(doc, 'entity', id=key + '.s' + str(sentence_number) + '.e' + str(entity_number),
                                  charOffset=entity[2] + '-' + entity[3], type='GENE', text=entity[0],
                                  ontology_id=entity[1])

                associated_entity_number[repr(entity)] = key + '.s' + str(sentence_number) + '.e' + str(entity_number)
                entity_number += 1

            # save_pairs = [[['PDYN', '5173', '64', '68'], ['epilepsy', 'HP_0001250', '111', '119'], 'False'], [['PDYN', '5173', '32', '68'], ['epilepsy', 'HP_0001250', '111', '119'], 'False']]

            pairs_sorted = sorted(save_pairs, key=lambda x: int(x[0][2]))
            pair_number = 0

            for pair in pairs_sorted:

                ET.SubElement(doc, 'pair', id=key + '.s' + str(sentence_number) + '.p' + str(pair_number),
                              e1=associated_entity_number[repr(pair[0])],
                              e2=associated_entity_number[repr(pair[1])],
                              relation=pair[2].lower())

                pair_number += 1
            sentence_number += 1

        output_file = open(destination_path + key + '.xml', 'w', encoding='utf-8')
        # output_file.write('<?xml version="1.0" encoding="UTF-8"?>')

        #root = xml_file_go(dict_gene_go_name, dict_gene_go_id, root)
        output_file.write(prettify(root))

        output_file.close()

    return
Example #11
def all_in_one(base_dir, destination_file, destination_path, nomenclature='relation'):
    """

    :param nomenclature:
    :param base_dir:
    :param destination_file:
    :param destination_path:
    :return:
    """

    base_root = ET.Element('document', id=destination_file)
    sentence_number = 0

    for f in os.listdir(base_dir):

        reader = open(base_dir + '/' + f, 'r', encoding='utf-8')
        content = reader.read().replace('</sup>', 'AAAA').replace('<sup>', 'AAA').replace('</b>', 'AAAA')\
            .replace('<b>', 'AAA').replace('</i>', 'AAAA').replace('<i>', 'AAA').replace('</sub>', 'AAAA')\
            .replace('<sub>', 'AAA')

        root = ET.fromstring(content)

        #tree = ET.parse(base_dir + '/' + f)
        #root = tree.getroot()

        for sentence in root:
            sentence_text = sentence.get('text')
            all_pairs = sentence.findall('pair')

            for pair in all_pairs:
                doc = ET.SubElement(base_root, 'sentence', id='s' + str(sentence_number), text=str(sentence_text.encode('ascii', 'ignore'))[2:-1])

                id_1 = pair.get('e1')
                id_2 = pair.get('e2')

                for e in sentence.findall('entity'):

                    if e.get('id') == id_1:
                        if e.get('type') == 'GO':
                            e_type = 'GO'
                        else:
                            e_type = 'HP'

                        ET.SubElement(doc, 'entity', id='s' + str(sentence_number) + '.e0',
                                      charOffset=e.get('charOffset'), type=e_type, text=e.get('text'), ontology_id=e.get('ontology_id'))

                    elif e.get('id') == id_2:
                        if e.get('type') == 'GO':
                            e_type = 'GO'
                        else:
                            e_type = 'HP'

                        ET.SubElement(doc, 'entity', id='s' + str(sentence_number) + '.e1',
                                      charOffset=e.get('charOffset'), type=e_type, text=e.get('text'), ontology_id=e.get('ontology_id'))

                ET.SubElement(doc, 'pair', id='s' + str(sentence_number) + '.p0', e1='s' + str(sentence_number) + '.e0',
                              e2='s' + str(sentence_number) + '.e1', relation=pair.get(nomenclature))

                sentence_number += 1

    all_file = open(destination_path + '/' + destination_file + '.xml', 'w', encoding='utf-8')
    all_file.write(prettify(base_root))
    all_file.close()

    return


#all_in_one('corpora/consensus_test/', 'consensus_test', 'corpora/')
#all_in_one('corpora/original_train/', 'original_train', 'corpora/')
#all_in_one('corpora/original_test/', 'original_test', 'corpora/')
#all_in_one('corpora/expert_test/', 'expert_test', 'corpora/')

# RUN ####

# def main():
#     """Creates an xml file for each abstract
#     """
#
#     #xml_file('data/original_dataset_70.tsv', 'data/batch_results_70.csv', 'data/validation_set.csv', 'data/gene2go', 'corpora/amazon_train/')
#     #xml_file('data/original_dataset_30.tsv', 'data/batch_results_30_consensus.csv', 'data/validation_set.csv', 'data/gene2go', 'corpora/consensus_test/', test=True)
#
#     return
#
#
# # python3 src/parser_csv.py
# if __name__ == "__main__":
#     main()
Example #12
def xml_file(dataset_file, amazon_file, validation_file, file_g2go, destination_path, test=False):
    """Process to create each file

    :param dataset_file:
    :param amazon_file:
    :param validation_file:
    :param file_g2go:
    :param destination_path:
    :param test:
    """

    if test is True:
        blacklist = create_consensus_dataset('data/batch_results_30.csv', 'data/external_rater_results.tsv', 'data/batch_results_30_consensus.csv')
    else:
        blacklist = []

    # stats
    count = 0
    count_true = 0
    count_false = 0
    count_excluded = 0

    original_count = 0
    original_count_true = 0
    original_count_false = 0

    validation = open(validation_file, 'r', encoding='utf-8')
    validation.readline()
    validation_sentences = validation.readlines()

    dict_gene_go_id, dict_gene_go_name = go_annotations(file_g2go)
    amazon = open(amazon_file, encoding='utf-8')
    amazon_file_reader = csv.reader(amazon, delimiter=',', quotechar='"')

    line_count = 0
    dict_replies = {}  # sentence : reply

    for row in amazon_file_reader:
        if line_count == 0:
            pass
        elif row:
            if row[21] == '' and test is False and row[-2] + '\n' not in validation_sentences:
                if row[-1] == 'Yes, they share a direct/explicit relation in the sentence.':
                    dict_replies[row[-2]] = 'true'
                    count_true += 1
                elif row[-1] == 'The entities seem to be illy marked, or something is wrong with the entities/sentence.':
                    dict_replies[row[-2]] = 'error'
                    count_excluded += 1
                elif row[-1] == 'No, they are separate entities with no correlation in the sentence.':
                    dict_replies[row[-2]] = 'false'
                    count_false += 1
            elif row[21] == '' and test is True:
                if row[-1] == 'Yes, they share a direct/explicit relation in the sentence.':
                    dict_replies[row[-2]] = 'true'
                    count_true += 1
                elif row[-1] == 'The entities seem to be illy marked, or something is wrong with the entities/sentence.':
                    dict_replies[row[-2]] = 'error'
                    count_excluded += 1
                elif row[-1] == 'No, they are separate entities with no correlation in the sentence.':
                    dict_replies[row[-2]] = 'false'
                    count_false += 1

        line_count += 1

    amazon.close()

    dict_id_sentence = get_pubmed_id_sentences(dataset_file)  # id_pubmed : [sentence1, sentence2, etc.]

    for key, items in dict_id_sentence.items():
        root = ET.Element('document', id=key)
        sentence_number = 0

        for item in items:
            entity_number = 0
            entities_sentence = get_entities_sentence(dataset_file, item)

            doc = ET.SubElement(root, 'sentence', id=key + '.s' + str(sentence_number), text=item)
            save_entities = []
            save_pairs = []

            for pair in entities_sentence:  # sentence : [[entity1, id, char1, char2], [entity2, id, char3, char4],
                                            # relation], [[entity1, id, char1, char2], [entity2, id, char3, char4],
                                            # relation], etc.]

                pair_list = []

                entity_1 = pair[0]
                entity_2 = pair[1]

                if entity_1 not in save_entities:
                    save_entities.append(entity_1)

                if entity_2 not in save_entities:
                    save_entities.append(entity_2)

                pair_list.append(entity_1)
                pair_list.append(entity_2)

                pair_list_sorted = sorted(pair_list, key=lambda x: int(x[2]))
                pair_list_sorted.append(pair[2])

                save_pairs.append(pair_list_sorted)

            save_entities_sorted = sorted(save_entities, key=lambda x: int(x[2]))
            associated_entity_number = {}

            for entity in save_entities_sorted:

                if entity[1].startswith('HP'):

                    ET.SubElement(doc, 'entity', id=key + '.s' + str(sentence_number) + '.e' + str(entity_number),
                                  charOffset=entity[2] + '-' + entity[3], type='HP', text=entity[0], ontology_id=entity[1])

                else:

                    ET.SubElement(doc, 'entity', id=key + '.s' + str(sentence_number) + '.e' + str(entity_number),
                                  charOffset=entity[2] + '-' + entity[3], type='GENE', text=entity[0], ontology_id=entity[1])

                associated_entity_number[repr(entity)] = key + '.s' + str(sentence_number) + '.e' + str(entity_number)
                entity_number += 1

            # save_pairs = [[['PDYN', '5173', '64', '68'], ['epilepsy', 'HP_0001250', '111', '119'], 'False'], [['PDYN', '5173', '32', '68'], ['epilepsy', 'HP_0001250', '111', '119'], 'False']]

            pairs_sorted = sorted(save_pairs, key=lambda x: int(x[0][2]))
            pair_number = 0

            for pair in pairs_sorted:

                original_count += 1
                if pair[2] == 'False':
                    original_count_false += 1
                elif pair[2] == 'True':
                    original_count_true += 1

                new_sentence = item[:int(pair[0][2])] + '<b>' + item[int(pair[0][2]):int(pair[0][3])] + '</b>' + \
                               item[int(pair[0][3]):int(pair[1][2])] + '<b>' + item[int(pair[1][2]):int(pair[1][3])] + \
                               '</b>' + item[int(pair[1][3]):]

                if new_sentence.replace(',', '<span>&#44;</span>') in blacklist:
                    pass
                else:
                    if dict_replies[new_sentence.replace(',', '<span>&#44;</span>')] == 'error':
                        pass
                    else:
                        ET.SubElement(doc, 'pair', id=key + '.s' + str(sentence_number) + '.p' + str(pair_number),
                                e1=associated_entity_number[repr(pair[0])], e2=associated_entity_number[repr(pair[1])],
                                relation=dict_replies[new_sentence.replace(',', '<span>&#44;</span>')])
                        count += 1
                        pair_number += 1

            sentence_number += 1

        output_file = open(destination_path + key + '.xml', 'w', encoding='utf-8')
        # output_file.write('<?xml version="1.0" encoding="UTF-8"?>')

        root = xml_file_go(dict_gene_go_name, dict_gene_go_id, root)
        output_file.write(prettify(root))

        output_file.close()

    # stats
    print('total', count)
    print('count true', count_true)
    print('count false', count_false)
    print('count_excluded', count_excluded)

    print('------------------------------------------------------------')

    print('original count', original_count)
    print('original_count_true', original_count_true)
    print('original_count_false', original_count_false)

    return
Example #13
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 22:28:53 2018

@author: Administrator
"""

import xml.etree.ElementTree as ET


def subElement(root, tag, text):
    ele = ET.SubElement(root, tag)
    ele.text = text
    ele.tail = '\n'


if __name__ == "__main__":
    root = ET.Element("note")

    to = root.makeelement("to", {})
    to.text = "peter"
    to.tail = '\n'
    root.append(to)

    subElement(root, "from", "marry")
    subElement(root, "heading", "Reminder")
    subElement(root, "body", "Don't forget the meeting!")

    tree = ET.ElementTree(root)
    tree.write("note.xml", encoding="utf-8", xml_declaration=True)
Example #14
def queue():
    clear_screen()
    title('QUEUE')
    print('Choose a parameter sets file:')
    paramsets_file_names = sorted([
        name for name in os.listdir(PARAMSETS_DIR)
        if name.lower().endswith('.xml')
    ])
    for i, paramsets_file_name in enumerate(paramsets_file_names):
        print(str(i + 1) + ')', paramsets_file_name)
    print('M) Back to main menu')
    choice = None
    options = set(str(x + 1) for x in range(len(paramsets_file_names)))
    while choice not in options:
        choice = input('> ').strip().upper()
        if choice == 'M':
            return
    choice_index = int(choice) - 1
    # Get configuration from user
    print()
    print('How many instances per parameter set?')
    instances_per_paramset = int(input('> ').strip())
    print()
    print('How many runs per instance?')
    runs_per_instance = int(input('> ').strip())
    total_runs = instances_per_paramset * runs_per_instance
    print()
    print('Total number of runs will be',
          str(total_runs) + ',', 'split over', instances_per_paramset,
          'instance(s) per parameter set.')
    print()
    print('Run time limit (hh:mm:ss)')
    run_time_str = input('> ').strip()
    instance_time_limit = sum(
        int(x) * 60**i
        for i, x in enumerate(reversed(run_time_str.split(':'))))
    print()
    print('Partition name (leave blank for default: \'nodes\')')
    partition_name = input('> ').strip()
    print()
    print('Email alerts?')
    do_alerts = input('> ').strip().upper() in ['Y', 'YES']
    print()
    if do_alerts:
        print()
        print('Email address')
        email = input('> ').strip()
    # Generate run directory
    run_name = datetime.datetime.now().strftime('%y%m%d%H%M')
    run_dir = os.path.join(SIMULATIONS_DIR, run_name)
    os.mkdir(run_dir)
    os.mkdir(os.path.join(run_dir, 'output_files/'))
    os.mkdir(os.path.join(run_dir, 'output_std/'))
    # Copy latest binary
    binaries = sorted(
        [x for x in os.listdir(BINARIES_DIR) if x[-4:] == '.jar'])
    latest_binary = binaries[-1]
    shutil.copy2(os.path.join(BINARIES_DIR, latest_binary), run_dir)
    # Parse default parameter sets file
    paramset_values_default = {}
    paramsets_file_name = sorted([
        x for x in paramsets_file_names
        if 'default' in x and x.lower().endswith('.xml')
    ])[-1]
    paramsets_file_path = os.path.join(PARAMSETS_DIR, paramsets_file_name)
    paramsets_file_content = open(paramsets_file_path, 'r').read()
    start_index = paramsets_file_content.index('<EXPERIMENT>')
    end_index = paramsets_file_content.index('</EXPERIMENT>')
    paramsets_file_content = paramsets_file_content[start_index:end_index + 13]
    root = ET.fromstring(paramsets_file_content)
    num_paramsets = len(root.findall('PARAMETERS'))
    for paramset in root:
        for param in paramset:
            name, value = param.get('NAME'), param.get('VALUE')
            paramset_values_default[name] = [value]
    # Parse, edit, and export selected parameter sets file
    paramset_values_chosen = {}
    paramsets_file_name = paramsets_file_names[choice_index]
    paramsets_file_path = os.path.join(PARAMSETS_DIR, paramsets_file_name)
    paramsets_file_content = open(paramsets_file_path, 'r').read()
    start_index = paramsets_file_content.index('<EXPERIMENT>')
    end_index = paramsets_file_content.index('</EXPERIMENT>')
    paramsets_file_content = paramsets_file_content[start_index:end_index + 13]
    root = ET.fromstring(paramsets_file_content)
    num_paramsets = len(root.findall('PARAMETERS'))
    for paramset in root:
        for param in paramset:
            name, value = param.get('NAME'), param.get('VALUE')
            if name == 'N_PROTEINS':
                param.set('VALUE', str(runs_per_instance))
            if name not in paramset_values_chosen:
                paramset_values_chosen[name] = []
            paramset_values_chosen[name].append(value)
    paramsets_xml = ET.tostring(root, encoding='utf8', method='xml')
    with open(os.path.join(run_dir, PARAMSETS_FILE_NAME), 'wb') as outfile:
        outfile.write(paramsets_xml)
    # Find discrepancies and write them to file
    with open(os.path.join(run_dir, DISCREPANCIES_FILE_NAME), 'w') as outfile:
        for key, value_d in paramset_values_default.items():
            if key == 'N_PROTEINS':
                continue
            if key not in paramset_values_chosen.keys():
                print(
                    'ERROR: Chosen parameter sets file is missing required parameter:',
                    key)
                exit(1)
            value_c = paramset_values_chosen[key]
            if set(value_d) != set(value_c):
                outfile.write(key + ': ' + ', '.join(value_c) + '\n')
    # Generate jobscript
    with open(os.path.join(run_dir, 'jobscript.sh'), 'w') as outfile:
        outfile.write('#!/bin/bash\n')
        outfile.write('#SBATCH --job-name=omd_sim\n')
        if do_alerts:
            outfile.write('#SBATCH --mail-type=ALL\n')
            outfile.write('#SBATCH --mail-user=' + email + '\n')
        if partition_name != '':
            outfile.write('#SBATCH --partition=' + partition_name + '\n')
        outfile.write('#SBATCH --cpus-per-task=8\n')
        outfile.write('#SBATCH --mem=4gb\n')
        outfile.write('#SBATCH --time=' + run_time_str + '\n')
        outfile.write('#SBATCH --output=./output_std/omds%j.log\n')
        outfile.write('#SBATCH --error=./output_std/omds%j.err\n')
        outfile.write('#SBATCH --account=biol-stdbom-2019\n')
        outfile.write('#SBATCH --array=0-' + str(instances_per_paramset - 1) +
                      '\n')
        outfile.write('module load lang/Java/1.8.0_212\n')
        outfile.write('export MALLOC_ARENA_MAX=8\n')
        outfile.write('vmArgs="-Xmx1G -XX:ParallelGCThreads=1 -jar"\n')
        outfile.write('java $vmArgs ./' + latest_binary + ' ' +
                      PARAMSETS_FILE_NAME +
                      ' ./output_files $PSET_ID $SLURM_ARRAY_TASK_ID')
    # Generate launcher script
    with open(os.path.join(run_dir, 'launcher.sh'), 'w') as outfile:
        outfile.write(
            'cd "${0%/*}"\n')  # Sets working directory to script directory
        for paramset_id in range(num_paramsets):
            outfile.write('sbatch --export=PSET_ID=' + str(paramset_id) +
                          ' jobscript.sh\n')
    # Launch the tasks
    print('Ready to launch.')
    input('> ')
    print()
    p = subprocess.Popen(['sh', run_dir + '/launcher.sh'],
                         stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # Parse output and save job-set info file
    job_group_ids = [
        int(x.strip()) for x in stdout.decode().split('Submitted batch job ')
        if len(x) > 0
    ]
    if len(job_group_ids) > 0:
        root = ET.Element('JobSet')
        job_groups = ET.SubElement(root, 'JobGroups')
        for job_group_id in job_group_ids:
            ET.SubElement(job_groups, 'JobGroup', id=str(job_group_id))
        ET.SubElement(root,
                      'Parameter',
                      name='ParamsetTitle',
                      value=paramsets_file_name[:-4])
        ET.SubElement(root,
                      'Parameter',
                      name='InstancesPerParamset',
                      value=str(instances_per_paramset))
        ET.SubElement(root,
                      'Parameter',
                      name='RunsPerInstance',
                      value=str(runs_per_instance))
        ET.SubElement(root,
                      'Parameter',
                      name='InstanceTimeLimit',
                      value=str(instance_time_limit))
        tree = ET.ElementTree(root)
        tree.write(os.path.join(run_dir, JOB_SET_INFO_FILE_NAME))
        input('Done. Press any key to continue.')
    else:
        print('Launch failed.\n')
        exit(1)
Example #15
    def _get_lines(self, dispatchmode=False, extendedmode=False):
        company = self.company_id

        if dispatchmode:
            mode1 = 'out_invoice'
            mode2 = 'in_refund'
            extrf = "29"
            declcode = self._get_expedition_code(extendedmode)
            declform = self._get_expedition_form(extendedmode)
        else:
            mode1 = 'in_invoice'
            mode2 = 'out_refund'
            extrf = "19"
            declcode = self._get_reception_code(extendedmode)
            declform = self._get_reception_form(extendedmode)

        decl = ET.Element('Report')
        decl.set('code', declcode)
        decl.set('date', '%s-%s' % (self.year, self.month))
        datas = ET.SubElement(decl, 'Data')
        datas.set('form', declform)
        datas.set('close', 'true')
        entries = {}

        query = """
            SELECT
                inv_line.id
            FROM
                account_invoice_line inv_line
                JOIN account_invoice inv ON inv_line.invoice_id=inv.id
                LEFT JOIN res_country ON res_country.id = inv.intrastat_country_id
                LEFT JOIN res_partner ON res_partner.id = inv.partner_id
                LEFT JOIN res_country countrypartner ON countrypartner.id = res_partner.country_id
                JOIN product_product ON inv_line.product_id=product_product.id
                JOIN product_template ON product_product.product_tmpl_id=product_template.id
            WHERE
                inv.state IN ('open','paid')
                AND inv.company_id=%s
                AND not product_template.type='service'
                AND (res_country.intrastat=true OR (inv.intrastat_country_id is NULL
                                                    AND countrypartner.intrastat=true))
                AND ((res_country.code IS NOT NULL AND not res_country.code=%s)
                     OR (res_country.code is NULL AND countrypartner.code IS NOT NULL
                     AND not countrypartner.code=%s))
                AND inv.type IN (%s, %s)
                AND to_char(COALESCE(inv.date, inv.date_invoice), 'YYYY')=%s
                AND to_char(COALESCE(inv.date, inv.date_invoice), 'MM')=%s
            """

        self.env.cr.execute(query,
                            (company.id, company.partner_id.country_id.code,
                             company.partner_id.country_id.code, mode1, mode2,
                             self.year, self.month))
        lines = self.env.cr.fetchall()
        invoicelines_ids = [rec[0] for rec in lines]
        invoicelines = self.env['account.invoice.line'].browse(
            invoicelines_ids)

        for inv_line in invoicelines:
            linekey = self._get_intrastat_linekey(extrf, inv_line,
                                                  dispatchmode, extendedmode)
            if linekey is None:
                continue

            #We have the key
            #calculate amounts
            if inv_line.price_unit and inv_line.quantity:
                amount = inv_line.price_unit * inv_line.quantity
            else:
                amount = 0
            weight = (inv_line.product_id.weight or 0.0) * \
                inv_line.uom_id._compute_quantity(inv_line.quantity, inv_line.product_id.uom_id)
            if not inv_line.product_id.uom_id.category_id:
                supply_units = inv_line.quantity
            else:
                supply_units = inv_line.quantity * inv_line.uom_id.factor
            amounts = entries.setdefault(linekey, (0, 0, 0))
            amounts = (amounts[0] + amount, amounts[1] + weight,
                       amounts[2] + supply_units)
            entries[linekey] = amounts

        numlgn = 0
        for linekey in entries:
            amounts = entries[linekey]
            if round(amounts[0], 0) == 0:
                continue
            numlgn += 1
            item = ET.SubElement(datas, 'Item')
            self._build_intrastat_line(numlgn, item, linekey, amounts,
                                       dispatchmode, extendedmode)

        if numlgn == 0:
            # no data
            datas.set('action', 'nihil')
        return decl
Example #16
def main(argv=None):  # IGNORE:C0111
    '''Command line options.'''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    if __name__ == '__main__':
        program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    else:
        program_shortdesc = __doc__.split("\n")[1]
    #program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

  Created by TGen North on %s.
  Copyright 2015 TGen North. All rights reserved.

  Available for academic and research use only under a license
  from The Translational Genomics Research Institute (TGen)
  that is free for non-commercial use.

  Distributed on an "AS IS" basis without warranties
  or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        # Setup argument parser
        parser = argparse.ArgumentParser(
            description=program_license,
            formatter_class=argparse.RawDescriptionHelpFormatter)
        required_group = parser.add_argument_group("required arguments")
        required_group.add_argument(
            "-j",
            "--json",
            metavar="FILE",
            required=True,
            help="JSON file of assay descriptions. [REQUIRED]")
        required_group.add_argument("-b",
                                    "--bam",
                                    metavar="FILE",
                                    required=True,
                                    help="BAM file to analyze. [REQUIRED]")
        #required_group.add_argument("-r", "--ref", metavar="FILE", required=True, help="reference fasta file, should already be indexed. [REQUIRED]")
        #parser.add_argument("-o", "--out-dir", dest="odir", metavar="DIR", help="directory to write output files to. [default: `pwd`]")
        required_group.add_argument(
            "-o",
            "--out",
            metavar="FILE",
            required=True,
            help="XML file to write output to. [REQUIRED]")
        #parser.add_argument("-n", "--name", help="sample name, if not provided it will be derived from BAM file")
        parser.add_argument(
            "-d",
            "--depth",
            default=100,
            type=int,
            help=
            "minimum read depth required to consider a position covered. [default: 100]"
        )
        parser.add_argument(
            "--breadth",
            default=0.8,
            type=float,
            help=
            "minimum breadth of coverage required to consider an amplicon as present. [default: 0.8]"
        )
        parser.add_argument(
            "-p",
            "--proportion",
            default=0.1,
            type=float,
            help=
            "minimum proportion required to call a SNP at a given position. [default: 0.1]"
        )
        parser.add_argument("-V",
                            "--version",
                            action="version",
                            version=program_version_message)

        # Process arguments
        args = parser.parse_args()

        json_fp = args.json
        bam_fp = args.bam
        out_fp = args.out
        depth = args.depth
        breadth = args.breadth
        proportion = args.proportion
        #ref_fp = args.ref
        #out_dir = args.odir
        #if not out_dir:
        #    out_dir = os.getcwd()

        #out_dir = dispatcher.expandPath(out_dir)
        #if not os.path.exists(out_dir):
        #    os.makedirs(out_dir)

        assay_list = assayInfo.parseJSON(json_fp)
        samdata = pysam.AlignmentFile(bam_fp, "rb")
        #reference = pysam.FastaFile(ref_fp)

        sample_dict = {}
        if 'RG' in samdata.header:
            sample_dict['name'] = samdata.header['RG'][0]['ID']
        else:
            sample_dict['name'] = os.path.splitext(os.path.basename(bam_fp))[0]
        sample_dict['mapped_reads'] = str(samdata.mapped)
        sample_dict['unmapped_reads'] = str(samdata.unmapped)
        sample_dict['unassigned_reads'] = str(samdata.nocoordinate)
        sample_node = ElementTree.Element("sample", sample_dict)

        #out_fp = os.path.join(out_dir, sample_dict['name']+".xml")

        for assay in assay_list:
            assay_dict = {}
            assay_dict['name'] = assay.name
            assay_dict['type'] = assay.assay_type
            assay_node = ElementTree.SubElement(sample_node, "assay",
                                                assay_dict)
            ref_name = assay.name
            reverse_comp = assay.target.reverse_comp
            for amplicon in assay.target.amplicons:
                ref_name = assay.name + "_%s" % amplicon.variant_name if amplicon.variant_name else assay.name
                amplicon_dict = {}
                amplicon_dict['reads'] = str(samdata.count(ref_name))
                if amplicon.variant_name:
                    amplicon_dict['variant'] = amplicon.variant_name
                amplicon_node = ElementTree.SubElement(assay_node, "amplicon",
                                                       amplicon_dict)
                if samdata.count(ref_name) == 0:
                    significance_node = ElementTree.SubElement(
                        amplicon_node, "significance", {"flag": "no coverage"})
                    #Check for indeterminate resistances
                    resistances = set()
                    if amplicon.significance and amplicon.significance.resistance:
                        resistances.add(amplicon.significance.resistance)
                    for snp in amplicon.SNPs:
                        if snp.significance.resistance:
                            resistances.add(snp.significance.resistance)
                    for roi in amplicon.ROIs:
                        if roi.significance.resistance:
                            resistances.add(roi.significance.resistance)
                    if resistances:
                        significance_node.set("resistance",
                                              ",".join(resistances))
                else:
                    if amplicon.significance or samdata.count(
                            ref_name) < depth:
                        significance_node = ElementTree.SubElement(
                            amplicon_node, "significance")
                        if amplicon.significance:
                            significance_node.text = amplicon.significance.message
                            if amplicon.significance.resistance:
                                significance_node.set(
                                    "resistance",
                                    amplicon.significance.resistance)
                        if samdata.count(ref_name) < depth:
                            significance_node.set("flag", "low coverage")
                            #Check for indeterminate resistances
                            resistances = set()
                            if amplicon.significance and amplicon.significance.resistance:
                                resistances.add(
                                    amplicon.significance.resistance)
                            for snp in amplicon.SNPs:
                                if snp.significance.resistance:
                                    resistances.add(
                                        snp.significance.resistance)
                            for roi in amplicon.ROIs:
                                if roi.significance.resistance:
                                    resistances.add(
                                        roi.significance.resistance)
                            if resistances:
                                significance_node.set("resistance",
                                                      ",".join(resistances))

                    pileup = samdata.pileup(ref_name, max_depth=1000000)
                    amplicon_data = _process_pileup(pileup, amplicon, depth,
                                                    proportion)
                    if float(amplicon_data['breadth']) < breadth * 100:
                        significance_node = amplicon_node.find("significance")
                        if significance_node is None:
                            significance_node = ElementTree.SubElement(
                                amplicon_node, "significance")
                        if not significance_node.get("flag"):
                            significance_node.set(
                                "flag", "insufficient breadth of coverage")
                    for snp in amplicon_data['SNPs']:
                        _add_snp_node(amplicon_node, snp)
                        # This would be helpful, but count_coverage is broken in python3
                        #print(samdata.count_coverage(ref_name, snp.position-1, snp.position))
                    del amplicon_data['SNPs']
                    _write_parameters(amplicon_node, amplicon_data)

                    for roi in amplicon.ROIs:
                        roi_dict = _process_roi(roi, samdata, ref_name,
                                                reverse_comp)
                        _add_roi_node(amplicon_node, roi, roi_dict, depth,
                                      proportion)

        samdata.close()
        _write_xml(sample_node, out_fp)

        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception as e:
        if DEBUG or TESTRUN:
            raise
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help\n")
        return 2
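A minimal, self-contained sketch of the significance/resistance pattern used above, with made-up amplicon data (the amplicon name, resistance strings, and coverage flag are illustrative only):

import xml.etree.ElementTree as ElementTree

# Hypothetical amplicon node with two resistance calls and low coverage.
amplicon_node = ElementTree.Element("amplicon", name="demo_amplicon")
resistances = {"rifampin", "isoniazid"}  # illustrative values

significance_node = ElementTree.SubElement(amplicon_node, "significance")
significance_node.text = "Mutation associated with resistance"
significance_node.set("flag", "low coverage")  # coverage below the depth threshold
if resistances:
    significance_node.set("resistance", ",".join(sorted(resistances)))

print(ElementTree.tostring(amplicon_node, encoding="unicode"))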
Beispiel #17
0
    def create_xml(self):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        self.ensure_one()
        company = self.company_id
        if not (company.partner_id and company.partner_id.country_id
                and company.partner_id.country_id.id):
            self._company_warning(
                _('The country of your company is not set, '
                  'please make sure to configure it first.'))
        if not company.company_registry:
            self._company_warning(
                _('The registry number of your company is not set, '
                  'please make sure to configure it first.'))
        if len(self.year) != 4:
            raise exceptions.Warning(_('Year must be 4 digits number (YYYY)'))

        #Create root declaration
        decl = ET.Element('DeclarationReport')
        decl.set('xmlns', INTRASTAT_XMLNS)

        #Add Administration elements
        admin = ET.SubElement(decl, 'Administration')
        fromtag = ET.SubElement(admin, 'From')
        fromtag.text = company.company_registry
        fromtag.set('declarerType', 'KBO')
        ET.SubElement(admin, 'To').text = "NBB"
        ET.SubElement(admin, 'Domain').text = "SXX"
        if self.arrivals == 'be-standard':
            decl.append(self.sudo()._get_lines(dispatchmode=False,
                                               extendedmode=False))
        elif self.arrivals == 'be-extended':
            decl.append(self.sudo()._get_lines(dispatchmode=False,
                                               extendedmode=True))
        if self.dispatches == 'be-standard':
            decl.append(self.sudo()._get_lines(dispatchmode=True,
                                               extendedmode=False))
        elif self.dispatches == 'be-extended':
            decl.append(self.sudo()._get_lines(dispatchmode=True,
                                               extendedmode=True))

        #Get xml string with declaration
        data_file = ET.tostring(decl, encoding='UTF-8', method='xml')

        #change state of the wizard
        self.write({
            'name': 'intrastat_%s%s.xml' % (self.year, self.month),
            'file_save': base64.encodestring(data_file),
            'state': 'download'
        })
        return {
            'name': _('Save'),
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'l10n_be_intrastat_xml.xml_decl',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': self.id,
        }
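Outside of Odoo, the same Administration skeleton can be reproduced with plain ElementTree; the namespace URI and registry number below are placeholders, not values from the original module:

import xml.etree.ElementTree as ET

INTRASTAT_XMLNS = "http://www.example.org/intrastat"  # placeholder namespace

decl = ET.Element('DeclarationReport')
decl.set('xmlns', INTRASTAT_XMLNS)

admin = ET.SubElement(decl, 'Administration')
fromtag = ET.SubElement(admin, 'From')
fromtag.text = "0123456789"  # placeholder company registry number
fromtag.set('declarerType', 'KBO')
ET.SubElement(admin, 'To').text = "NBB"
ET.SubElement(admin, 'Domain').text = "SXX"

print(ET.tostring(decl, encoding='unicode'))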
def append_to_manifest(project):
    try:
        lm = ES.parse('/'.join([local_manifest_dir, "roomservice.xml"]))
        lm = lm.getroot()
    except (IOError, ES.ParseError):
        # Fall back to an empty manifest root if the file is missing or malformed
        lm = ES.Element("manifest")
    def setUp(self):
        self.flatten_feed = transitfeed.Loader(
            data_path('flatten_feed')).load()
        self.good_feed = transitfeed.Loader(data_path('good_feed.zip')).load()
        self.kmlwriter = kmlwriter.KMLWriter()
        self.parent = Et.Element('parent')
def load_manifest(manifest):
    try:
        man = ElementTree.parse(manifest).getroot()
    except (IOError, ElementTree.ParseError):
        man = ElementTree.Element("manifest")
    return man
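A usage sketch for load_manifest: read an existing manifest (the path is made up), append an illustrative <project> entry, and write it back; if the file is missing or malformed, the function simply returns a fresh empty <manifest> root:

import xml.etree.ElementTree as ElementTree

man = load_manifest("local_manifests/roomservice.xml")  # hypothetical path
ElementTree.SubElement(man, "project",
                       name="example/device_tree",      # illustrative attributes
                       path="device/example",
                       remote="github")
ElementTree.ElementTree(man).write("local_manifests/roomservice.xml",
                                   encoding="UTF-8", xml_declaration=True)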
    def setUp(self):
        self.kmlwriter = kmlwriter.KMLWriter()
        self.parent = Et.Element('parent')
Beispiel #22
0
def _boundless_vrt_doc(src_dataset,
                       nodata=None,
                       background=None,
                       hidenodata=False,
                       width=None,
                       height=None,
                       transform=None,
                       masked=False):
    """Make a VRT XML document.

    Parameters
    ----------
    src_dataset : Dataset
        The dataset to wrap.
    nodata : int or float, optional
        Nodata value for the VRT; defaults to the source dataset's nodata.
    background : int or float, optional
        The background fill value for the boundless VRT.
    hidenodata : bool, optional
        If True, a HideNoDataValue element is written for each band.
    width, height : int, optional
        Output raster size; default to the source dataset's size.
    transform : Affine, optional
        Output geotransform; defaults to the source dataset's transform.
    masked : bool
        If True, the src_dataset is replaced by its valid data mask.

    Returns
    -------
    str
        An XML text string.
    """

    nodata = nodata or src_dataset.nodata
    width = width or src_dataset.width
    height = height or src_dataset.height
    transform = transform or src_dataset.transform

    vrtdataset = ET.Element('VRTDataset')
    vrtdataset.attrib['rasterYSize'] = str(height)
    vrtdataset.attrib['rasterXSize'] = str(width)
    srs = ET.SubElement(vrtdataset, 'SRS')
    srs.text = src_dataset.crs.wkt if src_dataset.crs else ""
    geotransform = ET.SubElement(vrtdataset, 'GeoTransform')
    geotransform.text = ','.join([str(v) for v in transform.to_gdal()])

    for bidx, ci, block_shape, dtype in zip(src_dataset.indexes,
                                            src_dataset.colorinterp,
                                            src_dataset.block_shapes,
                                            src_dataset.dtypes):
        vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')
        vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)
        vrtrasterband.attrib['band'] = str(bidx)

        if nodata is not None:
            nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')
            nodatavalue.text = str(nodata)

            if hidenodata:
                hidenodatavalue = ET.SubElement(vrtrasterband,
                                                'HideNoDataValue')
                hidenodatavalue.text = "1"

        colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')
        colorinterp.text = ci.name.capitalize()

        if background is not None:
            complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')
            sourcefilename = ET.SubElement(complexsource, 'SourceFilename')
            sourcefilename.attrib['relativeToVRT'] = '1'
            sourcefilename.attrib["shared"] = "0"
            sourcefilename.text = "dummy.tif"
            sourceband = ET.SubElement(complexsource, 'SourceBand')
            sourceband.text = str(bidx)
            sourceproperties = ET.SubElement(complexsource, 'SourceProperties')
            sourceproperties.attrib['RasterXSize'] = str(width)
            sourceproperties.attrib['RasterYSize'] = str(height)
            sourceproperties.attrib['dataType'] = _gdal_typename(dtype)
            sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
            sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
            srcrect = ET.SubElement(complexsource, 'SrcRect')
            srcrect.attrib['xOff'] = '0'
            srcrect.attrib['yOff'] = '0'
            srcrect.attrib['xSize'] = '1'
            srcrect.attrib['ySize'] = '1'
            dstrect = ET.SubElement(complexsource, 'DstRect')
            dstrect.attrib['xOff'] = '0'
            dstrect.attrib['yOff'] = '0'
            dstrect.attrib['xSize'] = '1'
            dstrect.attrib['ySize'] = '1'
            scaleratio = ET.SubElement(complexsource, 'ScaleRatio')
            scaleratio.text = '0'
            scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')
            scaleoffset.text = str(background)

        complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')
        sourcefilename = ET.SubElement(complexsource, 'SourceFilename')
        sourcefilename.attrib['relativeToVRT'] = "0"
        sourcefilename.attrib["shared"] = "0"
        sourcefilename.text = parse_path(src_dataset.name).as_vsi()
        sourceband = ET.SubElement(complexsource, 'SourceBand')
        sourceband.text = str(bidx)
        sourceproperties = ET.SubElement(complexsource, 'SourceProperties')
        sourceproperties.attrib['RasterXSize'] = str(width)
        sourceproperties.attrib['RasterYSize'] = str(height)
        sourceproperties.attrib['dataType'] = _gdal_typename(dtype)
        sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
        sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
        srcrect = ET.SubElement(complexsource, 'SrcRect')
        srcrect.attrib['xOff'] = '0'
        srcrect.attrib['yOff'] = '0'
        srcrect.attrib['xSize'] = str(src_dataset.width)
        srcrect.attrib['ySize'] = str(src_dataset.height)
        dstrect = ET.SubElement(complexsource, 'DstRect')
        dstrect.attrib['xOff'] = str(
            (src_dataset.transform.xoff - transform.xoff) / transform.a)
        dstrect.attrib['yOff'] = str(
            (src_dataset.transform.yoff - transform.yoff) / transform.e)
        dstrect.attrib['xSize'] = str(src_dataset.width *
                                      src_dataset.transform.a / transform.a)
        dstrect.attrib['ySize'] = str(src_dataset.height *
                                      src_dataset.transform.e / transform.e)

        if src_dataset.nodata is not None:
            nodata_elem = ET.SubElement(complexsource, 'NODATA')
            nodata_elem.text = str(src_dataset.nodata)

        if src_dataset.options is not None:
            openoptions = ET.SubElement(complexsource, 'OpenOptions')
            for ookey, oovalue in src_dataset.options.items():
                ooi = ET.SubElement(openoptions, 'OOI')
                ooi.attrib['key'] = str(ookey)
                ooi.text = str(oovalue)

        # Effectively replaces all values of the source dataset with
        # 255.  Due to GDAL optimizations, the source dataset will not
        # be read, so we get a performance improvement.
        if masked:
            scaleratio = ET.SubElement(complexsource, 'ScaleRatio')
            scaleratio.text = '0'
            scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')
            scaleoffset.text = '255'

    if all(MaskFlags.per_dataset in flags
           for flags in src_dataset.mask_flag_enums):
        maskband = ET.SubElement(vrtdataset, 'MaskBand')
        vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')
        vrtrasterband.attrib['dataType'] = 'Byte'

        simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')
        sourcefilename = ET.SubElement(simplesource, 'SourceFilename')
        sourcefilename.attrib['relativeToVRT'] = "0"
        sourcefilename.attrib["shared"] = "0"
        sourcefilename.text = parse_path(src_dataset.name).as_vsi()

        sourceband = ET.SubElement(simplesource, 'SourceBand')
        sourceband.text = 'mask,1'
        sourceproperties = ET.SubElement(simplesource, 'SourceProperties')
        sourceproperties.attrib['RasterXSize'] = str(width)
        sourceproperties.attrib['RasterYSize'] = str(height)
        sourceproperties.attrib['dataType'] = 'Byte'
        sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
        sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
        srcrect = ET.SubElement(simplesource, 'SrcRect')
        srcrect.attrib['xOff'] = '0'
        srcrect.attrib['yOff'] = '0'
        srcrect.attrib['xSize'] = str(src_dataset.width)
        srcrect.attrib['ySize'] = str(src_dataset.height)
        dstrect = ET.SubElement(simplesource, 'DstRect')
        dstrect.attrib['xOff'] = str(
            (src_dataset.transform.xoff - transform.xoff) / transform.a)
        dstrect.attrib['yOff'] = str(
            (src_dataset.transform.yoff - transform.yoff) / transform.e)
        dstrect.attrib['xSize'] = str(src_dataset.width)
        dstrect.attrib['ySize'] = str(src_dataset.height)

    return ET.tostring(vrtdataset).decode('ascii')
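The returned value is a plain XML string, so it can be inspected with ElementTree; a minimal sketch (the VRT snippet below is handwritten, not produced by _boundless_vrt_doc):

import xml.etree.ElementTree as ET

vrt_xml = """<VRTDataset rasterXSize="512" rasterYSize="256">
  <VRTRasterBand dataType="Byte" band="1">
    <NoDataValue>0</NoDataValue>
  </VRTRasterBand>
</VRTDataset>"""

root = ET.fromstring(vrt_xml)
print(root.attrib['rasterXSize'], root.attrib['rasterYSize'])
for band in root.findall('VRTRasterBand'):
    print(band.get('band'), band.get('dataType'), band.findtext('NoDataValue'))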
def new_element(tag, text):
    """Helper function to create an element with text"""
    elem = xml.Element(tag)
    elem.text = text
    return elem
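A short usage sketch, assuming new_element above is in scope and xml aliases xml.etree.ElementTree:

from xml.etree import ElementTree as xml

parent = xml.Element("book")
parent.append(new_element("title", "Example Title"))   # helper defined above
parent.append(new_element("author", "Jane Doe"))
print(xml.tostring(parent, encoding="unicode"))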
    def __init__(self):
        self.root = ElementTree.Element("Tlv")
Beispiel #25
0
    def to_xml_element(self, cross_sections=None):
        """Return XML representation of the material

        Parameters
        ----------
        cross_sections : str
            Path to an XML cross sections listing file

        Returns
        -------
        element : xml.etree.ElementTree.Element
            XML element containing material data

        """

        # Create Material XML element
        element = ET.Element("material")
        element.set("id", str(self._id))

        if len(self._name) > 0:
            element.set("name", str(self._name))

        if self._depletable:
            element.set("depletable", "true")

        if self._volume:
            element.set("volume", str(self._volume))

        # Create temperature XML subelement
        if self.temperature is not None:
            element.set("temperature", str(self.temperature))

        # Create density XML subelement
        if self._density is not None or self._density_units == 'sum':
            subelement = ET.SubElement(element, "density")
            if self._density_units != 'sum':
                subelement.set("value", str(self._density))
            subelement.set("units", self._density_units)
        else:
            raise ValueError(
                'Density has not been set for material {}!'.format(self.id))

        if self._macroscopic is None:
            # Create nuclide XML subelements
            subelements = self._get_nuclides_xml(self._nuclides)
            for subelement in subelements:
                element.append(subelement)
        else:
            # Create macroscopic XML subelements
            subelement = self._get_macroscopic_xml(self._macroscopic)
            element.append(subelement)

        if self._sab:
            for sab in self._sab:
                subelement = ET.SubElement(element, "sab")
                subelement.set("name", sab[0])
                if sab[1] != 1.0:
                    subelement.set("fraction", str(sab[1]))

        if self._isotropic:
            subelement = ET.SubElement(element, "isotropic")
            subelement.text = ' '.join(self._isotropic)

        return element
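A hedged sketch of how such an element might be wrapped and serialized into a materials file; the <materials> root, the attribute values, and the pretty-printing step are assumptions, not taken from the class above:

import xml.etree.ElementTree as ET
from xml.dom import minidom

# Hypothetical element mirroring the shape of what to_xml_element() returns.
material = ET.Element("material", id="1", name="fuel")
density = ET.SubElement(material, "density")
density.set("value", "10.3")
density.set("units", "g/cm3")

root = ET.Element("materials")
root.append(material)

print(minidom.parseString(ET.tostring(root)).toprettyxml(indent="  "))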
Beispiel #26
0
def interface_tengigabitethernet_switchport_trunk_private_vlan_classification_private_vlan_trunk_allowed_vlan_remove_trunk_ctag_id(
        **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    interface = ET.SubElement(config,
                              "interface",
                              xmlns="urn:brocade.com:mgmt:brocade-interface")
    if kwargs.pop('delete_interface', False) is True:
        delete_interface = config.find('.//*interface')
        delete_interface.set('operation', 'delete')

    tengigabitethernet = ET.SubElement(interface, "tengigabitethernet")
    if kwargs.pop('delete_tengigabitethernet', False) is True:
        delete_tengigabitethernet = config.find('.//*tengigabitethernet')
        delete_tengigabitethernet.set('operation', 'delete')

    name_key = ET.SubElement(tengigabitethernet, "name")
    name_key.text = kwargs.pop('name')
    if kwargs.pop('delete_name', False) is True:
        delete_name = config.find('.//*name')
        delete_name.set('operation', 'delete')

    switchport = ET.SubElement(tengigabitethernet, "switchport")
    if kwargs.pop('delete_switchport', False) is True:
        delete_switchport = config.find('.//*switchport')
        delete_switchport.set('operation', 'delete')

    trunk_private_vlan_classification = ET.SubElement(
        switchport, "trunk-private-vlan-classification")
    if kwargs.pop('delete_trunk_private_vlan_classification', False) is True:
        delete_trunk_private_vlan_classification = config.find(
            './/*trunk-private-vlan-classification')
        delete_trunk_private_vlan_classification.set('operation', 'delete')

    private_vlan = ET.SubElement(trunk_private_vlan_classification,
                                 "private-vlan")
    if kwargs.pop('delete_private_vlan', False) is True:
        delete_private_vlan = config.find('.//*private-vlan')
        delete_private_vlan.set('operation', 'delete')

    trunk = ET.SubElement(private_vlan, "trunk")
    if kwargs.pop('delete_trunk', False) is True:
        delete_trunk = config.find('.//*trunk')
        delete_trunk.set('operation', 'delete')

    allowed = ET.SubElement(trunk, "allowed")
    if kwargs.pop('delete_allowed', False) is True:
        delete_allowed = config.find('.//*allowed')
        delete_allowed.set('operation', 'delete')

    vlan = ET.SubElement(allowed, "vlan")
    if kwargs.pop('delete_vlan', False) is True:
        delete_vlan = config.find('.//*vlan')
        delete_vlan.set('operation', 'delete')

    remove = ET.SubElement(vlan, "remove")
    if kwargs.pop('delete_remove', False) is True:
        delete_remove = config.find('.//*remove')
        delete_remove.set('operation', 'delete')

    trunk_vlan_id_key = ET.SubElement(remove, "trunk-vlan-id")
    trunk_vlan_id_key.text = kwargs.pop('trunk_vlan_id')
    if kwargs.pop('delete_trunk_vlan_id', False) is True:
        delete_trunk_vlan_id = config.find('.//*trunk-vlan-id')
        delete_trunk_vlan_id.set('operation', 'delete')

    trunk_ctag_id = ET.SubElement(remove, "trunk-ctag-id")
    if kwargs.pop('delete_trunk_ctag_id', False) is True:
        delete_trunk_ctag_id = config.find('.//*trunk-ctag-id')
        delete_trunk_ctag_id.set('operation', 'delete')

    trunk_ctag_id.text = kwargs.pop('trunk_ctag_id')

    callback = kwargs.pop('callback', _callback)
    return callback(config, mgr=kwargs.pop('mgr'))
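Each delete_* keyword above follows the same pattern: create the element, look it back up with config.find, and stamp it with operation="delete" for the NETCONF edit-config. A standalone sketch of that pattern (the namespace URI and interface name are illustrative):

import xml.etree.ElementTree as ET

config = ET.Element("config")
interface = ET.SubElement(config, "interface",
                          xmlns="urn:example:mgmt:interface")  # placeholder namespace
name = ET.SubElement(interface, "name")
name.text = "1/0/1"

# Mark the <interface> subtree for deletion, as the delete_* flags do above.
target = config.find('.//interface')
if target is not None:
    target.set('operation', 'delete')

print(ET.tostring(config, encoding="unicode"))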
    def label(self, contents, xargs=None):
        """Return a <label> element pointing at this element's id."""
        attrs = dict(xargs or {})  # avoid mutating a shared default dict
        attrs["for"] = self.idname
        label = ET.Element('label', attrs)
        mergecontents(label, contents)
        return label
def rbridge_id_router_router_bgp_router_bgp_attributes_neighbor_neighbor_ips_neighbor_addr_static_network_edge(
        **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rbridge_id = ET.SubElement(config,
                               "rbridge-id",
                               xmlns="urn:brocade.com:mgmt:brocade-rbridge")
    if kwargs.pop('delete_rbridge_id', False) is True:
        delete_rbridge_id = config.find('.//*rbridge-id')
        delete_rbridge_id.set('operation', 'delete')

    rbridge_id_key = ET.SubElement(rbridge_id, "rbridge-id")
    rbridge_id_key.text = kwargs.pop('rbridge_id')
    if kwargs.pop('delete_rbridge_id', False) is True:
        delete_rbridge_id = config.find('.//*rbridge-id')
        delete_rbridge_id.set('operation', 'delete')

    router = ET.SubElement(rbridge_id, "router")
    if kwargs.pop('delete_router', False) is True:
        delete_router = config.find('.//*router')
        delete_router.set('operation', 'delete')

    router_bgp = ET.SubElement(router,
                               "router-bgp",
                               xmlns="urn:brocade.com:mgmt:brocade-bgp")
    if kwargs.pop('delete_router_bgp', False) is True:
        delete_router_bgp = config.find('.//*router-bgp')
        delete_router_bgp.set('operation', 'delete')

    router_bgp_attributes = ET.SubElement(router_bgp, "router-bgp-attributes")
    if kwargs.pop('delete_router_bgp_attributes', False) is True:
        delete_router_bgp_attributes = config.find('.//*router-bgp-attributes')
        delete_router_bgp_attributes.set('operation', 'delete')

    neighbor = ET.SubElement(router_bgp_attributes, "neighbor")
    if kwargs.pop('delete_neighbor', False) is True:
        delete_neighbor = config.find('.//*neighbor')
        delete_neighbor.set('operation', 'delete')

    neighbor_ips = ET.SubElement(neighbor, "neighbor-ips")
    if kwargs.pop('delete_neighbor_ips', False) is True:
        delete_neighbor_ips = config.find('.//*neighbor-ips')
        delete_neighbor_ips.set('operation', 'delete')

    neighbor_addr = ET.SubElement(neighbor_ips, "neighbor-addr")
    if kwargs.pop('delete_neighbor_addr', False) is True:
        delete_neighbor_addr = config.find('.//*neighbor-addr')
        delete_neighbor_addr.set('operation', 'delete')

    router_bgp_neighbor_address_key = ET.SubElement(
        neighbor_addr, "router-bgp-neighbor-address")
    router_bgp_neighbor_address_key.text = kwargs.pop(
        'router_bgp_neighbor_address')
    if kwargs.pop('delete_router_bgp_neighbor_address', False) is True:
        delete_router_bgp_neighbor_address = config.find(
            './/*router-bgp-neighbor-address')
        delete_router_bgp_neighbor_address.set('operation', 'delete')

    static_network_edge = ET.SubElement(neighbor_addr, "static-network-edge")
    if kwargs.pop('delete_static_network_edge', False) is True:
        delete_static_network_edge = config.find('.//*static-network-edge')
        delete_static_network_edge.set('operation', 'delete')

    callback = kwargs.pop('callback', _callback)
    return callback(config, mgr=kwargs.pop('mgr'))
Beispiel #29
0
    def build_xml_doc(self, encoding=None):
        """
        Builds the XML document for the JUnit test suite.
        Produces clean unicode strings and decodes non-unicode with the help of encoding.
        @param encoding: Used to decode encoded strings.
        @return: XML document with unicode string elements
        """

        # build the test suite element
        test_suite_attributes = dict()
        if any(c.assertions for c in self.test_cases):
            test_suite_attributes["assertions"] = str(
                sum([
                    int(c.assertions) for c in self.test_cases if c.assertions
                ]))
        # test_suite_attributes["disabled"] = str(len([c for c in self.test_cases if not c.is_enabled]))
        test_suite_attributes["errors"] = str(
            len([c for c in self.test_cases if c.is_error()]))
        test_suite_attributes["failures"] = str(
            len([c for c in self.test_cases if c.is_failure()]))
        test_suite_attributes["name"] = str(self.name)
        test_suite_attributes["skipped"] = str(
            len([c for c in self.test_cases if c.is_skipped()]))
        test_suite_attributes["tests"] = str(len(self.test_cases))
        test_suite_attributes["time"] = str(
            sum(c.elapsed_sec for c in self.test_cases if c.elapsed_sec))

        if self.hostname:
            test_suite_attributes["hostname"] = str(self.hostname)
        if self.id:
            test_suite_attributes["id"] = str(self.id)
        if self.package:
            test_suite_attributes["package"] = str(self.package)
        if self.timestamp:
            test_suite_attributes["timestamp"] = str(self.timestamp)
        if self.file:
            test_suite_attributes["file"] = str(self.file)
        if self.log:
            test_suite_attributes["log"] = str(self.log)
        if self.url:
            test_suite_attributes["url"] = str(self.url)

        xml_element = ET.Element("testsuite", test_suite_attributes)

        # add any properties
        if self.properties:
            props_element = ET.SubElement(xml_element, "properties")
            for k, v in self.properties.items():
                attrs = {"name": str(k), "value": str(v)}
                ET.SubElement(props_element, "property", attrs)

        # add test suite stdout
        if self.stdout:
            stdout_element = ET.SubElement(xml_element, "system-out")
            stdout_element.text = str(self.stdout)

        # add test suite stderr
        if self.stderr:
            stderr_element = ET.SubElement(xml_element, "system-err")
            stderr_element.text = str(self.stderr)

        # test cases
        for case in self.test_cases:
            test_case_attributes = dict()
            test_case_attributes["name"] = str(case.name)
            if case.assertions:
                # Number of assertions in the test case
                test_case_attributes["assertions"] = "%d" % case.assertions
            if case.elapsed_sec:
                test_case_attributes["time"] = "%f" % case.elapsed_sec
            if case.timestamp:
                test_case_attributes["timestamp"] = str(case.timestamp)
            if case.classname:
                test_case_attributes["classname"] = str(case.classname)
            if case.status:
                test_case_attributes["status"] = str(case.status)
            if case.category:
                test_case_attributes["class"] = str(case.category)
            if case.file:
                test_case_attributes["file"] = str(case.file)
            if case.line:
                test_case_attributes["line"] = str(case.line)
            if case.log:
                test_case_attributes["log"] = str(case.log)
            if case.url:
                test_case_attributes["url"] = str(case.url)

            test_case_element = ET.SubElement(xml_element, "testcase",
                                              test_case_attributes)

            # add any properties
            if case.properties:
                props_element = ET.SubElement(test_case_element, "properties")
                for k, v in case.properties.items():
                    attrs = {"name": str(k), "value": str(v)}
                    ET.SubElement(props_element, "property", attrs)

            # failures
            for failure in case.failures:
                if failure["output"] or failure["message"]:
                    attrs = {"type": "failure"}
                    if failure["message"]:
                        attrs["message"] = str(failure["message"])
                    if failure["type"]:
                        attrs["type"] = str(failure["type"])
                    failure_element = ET.Element("failure", attrs)
                    if failure["output"]:
                        failure_element.text = str(failure["output"])
                    test_case_element.append(failure_element)

            # errors
            for error in case.errors:
                if error["message"] or error["output"]:
                    attrs = {"type": "error"}
                    if error["message"]:
                        attrs["message"] = str(error["message"])
                    if error["type"]:
                        attrs["type"] = str(error["type"])
                    error_element = ET.Element("error", attrs)
                    if error["output"]:
                        error_element.text = str(error["output"])
                    test_case_element.append(error_element)

            # skippeds
            for skipped in case.skipped:
                attrs = {"type": "skipped"}
                if skipped["message"]:
                    attrs["message"] = str(skipped["message"])
                skipped_element = ET.Element("skipped", attrs)
                if skipped["output"]:
                    skipped_element.text = str(skipped["output"])
                test_case_element.append(skipped_element)

            # test stdout
            if case.stdout:
                stdout_element = ET.Element("system-out")
                stdout_element.text = str(case.stdout)
                test_case_element.append(stdout_element)

            # test stderr
            if case.stderr:
                stderr_element = ET.Element("system-err")
                stderr_element.text = str(case.stderr)
                test_case_element.append(stderr_element)
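A minimal sketch of the JUnit layout that build_xml_doc produces, built directly with ElementTree; the attribute values are illustrative and not computed the way the method computes them:

import xml.etree.ElementTree as ET

testsuite = ET.Element("testsuite", {
    "name": "demo-suite", "tests": "2", "failures": "1",
    "errors": "0", "skipped": "0", "time": "0.042",
})

ET.SubElement(testsuite, "testcase",
              {"name": "test_passes", "classname": "demo.Tests", "time": "0.010"})

bad_case = ET.SubElement(testsuite, "testcase",
                         {"name": "test_fails", "classname": "demo.Tests", "time": "0.032"})
failure = ET.SubElement(bad_case, "failure",
                        {"type": "AssertionError", "message": "expected 1, got 2"})
failure.text = "Traceback (illustrative) ..."

print(ET.tostring(testsuite, encoding="unicode"))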
Beispiel #30
0
def dictParseQMark(userId=1, userSecret="", hideSecret=False):
    strQMarkXml = strRetrieveQMark(userId, userSecret)
    etQMarkXmlRoot = None
    try:
        etQMarkXmlRoot = ET.fromstring(strQMarkXml)
        etQMarkXmlRoot = etQMarkXmlRoot.find("channel")
    except Exception as e:
        # print("Invalid RSS")
        return {"valid": False, "message": "Invalid RSS"}

    # ============================================================
    # Parse XML into dictQMark
    dictQMark = {"valid": True}

    # Include Title
    etQmarkProcessing = etQMarkXmlRoot.find("title")
    dictQMark["title"] = etQmarkProcessing.text

    # Include LastBuildDate
    etQmarkProcessing = etQMarkXmlRoot.find("lastBuildDate")
    dictQMark["lastBuildDate"] = etQmarkProcessing.text

    if (hideSecret):
        # As of Mar 2021, secret element is the first and only element with attribute "rel"
        etQmarkProcessing = etQMarkXmlRoot.find('*[@rel="self"]')
        # print(etQMarkSecret) # debug
        dictQMark["link"] = etQmarkProcessing.attrib['href']

    # Include Mark Items
    dictQMark["items"] = []
    for etQMarkXmlItem in etQMarkXmlRoot.findall("item"):
        dictQMarkItem = {}

        # Retrieve Marked Article Title
        etQmarkProcessing = etQMarkXmlItem.find("title")
        if (etQmarkProcessing is None):
            dictQMarkItem["title"] = ""
        else:
            dictQMarkItem["title"] = etQmarkProcessing.text

        # Include Marked Article Link
        etQmarkProcessing = etQMarkXmlItem.find("link")
        if (etQmarkProcessing is None):
            dictQMarkItem["link"] = ""
        else:
            dictQMarkItem["link"] = etQmarkProcessing.text

        etQmarkProcessing = etQMarkXmlItem.find("description")
        if ((etQmarkProcessing is not None)
                and (etQmarkProcessing.text is not None)):
            etQmarkProcessing = ET.fromstring(
                "<div>" + html.unescape(etQmarkProcessing.text.strip()) +
                "</div>")
        else:
            etQmarkProcessing = ET.Element("div")
        # print( ET.tostring(etQmarkProcessing, encoding='unicode') )
        # Include Quotes (if any)
        etQMarkItemQuotes = etQmarkProcessing.find("blockquote")
        dictQMarkItem["quotes"] = []
        if etQMarkItemQuotes is not None:
            for etQuote in etQMarkItemQuotes:
                dictQMarkItem["quotes"].append(
                    ET.tostring(etQuote, encoding='unicode'))
        # Include Notes (if any)
        etQMarkItemNotes = etQmarkProcessing.find("aside")
        dictQMarkItem["notes"] = []
        if etQMarkItemNotes is not None:
            # print("text: "+etQMarkItemNotes.text+"endtext") # debug
            dictQMarkItem["notes"].append(
                ET.tostring(etQMarkItemNotes, encoding='unicode'))

        # Include Mark Item Link
        etQmarkProcessing = etQMarkXmlItem.find("guid")
        dictQMarkItem["guid"] = etQmarkProcessing.text

        # Include Mark Item PubDate
        etQmarkProcessing = etQMarkXmlItem.find("pubDate")
        dictQMarkItem["pubDate"] = etQmarkProcessing.text

        dictQMark["items"].append(dictQMarkItem)
        # print(json.dumps(dictQMarkItem)) # debug

    return dictQMark
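The same channel/item traversal works against any small RSS document; a self-contained sketch using an inline feed (the feed content is made up and strRetrieveQMark is not called):

import xml.etree.ElementTree as ET

rss = """<rss version="2.0"><channel>
  <title>Demo Marks</title>
  <lastBuildDate>Mon, 01 Jan 2024 00:00:00 GMT</lastBuildDate>
  <item>
    <title>First article</title>
    <link>https://example.com/a</link>
    <guid>https://example.com/a</guid>
    <pubDate>Mon, 01 Jan 2024 00:00:00 GMT</pubDate>
  </item>
</channel></rss>"""

channel = ET.fromstring(rss).find("channel")
print(channel.findtext("title"), channel.findtext("lastBuildDate"))
for item in channel.findall("item"):
    print(item.findtext("title"), item.findtext("link"), item.findtext("pubDate"))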