def _notify(self, statevars):
    """Send a UPnP GENA NOTIFY event to every subscribed callback URL.

    statevars: iterable of state-variable objects; each must have sendEvents
    set, plus a name and text_value. Raises Exception on a non-evented var.
    """
    # create the request body
    propset = Element("e:propertyset")
    propset.attrib['xmlns:e'] = "urn:schemas-upnp-org:event-1-0"
    prop = SubElement(propset, "e:property")
    # add each evented statevar to the property set
    for statevar in statevars:
        if statevar.sendEvents:
            SubElement(prop, statevar.name).text = statevar.text_value
        else:
            raise Exception("StateVar '%s' is not evented" % statevar.name)
    postData = xmlprint(propset)
    logger.log_debug("NOTIFY property set:\n" + postData)
    # send the NOTIFY request to each callback
    for url, urlparts in self.callbacks:
        # set the NOTIFY headers (SID/SEQ identify the subscription and event)
        headers = {
            'Host': urlparts.netloc,
            'Content-Type': MimeType('text', 'xml'),
            'NT': 'upnp:event',
            'NTS': 'upnp:propchange',
            'SID': self.id,
            'SEQ': self.seqid
        }
        # NOTE(review): the creation of `creator` is commented out below, yet
        # `creator.connectTCP(...)` is still called — confirm `creator` is
        # defined elsewhere (e.g. an attribute or module global), otherwise
        # this raises NameError at runtime.
        # creator = protocol.ClientCreator(reactor, HTTPClientProtocol)
        request = ClientRequest("NOTIFY", urlparts.path, headers, postData)
        d = creator.connectTCP(urlparts.hostname, urlparts.port)
        d.addCallback(self._sendNotifyRequest, request)
        logger.log_debug("sending NOTIFY to %s" % url)
    # one SEQ increment per event, shared by all callbacks
    # (assumed dedented out of the loop — TODO confirm against original file)
    self.seqid = self.seqid + 1
def process_task(self, target, command, args):
    """Test *target* for TLS compression (DEFLATE) support.

    Returns a PluginBase.PluginResult carrying both the text and XML output.
    Raises RuntimeError when the local OpenSSL build lacks zlib support.
    """
    OUT_FORMAT = ' {0:<35}{1}'.format
    sslConn = create_sslyze_connection(target, self._shared_settings)
    # Make sure OpenSSL was built with support for compression to avoid false negatives
    if 'zlib compression' not in sslConn.get_available_compression_methods():
        raise RuntimeError('OpenSSL was not built with support for zlib / compression. Did you build nassl yourself ?')
    try:
        # Perform the SSL handshake
        sslConn.connect()
        compName = sslConn.get_current_compression_method()
    except ClientAuthenticationError:
        # The server asked for a client cert; the handshake still went far
        # enough that the negotiated compression method is available
        compName = sslConn.get_current_compression_method()
    finally:
        sslConn.close()
    # Text output
    if compName:
        compTxt = 'Supported'
    else:
        compTxt = 'Disabled'
    cmdTitle = 'Compression'
    txtOutput = [self.PLUGIN_TITLE_FORMAT(cmdTitle)]
    txtOutput.append(OUT_FORMAT("DEFLATE Compression:", compTxt))
    # XML output: a <compressionMethod> child is added only when supported
    xmlOutput = Element(command, title=cmdTitle)
    if compName:
        xmlNode = Element('compressionMethod', type="DEFLATE")
        xmlOutput.append(xmlNode)
    return PluginBase.PluginResult(txtOutput, xmlOutput)
def makeXML(self, work_directory):
    """Build the GENI <Artifact> XML document and write it to
    work_directory/artifact.xml."""
    # Root element: <Artifact> with GENI schema namespace declarations
    TheArtifact = Element('Artifact')
    TheArtifact.attrib['xmlns'] = "http://geni.net/schema"
    TheArtifact.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
    TheArtifact.attrib['xsi:schemaLocation'] = "http://geni.net/schema GENIObject.xsd"
    # <Artifact><Type><Primary> : the primary artifact type
    Type = SubElement(TheArtifact, 'Type')
    Primary = SubElement(Type, 'Primary')
    Primary.text = self.art_type_prime
    # <Artifact><Interpretation><Interpretation_read_me_text> : read-me text
    Interpretation = SubElement(TheArtifact, 'Interpretation')
    Read_me_text = SubElement(Interpretation, 'Interpretation_read_me_text')
    Read_me_text.text = self.interpretation_read_me
    # To print to file.
    # NOTE(review): _setroot() is a private ElementTree API;
    # ElementTree(TheArtifact) would achieve the same without it.
    Test = ElementTree()
    Test._setroot(TheArtifact)
    root = Test.getroot()
    self.indent(root)  # in-place pretty-print helper defined on this class
    Test.write(work_directory + '/artifact.xml')
def convert_to_xml(self):
    """Aggregate every suite's XML under a single <testsuites> root."""
    root = Element('testsuites')
    root.extend(suite.convert_to_xml() for suite in self.suites)
    return root
def process_task(self, target, command, args):
    """Check whether *target* sends an HTTP Strict Transport Security header.

    Returns a PluginBase.PluginResult with text and XML output. Raises when
    combined with --starttls, which is incompatible with an HTTP request.
    """
    if self._shared_settings['starttls']:
        raise Exception('Cannot use --hsts with --starttls.')
    hsts_supported = self._get_hsts_header(target)
    if hsts_supported:
        # keep the raw header value before collapsing the flag to a boolean
        hsts_timeout = hsts_supported
        hsts_supported = True
    # Text output
    cmd_title = 'HTTP Strict Transport Security'
    txt_result = [self.PLUGIN_TITLE_FORMAT(cmd_title)]
    if hsts_supported:
        txt_result.append(self.FIELD_FORMAT("OK - HSTS header received:", hsts_timeout))
    else:
        txt_result.append(self.FIELD_FORMAT("NOT SUPPORTED - Server did not send an HSTS header.", ""))
    # XML output: header value attribute is present only when supported
    xml_hsts_attr = {'sentHstsHeader': str(hsts_supported)}
    if hsts_supported:
        xml_hsts_attr['hstsHeaderValue'] = hsts_timeout
    xml_hsts = Element('hsts', attrib = xml_hsts_attr)
    xml_result = Element('hsts', title = cmd_title)
    xml_result.append(xml_hsts)
    return PluginBase.PluginResult(txt_result, xml_result)
def xml_response(results, mytype):
    """Serialize *results* as <data><mytype>...</mytype>...</data> and wrap
    the document in a text/xml Response."""
    root = Element('data')
    for value in results:
        entry = Element(mytype)
        entry.text = value
        root.append(entry)
    payload = '<?xml version="1.0" encoding="UTF-8"?>' + tostring(root)
    return Response(payload, mimetype='text/xml')
def as_xml(self) -> Element:
    """Return this scan result as an XML element tree.

    The root is named after the command's CLI argument; a single
    <compressionMethod> child records DEFLATE support.
    """
    supported = "True" if self.compression_name else "False"
    xml_result = Element(self.scan_command.get_cli_argument(),
                         title=self.scan_command.get_title())
    xml_result.append(Element('compressionMethod', type="DEFLATE",
                              isSupported=supported))
    return xml_result
def insert_math(block, line):
    """Convert an ASCII-math string to MathML and embed it in *block*
    inside an XSL-FO <instream-foreign-object> wrapper."""
    instream = Element(ns + 'instream-foreign-object')
    block.append(instream)
    # scriptlevel -2 enlarges the rendered formula relative to body text
    math_obj = asciitomathml.asciitomathml.AsciiMathML(mstyle={'scriptlevel': '-2'})
    math_obj.parse_string(line)
    math_tree = math_obj.get_tree()
    instream.append(math_tree)
def read_file(file_obj):
    """Build an XSL-FO tree from *file_obj*.

    Each paragraph becomes a <block>; text between backtick pairs is treated
    as ASCII math and converted via insert_math, the rest via insert_text.
    Returns the complete fo tree.
    """
    it_obj = paragraphs(file_obj)
    fo_obj = make_fo_tree()
    flow = fo_obj[1][0]  # the <flow> element inside the page-sequence
    for para in it_obj:
        block = Element(ns + 'block')
        block.set('space-before', '12pt')
        flow.append(block)
        the_string = para
        while the_string:
            # first backtick opens a math span
            the_index = the_string.find('`')
            if the_index > -1:
                start = the_string[:the_index]
                insert_text(fo_obj, start)
                the_string = the_string[the_index + 1:]
                # second backtick closes it; an unmatched opener consumes
                # the remainder of the paragraph as math
                the_index = the_string.find('`')
                if the_index > -1:
                    math = the_string[:the_index]
                    the_string = the_string[the_index + 1:]
                    insert_math(block, math)
                else:
                    math = the_string
                    insert_math(block, math)
                    break
            else:
                # no math left: emit the tail as plain text
                insert_text(fo_obj, the_string)
                break
    return fo_obj
def render_documentheader(self, standoffheader):
    """Create the documentHeader Element.

    Parameters
    ----------
    standoffheader : object
        Header object providing version, filedesc, profiledesc and datadesc.

    Returns
    -------
    documentheader : Element
        Primary element of the primary data document header.
    """
    now = datetime.datetime.now()
    pubDate = now.strftime("%Y-%m-%d")
    documentheader = Element('documentHeader',
                             {"xmlns": "http://www.xces.org/ns/GrAF/1.0/",
                              "xmlns:xlink": "http://www.w3.org/1999/xlink",
                              "docId": "PoioAPI-" + str(random.randint(1, 1000000)),
                              "version": standoffheader.version,
                              "creator": getpass.getuser(),
                              "date.created": pubDate})
    filedesc = self.render_filedesc(standoffheader.filedesc)
    profiledesc = self.render_profiledesc(standoffheader.profiledesc)
    datadesc = self.render_datadesc(standoffheader.datadesc)
    # BUG FIX: Element.getchildren() was deprecated since Python 3.2 and
    # removed in 3.9; plain indexing is equivalent on every Python version.
    # The first two children of the datadesc element are merged into the
    # profile description.
    profiledesc.append(datadesc[0])
    profiledesc.append(datadesc[1])
    documentheader.append(filedesc)
    documentheader.append(profiledesc)
    return documentheader
def dict_to_xml(tag, d):
    """Build an Element named *tag* with one text child per (key, value)
    pair of *d*; values are stringified."""
    parent = Element(tag)
    for name, value in d.items():
        node = Element(name)
        node.text = str(value)
        parent.append(node)
    return parent
def sort_time(source):
    """Sort the source Element elements along their time (for annotations)
    and id (for relations).

    Returns a new Element

    NOTE: Python 2 only — uses the print statement, long(), and the
    comparison-function form of list.sort().
    """
    dest = Element(source.tag)
    dest.attrib.update(source.attrib)
    antag = tag('annotation')
    reltag = tag('relation')
    rel = [e for e in source if e.tag == reltag]
    rel.sort(cmp_id)
    an = [e for e in source if e.tag == antag]
    # Pre-parse begin times: cache the millisecond begin on the element so
    # cmp_time does not have to re-parse it on every comparison
    for a in an:
        f = a.find(tag('millisecond-fragment'))
        if f is not None:
            a._begin = long(f.attrib['begin'])
        else:
            # missing fragment: warn and sort the annotation to the front
            print "Error: cannot find begin time for ", a.attrib['id']
            a._begin = 0
    an.sort(cmp_time)
    # annotations first (by time), then relations (by id)
    for e in an:
        dest.append(e)
    for e in rel:
        dest.append(e)
    return dest
def write_header_elements(self, g):
    """Helper method for write_header: build and return the <graphHeader>
    element describing graph *g* (tag usage, dependencies, annotation
    spaces and roots)."""
    header = Element('graphHeader')
    header.append(self.render_tag_usage(g))
    deps_el = SubElement(header, 'dependencies')
    for dep in (g.header.depends_on or []):
        if dep:
            SubElement(deps_el, 'dependsOn', {'f.id': dep})
    spaces_el = SubElement(header, 'annotationSpaces')
    for space in (g.annotation_spaces or []):
        SubElement(spaces_el, 'annotationSpace', {'as.id': space.as_id})
    roots = g.header.roots
    if roots:
        # the <roots> element is only emitted when at least one root exists
        roots_el = SubElement(header, 'roots')
        for root_id in roots:
            if root_id:
                SubElement(roots_el, 'root').text = root_id
    return header
def _create_xml_node(self, key, value=''): key = key.replace(' ', '').strip() if key[0].isdigit(): # Would generate invalid XML key = 'oid-' + key # Tags cannot start with a digit xml_node = Element(key) xml_node.text = value.strip() return xml_node
def _command_resum_rate(self, target): """ Performs 100 session resumptions with the server in order to estimate the session resumption rate. """ # Create a thread pool and process the jobs NB_THREADS = 20 MAX_RESUM = 100 thread_pool = ThreadPool() for _ in xrange(MAX_RESUM): thread_pool.add_job((self._resume_with_session_id, (target, ))) thread_pool.start(NB_THREADS) # Format session ID results (txt_resum, xml_resum) = self._format_resum_id_results(thread_pool, MAX_RESUM) # Text output cmd_title = 'Resumption Rate with Session IDs' txt_result = [self.PLUGIN_TITLE_FORMAT(cmd_title)+' '+ txt_resum[0]] txt_result.extend(txt_resum[1:]) # XML output xml_result = Element('resum_rate', title = cmd_title) xml_result.append(xml_resum) thread_pool.join() return PluginBase.PluginResult(txt_result, xml_result)
def as_xml(self):
    """Return this result as XML: a root named after the plugin command
    containing one <compressionMethod> element for DEFLATE support."""
    is_supported = "True" if self.compression_name else "False"
    xml_result = Element(self.plugin_command, title=self.COMMAND_TITLE)
    xml_result.append(Element('compressionMethod', type="DEFLATE",
                              isSupported=is_supported))
    return xml_result
def getBaseXML():
    """Return (envelope, body) skeleton elements for a Cisco AXL SOAP
    request: <soapenv:Envelope> with empty Header and Body children."""
    envelope = Element('soapenv:Envelope', {
        'xmlns:soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
        'xmlns:ns': 'http://www.cisco.com/AXL/API/11.0',
    })
    SubElement(envelope, 'soapenv:Header')
    body = SubElement(envelope, 'soapenv:Body')
    return envelope, body
def _get_metadata_node(location): node = Element('location_data') for key, value in location.metadata.items(): element = Element(key) element.text = value node.append(element) return node
def new_node(self, node, caption, type, text='', expand=True, edit=True, modified=True, pos='sub', element=None):
    """Create a tree item under *node* together with its backing XML element.

    type: element tag; 'folder' selects the folder-item factory.
    element: reuse an existing Element instead of creating a fresh one.
    pos: insertion position passed through to add_element / add_new_*.
    Returns the newly created tree node. The new node is selected, the
    parent is optionally expanded, and label editing optionally started
    (GUI calls are deferred via wx.CallAfter).
    """
    # element = self.get_element(node)
    if element is None:
        new_element = Element(type, caption=caption)
        new_element.text = text
    else:
        new_element = element
    self.add_element(node, new_element, pos)
    if type == 'folder':
        new_node = self.add_new_folder(node, caption, data={'element': new_element}, modified=modified, pos=pos)
    else:
        new_node = self.add_new_node(node, caption, data={'element': new_element}, modified=modified, pos=pos)
    if expand:
        # defer: the tree may not have processed the insertion yet
        wx.CallAfter(self.tree.Expand, node)
    self.tree.SelectItem(new_node)
    if edit:
        wx.CallAfter(self.tree.EditLabel, new_node)
    return new_node
def getItemsXML(expedition_id, category_id):
    """
    Endpoint to return an XML List of all items associated with a certain
    expedition and category
    :param expedition_id:
    :param category_id:

    NOTE: Python 2 only (print statement). One <expedition> subtree is
    emitted per matching item.
    """
    items = session.query(Item).filter_by(
        expedition_id=expedition_id, category_id=category_id).all()
    root = Element('allItems')
    comment = Comment('XML Endpoint Listing '
                      'all Item for a specific Category and Expedition')
    root.append(comment)
    for i in items:
        # nested structure: expedition > category_name > item_name
        ex = SubElement(root, 'expedition')
        ex.text = i.expedition.title
        category_name = SubElement(ex, 'category_name')
        category_description = SubElement(category_name, 'category_description')
        category_picture = SubElement(category_name, 'category_picture')
        category_name.text = i.category.name
        category_description.text = i.category.description
        category_picture.text = i.category.picture
        item_name = SubElement(category_name, 'item_name')
        item_decription = SubElement(item_name, 'item_description')
        item_picture = SubElement(item_name, 'item_picture')
        item_name.text = i.name
        item_decription.text = i.description
        item_picture.text = i.picture
    # debug print of the generated document
    print tostring(root)
    return app.response_class(tostring(root), mimetype='application/xml')
def createElement(elementName, value, root):
    """Append a child named *elementName* to *root*.

    The child's text is *value*, or a single space when *value* is falsy
    (keeps the element from serializing as self-closing).
    """
    node = Element(elementName)
    root.append(node)
    node.text = value if value else " "
def process_task(self, target, command, args):
    """Test *target* for client-initiated and secure session renegotiation.

    Returns a PluginBase.PluginResult with both text and XML output.
    """
    ctSSL_initialize()
    try:
        (can_reneg, is_secure) = self._test_renegotiation(target)
    finally:
        # always release the ctSSL global state, even when the test raises
        ctSSL_cleanup()
    # Text output
    reneg_txt = 'Honored' if can_reneg else 'Rejected'
    secure_txt = 'Supported' if is_secure else 'Not supported'
    cmd_title = 'Session Renegotiation'
    txt_result = [self.PLUGIN_TITLE_FORMAT.format(cmd_title)]
    RENEG_FORMAT = ' {0:<35} {1}'
    txt_result.append(RENEG_FORMAT.format('Client-initiated Renegotiations:', reneg_txt))
    txt_result.append(RENEG_FORMAT.format('Secure Renegotiation: ', secure_txt))
    # XML output
    xml_reneg_attr = {'canBeClientInitiated' : str(can_reneg),
                      'isSecure' : str(is_secure)}
    xml_reneg = Element('sessionRenegotiation', attrib = xml_reneg_attr)
    xml_result = Element(command, title = cmd_title)
    xml_result.append(xml_reneg)
    return PluginBase.PluginResult(txt_result, xml_result)
def addToWorkingSet(newProjectPath):
    """Move *newProjectPath* to the top of the COLT working-set file
    (~/.colt/workingset.xml), preserving the other recorded projects.

    The file is re-read (when present), the path is de-duplicated, inserted
    at position 0, and the whole list is written back.
    """
    workingSetFilePath = os.path.expanduser("~") + os.sep + ".colt" + os.sep + "workingset.xml"
    projectsList = []
    # Populate projects list from the existing file, if any
    if os.path.exists(workingSetFilePath):
        workingSetElement = parse(workingSetFilePath).getroot()
        for projectElement in workingSetElement:
            projectPath = projectElement.attrib["path"]
            if projectPath:
                projectsList.append(projectPath)
    # Remove project path from the list.
    # BUG FIX: the original used filter(), whose Python 3 return value is an
    # iterator without .insert(); a list comprehension works on both 2 and 3.
    projectsList = [p for p in projectsList if p != newProjectPath]
    # Push new project to the front
    projectsList.insert(0, newProjectPath)
    # Save the list
    workingSetElement = Element("workingset")
    workingSetElement.set("openRecent", "true")
    for projectPath in projectsList:
        projectElement = SubElement(workingSetElement, "project")
        projectElement.set("path", projectPath)
    # with-statement guarantees the file is closed even if write() raises
    with open(workingSetFilePath, "w") as workingSetFile:
        workingSetFile.write(tostring(workingSetElement))
def _saveTracks(self, tracks): element = Element("tracks") for track in tracks: track_element = self._saveTrack(track) element.append(track_element) return element
def _saveTimelineObjects(self, timeline_objects): element = Element("timeline-objects") for timeline_object in timeline_objects: timeline_object_element = self._saveTimelineObject(timeline_object) element.append(timeline_object_element) return element
def convert_MoocCheckboxesAssessment_to_xml(par):
    """Convert checkbox assignment into xml.

    par: mapping with display_name, max_attempts, question, answers,
    correct_answers (indices) and optionally explanation.
    """
    problem = Element('problem')
    problem.attrib['display_name'] = str(par['display_name'])
    problem.attrib['max_attempts'] = str(par['max_attempts'])
    SubElement(problem, 'p').text = par['question']
    SubElement(problem, 'p').text = 'Select the answers that match'
    group = SubElement(SubElement(problem, 'choiceresponse'), 'checkboxgroup')
    group.attrib['label'] = "Select the answers that match"
    group.attrib['direction'] = "vertical"
    for idx, answer in enumerate(par['answers']):
        choice = SubElement(group, 'choice')
        choice.attrib['correct'] = 'true' if idx in par['correct_answers'] else 'false'
        choice.text = answer
    if 'explanation' in par:
        add_solution(problem, par['explanation'])
    return problem
def convert_MoocMultipleChoiceAssessment_to_xml(par):
    """Convert multiple choice question into xml.

    par: mapping with display_name, max_attempts, question, answers,
    correct_answer (single index) and optionally explanation.
    """
    problem = Element('problem')
    problem.attrib['display_name'] = str(par['display_name'])
    problem.attrib['max_attempts'] = str(par['max_attempts'])
    SubElement(problem, 'p').text = par['question']
    SubElement(problem, 'p').text = 'Please select correct answer'
    group = SubElement(SubElement(problem, 'multiplechoiceresponse'), 'choicegroup')
    group.attrib['label'] = "Please select correct answer"
    group.attrib['type'] = "MultipleChoice"
    for idx, answer in enumerate(par['answers']):
        choice = SubElement(group, 'choice')
        choice.attrib['correct'] = 'true' if idx == par['correct_answer'] else 'false'
        choice.text = answer
    if 'explanation' in par:
        add_solution(problem, par['explanation'])
    return problem
def story_feed(request, story):
    """Render an RSS 2.0 feed for the story with slug *story*.

    The channel describes the story; one <item> is emitted per chapter.
    Returns an HttpResponse with the UTF-8 serialized feed.
    """
    story = Story.objects.get(slug=story)
    rss = Element('rss')
    rss.set("version", "2.0")
    channel = SubElement(rss, 'channel')
    title = SubElement(channel, 'title')
    title.text = story.title
    link = SubElement(channel, 'link')
    link.text = request.build_absolute_uri(reverse("story"))
    desc = SubElement(channel, 'description')
    desc.text = story.description
    chapters = story.chapters.all()
    for index in chapters:
        item = SubElement(channel, 'item')
        title_c = SubElement(item, 'title')
        title_c.text = index.title
        # `link` is deliberately rebound per item
        link = SubElement(item, 'link')
        link.text = request.build_absolute_uri(index.get_absolute_url())
    return HttpResponse(tostring(rss, encoding='UTF-8'))
def convert_MoocVideo_to_xml(par):
    """Convert video_cell with MoocVideo into xml: every entry of *par*
    becomes a stringified attribute on a <video> element."""
    video = Element('video')
    for key in par:
        video.attrib[key] = str(par[key])
    return video
def execute(self, mappings, source):
    """Writes the given language code/name mappings to Android XML resource files.

    source = string indicating source of the data, for example, 'cldr'
    mappings = list of dictionaries containing mappings

    NOTE: Python 2 only (iteritems, str.decode on byte strings). One
    values-<code>/arrays.xml file is written per language code.
    """
    # In order to be able to to localize a particular, limited set of words across multiple
    # languages, here we define a list of language codes to support for every resource file
    # generated. Where localized language names are missing, a place holder is printed. If
    # ['*'] is specified, then all available language code/name pairs are generated.
    COVERAGE_LIST = ['*']
    # Get language names in English as a dict for inclusion in XML comments
    english_pairs = {}
    for entry in mappings:
        for k, v in entry.iteritems():
            if k == 'en':
                english_pairs = v
                break
    for entry in mappings:
        for k, v in entry.iteritems():
            # NOTE(review): `dir` shadows the builtin of the same name
            dir = os.path.join(os.path.dirname(__file__), self.get_directory(source) + "../" + source + "-android/values-" + k)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(dir + "/arrays.xml", "w") as f:
                top = Element('resources')
                # top-of-file comment: English name of this locale when known
                if k in english_pairs.keys():
                    top_comment = ElementTree.Comment(' ' + english_pairs[k].decode('utf-8') + ' (' + k + ') ')
                else:
                    top_comment = ElementTree.Comment(' ' + k + ' ')
                top.append(top_comment)
                child = SubElement(top, 'string-array')
                child.attrib['name'] = 'languages_all'
                if '*' not in COVERAGE_LIST:
                    # Iterate through only those codes in COVERAGE_LIST
                    for lang_code in COVERAGE_LIST:
                        if lang_code in english_pairs.keys():
                            comment = ElementTree.Comment(' ' + lang_code + ' - ' + english_pairs[lang_code].decode('utf-8') + ' ')
                        else:
                            comment = ElementTree.Comment(' ' + lang_code + ' ')
                        child.append(comment)
                        # NOTE(review): `entry` here shadows the outer loop variable
                        entry = SubElement(child, 'item')
                        if lang_code in v.keys():
                            entry.text = v[lang_code].decode('utf-8')
                        else:
                            entry.text = "UNDEFINED"
                else:
                    # Iterate through all available language codes
                    for lang_code, lang_name in sorted(v.iteritems()):
                        if lang_code in english_pairs.keys():
                            comment = ElementTree.Comment(' ' + lang_code + ' - ' + english_pairs[lang_code].decode('utf-8') + ' ')
                        else:
                            comment = ElementTree.Comment(' ' + lang_code + ' ')
                        child.append(comment)
                        entry = SubElement(child, 'item')
                        entry.text = lang_name.decode('utf-8')
                f.write(self.prettify(top))
def AddButtonAction():
    """Tk button handler: add a new rest-area <list> entry to Data1.

    Reads name/food/price from the entry widgets; refuses duplicates by
    serviceAreaName, otherwise appends a new element and rewrites
    Data1.xml. All widget/tree references are module globals.
    """
    global NameLabel, RenderText, window, FoodLabel, PriceLabel
    RenderText.configure(state='normal')
    RenderText.delete(0.0, END)
    Name = str(NameLabel.get())
    Food = str(FoodLabel.get())
    Price = str(PriceLabel.get())
    switch = 0  # set to 1 when a duplicate name is found
    for location in Data1.findall("list"):
        if Name in location.findtext("serviceAreaName"):
            RenderText.insert(INSERT, "삐빅! 중복입니다.")
            switch = 1
    if switch == 0:
        # build <list><serviceAreaName/><batchMenu/><salePrice/></list>
        New = Element("list")
        NewName = Element("serviceAreaName")
        NewName.text = Name
        New.append(NewName)
        NewFood = Element("batchMenu")
        NewFood.text = Food
        New.append(NewFood)
        NewPrice = Element("salePrice")
        NewPrice.text = Price
        New.append(NewPrice)
        RenderText.insert(INSERT, Name)
        RenderText.insert(INSERT, "휴게소 추가 완료.")
        Data1.append(New)
    # reset the UI and persist the document
    # (assumed to run regardless of the duplicate check — TODO confirm
    # original indentation)
    RenderText.configure(state = 'disabled')
    NameLabel.delete(0, END)
    FoodLabel.delete(0, END)
    PriceLabel.delete(0, END)
    doc.write("Data1.xml", encoding="utf-8", xml_declaration=True)
def toXML(self, input_commstruct, analysis, limit_value, parameter_dic):
    """Render the motif classification *analysis* as an XML element tree.

    analysis: (classified_families, motif_classification, file_pathes).
    limit_value: motifs whose hypergeometric e-value exceeds it are skipped.
    parameter_dic: extra attributes copied onto the root when not already set.
    Returns the root classification Element.
    """
    # Retrieve the data from the analyzed statistics
    classified_families = analysis[0]
    motif_classification = analysis[1]
    file_pathes = analysis[2]
    # Create the root element with its attributes
    classification_element = Element(FinalOutputProcessor.CLASSIFICATION_TAG)
    classification_element.attrib[FinalOutputProcessor.PIPELINE_NAME_ATT] = self.component.pipelineName
    # Global statistics copied straight from the input comm-struct
    classification_element.attrib[FinalOutputProcessor.REFERENCE_SPECIES_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.REFERENCE_SPECIES]
    classification_element.attrib[FinalOutputProcessor.ALIGNED_SPECIES_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.ALIGNED_SPECIES]
    classification_element.attrib[FinalOutputProcessor.REFERENCE_MOTIF_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.REFERENCE_MOTIF]
    classification_element.attrib[FinalOutputProcessor.BEDSEQUENCES_NUMBER_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.BEDSEQUENCES_NUMBER]
    classification_element.attrib[FinalOutputProcessor.BEDSEQUENCES_MIN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.BEDSEQUENCES_MIN_SIZE]
    classification_element.attrib[FinalOutputProcessor.BEDSEQUENCES_MAX_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.BEDSEQUENCES_MAX_SIZE]
    classification_element.attrib[FinalOutputProcessor.BEDSEQUENCES_MEAN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.BEDSEQUENCES_MEAN_SIZE]
    classification_element.attrib[FinalOutputProcessor.BEDSEQUENCES_TOTAL_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.BEDSEQUENCES_TOTAL_SIZE]
    classification_element.attrib[FinalOutputProcessor.MSA_NUMBER_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.MSA_NUMBER]
    classification_element.attrib[FinalOutputProcessor.MSA_MIN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.MSA_MIN_SIZE]
    classification_element.attrib[FinalOutputProcessor.MSA_MAX_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.MSA_MAX_SIZE]
    classification_element.attrib[FinalOutputProcessor.MSA_MEAN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.MSA_MEAN_SIZE]
    classification_element.attrib[FinalOutputProcessor.MSA_TOTAL_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.MSA_TOTAL_SIZE]
    classification_element.attrib[FinalOutputProcessor.CONSERVED_BLOCKS_NUMBER_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.CONSERVED_BLOCKS_NUMBER]
    classification_element.attrib[FinalOutputProcessor.CONSERVED_BLOCKS_MIN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.CONSERVED_BLOCKS_MIN_SIZE]
    classification_element.attrib[FinalOutputProcessor.CONSERVED_BLOCKS_MAX_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.CONSERVED_BLOCKS_MAX_SIZE]
    classification_element.attrib[FinalOutputProcessor.CONSERVED_BLOCKS_MEAN_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.CONSERVED_BLOCKS_MEAN_SIZE]
    classification_element.attrib[FinalOutputProcessor.CONSERVED_BLOCKS_TOTAL_SIZE_ATT] = input_commstruct.paramStatistics[BedSeqAlignmentStatsCommStruct.CONSERVED_BLOCKS_TOTAL_SIZE]
    classification_element.attrib[FinalOutputProcessor.BED_SEQUENCES_SIZE_PATH_ATT] = file_pathes[FinalOutputProcessor.BED_SEQUENCES_SIZE_PATH_ATT]
    classification_element.attrib[FinalOutputProcessor.BED_SEQUENCES_SIZE_GRAPH_PATH_ATT] = file_pathes[FinalOutputProcessor.BED_SEQUENCES_SIZE_GRAPH_PATH_ATT]
    classification_element.attrib[FinalOutputProcessor.ZIP_FILE] = os.path.join(os.path.join(self.outPath, "zip"), self.component.pipelineName + ".zip")
    # Add all other parameters values (without overriding what is set above)
    for param_name in parameter_dic.keys():
        if not param_name in classification_element.attrib.keys():
            classification_element.attrib[param_name] = parameter_dic[param_name]
    # Insert the path to the BED sequences sizes histogram and graph
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.BED_SEQUENCES_SIZE_PATH_ATT, file_pathes)
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.BED_SEQUENCES_SIZE_GRAPH_PATH_ATT, file_pathes)
    # Insert the path to the Conserved Regions sizes histogram and graph if any
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.CONSERVED_BLOCKS_SIZE_PATH_ATT, file_pathes)
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.CONSERVED_BLOCKS_SIZE_GRAPH_PATH_ATT, file_pathes)
    # Insert the path to the MSA sizes histogram and graph if any
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.MSA_SIZE_PATH_ATT, file_pathes)
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.MSA_SIZE_GRAPH_PATH_ATT, file_pathes)
    # Insert the path to the BED output file if any
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.BED_OUTPUT_ATT, file_pathes)
    self.addFilePathAttribute(classification_element, FinalOutputProcessor.BIGBED_OUTPUT_ATT, file_pathes)
    # Create a root son element for each family
    for family_rank in sorted(classified_families):
        # fill the family element attributes
        family_element = Element(FinalOutputProcessor.FAMILY_TAG)
        classification_element.append(family_element)
        family = classified_families[family_rank]
        family_element.attrib[FinalOutputProcessor.FAMILY_NAME_ATT] = family
        # create a family son element for each motif in a family
        for motif_rank in sorted(motif_classification[family].keys()):
            motif_name = motif_classification[family][motif_rank]
            motif_stats = input_commstruct.motifStatistics[motif_name]
            # If the motif has its hypergeometric p-value above the limit, it is ignored
            #if not motif_stats.hasAttribute( MotifStatistics.MOTIF_HYP_PVALUE) or motif_stats.getAttributeAsfloat( MotifStatistics.MOTIF_HYP_PVALUE) > limit_value:
            #    continue
            # If the motif has its hypergeometric e-value above the limit, it is ignored
            if not motif_stats.hasAttribute(MotifStatistics.MOTIF_HYP_EVALUE) or motif_stats.getAttributeAsfloat(MotifStatistics.MOTIF_HYP_EVALUE) > limit_value:
                continue
            # Create the motif XML element
            motif_element = Element(FinalOutputProcessor.MOTIF_TAG)
            family_element.append(motif_element)
            # fill the motif element attributes
            motif_element.attrib[FinalOutputProcessor.MOTIF_NAME_ATT] = motif_name
            motif_element.attrib[FinalOutputProcessor.MOTIF_FAMILY_ATT] = family
            motif_element.attrib[FinalOutputProcessor.MOTIF_ID_ATT] = motif_stats.motifID
            motif_element.attrib[FinalOutputProcessor.MOTIF_CLASS_ATT] = motif_stats.motifClass
            motif_element.attrib[FinalOutputProcessor.MOTIF_TYPE_ATT] = motif_stats.motifType
            motif_element.attrib[FinalOutputProcessor.MOTIF_RANK_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_RANK)
            # fill the motif hit score attribute
            motif_element.attrib[FinalOutputProcessor.MOTIF_HIT_SCORE_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_HIT_SCORE)
            # fill the motif hypergeometric score and p-value attribute
            if motif_stats.hasAttribute(MotifStatistics.MOTIF_HYP_HIT_SCORE):
                motif_element.attrib[FinalOutputProcessor.MOTIF_HYP_HIT_SCORE_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_HYP_HIT_SCORE)
            if motif_stats.hasAttribute(MotifStatistics.MOTIF_HYP_PVALUE):
                motif_element.attrib[FinalOutputProcessor.MOTIF_HYP_PVALUE_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_HYP_PVALUE)
            if motif_stats.hasAttribute(MotifStatistics.MOTIF_HYP_EVALUE):
                motif_element.attrib[FinalOutputProcessor.MOTIF_HYP_EVALUE_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_HYP_EVALUE)
            # fill the motif chi2 and chi2 p-value attributes
            if motif_stats.hasAttribute(MotifStatistics.MOTIF_CHI2):
                motif_element.attrib[FinalOutputProcessor.MOTIF_CHI2_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_CHI2)
            if motif_stats.hasAttribute(MotifStatistics.MOTIF_CHI2_PVALUE):
                motif_element.attrib[FinalOutputProcessor.MOTIF_CHI2_PVALUE_ATT] = motif_stats.getAttribute(MotifStatistics.MOTIF_CHI2_PVALUE)
            # fill the motif contingency values
            if motif_stats.hasAttribute(MotifStatistics.CONTIGENCY_MOTIF_COOCCURENCE):
                motif_element.attrib[FinalOutputProcessor.MOTIFCONTIGENCY_MOTIF_COOCCURENCE] = motif_stats.getAttribute(MotifStatistics.CONTIGENCY_MOTIF_COOCCURENCE)
            if motif_stats.hasAttribute(MotifStatistics.CONTIGENCY_REFERENCE_MOTIF_BEDSEQ):
                motif_element.attrib[FinalOutputProcessor.MOTIFCONTIGENCY_REFERENCE_MOTIF_BEDSEQ] = motif_stats.getAttribute(MotifStatistics.CONTIGENCY_REFERENCE_MOTIF_BEDSEQ)
            if motif_stats.hasAttribute(MotifStatistics.CONTINGENCY_TOTAL_BEDSEQ):
                motif_element.attrib[FinalOutputProcessor.MOTIFCONTINGENCY_TOTAL_BEDSEQ] = motif_stats.getAttribute(MotifStatistics.CONTINGENCY_TOTAL_BEDSEQ)
            # fill the ratio overlap attribute (percentage with one decimal)
            motif_element.attrib[FinalOutputProcessor.MOTIF_RATIO_HOMOLOCATION] = str(int(motif_stats.getAttributeAsfloat(MotifStatistics.MOTIF_RATIO_HOMOLOCATION) * 1000.0) / float(10)) + "%"
            # Fill the motif logo and matrix attributes
            motif_element.attrib[FinalOutputProcessor.MOTIF_LOGO_ATT] = os.path.join(FinalOutputProcessor.LOGOS_DIR_NAME, motif_name + "_m1.png")
            motif_element.attrib[FinalOutputProcessor.MOTIF_MATRIX_ATT] = os.path.join(FinalOutputProcessor.LOGOS_DIR_NAME, motif_name + ".tf")
            # fill the motif element graphs
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_DISTANCE_HISTOGRAM_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_DISTANCE_HISTOGRAM_GRAPH_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_DISTANCE_HISTOGRAM_GRAPH_PDF_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_PEAK_SCORE_HISTOGRAM_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_PEAK_SCORE_HISTOGRAM_GRAPH_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_COLOCATION_HISTOGRAM_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_COLOCATION_HISTOGRAM_GRAPH_ATT, motif_name, file_pathes)
            self.addMotifFilePathAttribute(motif_element, FinalOutputProcessor.MOTIF_COLOCATION_HISTOGRAM_GRAPH_PDF_ATT, motif_name, file_pathes)
    return classification_element
#!/usr/bin/env python
from pprint import pprint
from execo import TaktukRemote
from execo.time_utils import sleep
from execo_g5k import get_host_site, get_host_cluster, get_kavlan_host_name, \
    get_g5k_sites
from vm5k import list_vm, get_oargrid_job_vm5k_resources
from vm5k.plots import topology_plot
from xml.etree.ElementTree import Element, SubElement, dump

# Build an XML description (<vm5k><site><cluster>...) of the hosts reserved
# by the given OARGRID job.
state = Element('vm5k')
resources = get_oargrid_job_vm5k_resources(49509)
sites = sorted([site for site in resources.keys() if site != 'global'])
kavlan = resources['global']['kavlan']
hosts = []
for site in sites:
    hosts += map(lambda host: get_kavlan_host_name(host, kavlan),
                 resources[site]['hosts'])
for host in hosts:
    site = get_host_site(host)
    # BUG FIX: an Element with no children is falsy, so truth-testing the
    # result of find() re-created <site> nodes that already existed (and
    # would do the same for <cluster>). Compare against None instead.
    el_site = state.find("./site[@id='" + site + "']")
    if el_site is None:
        el_site = SubElement(state, 'site', attrib={'id': site})
    cluster = get_host_cluster(host)
    found_cluster = el_site.find("./cluster[@id='" + cluster + "']")
    if found_cluster is not None:
        el_cluster = found_cluster
from xml.etree import ElementTree as et
import parsetag
from xml.dom import minidom
import codecs
import os

# Markers used when scanning the wiki-format HED specification text
ATTRIBUTE_DEFINITION_STRING = '\'\'\'Attribute Definitions:'
CHANGE_LOG_STRING = 'Changelog'
SYNTAX_STRING = '\'\'\'Syntax'
ROOT_TAG = '\'\'\''
HED_NODE_NAME = 'HED'
HED_VERSION_STRING = 'HED version:'
START_STRING = '!# start hed'
UNIT_CLASS_STRING = '\'\'\'Unit classes'
END_STRING = '!# end hed'

# Root node of the generated HED schema tree.
# NOTE(review): `Element` is not imported by the lines visible here —
# presumably it comes from a star import elsewhere; verify.
hed_node = Element('HED')


# Prints an element tree
def prettify(elem):
    """Return *elem* pretty-printed via minidom, with the trailing
    newline removed."""
    rough_string = et.tostring(elem, encoding='utf-8', method='xml')
    reparsed = minidom.parseString(rough_string)
    prettified_string = reparsed.toprettyxml(indent=" ", encoding='utf-8')
    return prettified_string[:prettified_string.rfind('\n')]


# Removes the last line from a string
def remove_last_line_from_string(s):
    """Return *s* truncated at its final newline (unchanged if none)."""
    return s[:s.rfind('\n')]
def devices_xml(self):
    """Assemble the <devices> element: the emulator path followed by the
    pre-built disk definition."""
    root = Element("devices")
    emulator_el = SubElement(root, "emulator")
    emulator_el.text = self.emulator
    root.append(self.disk_xml)
    return root
def _show_data(self, show_obj):
    """
    Creates an elementTree XML structure for a MediaBrowser-style series.xml
    returns the resulting data object.

    show_obj: a TVShow instance to create the NFO for

    Returns an ElementTree on success, False when the indexer data is too
    incomplete (missing series name or id); re-raises indexer lookup errors.
    """
    indexer_lang = show_obj.lang

    # NOTE(review): the b'...' dict keys / attrib keys throughout suggest
    # Python-2-era code run through a bytes-literal converter — confirm
    # the indexer API really expects bytes keys.
    lINDEXER_API_PARMS = sickrage.INDEXER_API(
        show_obj.indexer).api_params.copy()

    lINDEXER_API_PARMS[b'actors'] = True

    if indexer_lang and not indexer_lang == sickrage.INDEXER_DEFAULT_LANGUAGE:
        lINDEXER_API_PARMS[b'language'] = indexer_lang

    if show_obj.dvdorder != 0:
        lINDEXER_API_PARMS[b'dvdorder'] = True

    t = sickrage.INDEXER_API(
        show_obj.indexer).indexer(**lINDEXER_API_PARMS)

    # MediaBrowser expects a <details><movie ...> wrapper even for TV shows.
    rootNode = Element("details")
    tv_node = SubElement(rootNode, "movie")
    tv_node.attrib[b"isExtra"] = "false"
    tv_node.attrib[b"isSet"] = "false"
    tv_node.attrib[b"isTV"] = "true"

    try:
        myShow = t[int(show_obj.indexerid)]
    except indexer_shownotfound:
        sickrage.LOGGER.error("Unable to find show with id " +
                              str(show_obj.indexerid) +
                              " on tvdb, skipping it")
        raise
    except indexer_error:
        sickrage.LOGGER.error(
            "TVDB is down, can't use its data to make the NFO")
        raise

    # check for title and id
    if not (getattr(myShow, 'seriesname', None) and
            getattr(myShow, 'id', None)):
        sickrage.LOGGER.info("Incomplete info for show with id " +
                             str(show_obj.indexerid) + " on " +
                             sickrage.INDEXER_API(show_obj.indexer).name +
                             ", skipping it")
        return False

    SeriesName = SubElement(tv_node, "title")
    SeriesName.text = myShow[b'seriesname']

    # Genres come pipe-separated from the indexer.
    if getattr(myShow, "genre", None):
        Genres = SubElement(tv_node, "genres")
        for genre in myShow[b'genre'].split('|'):
            if genre and genre.strip():
                cur_genre = SubElement(Genres, "Genre")
                cur_genre.text = genre.strip()

    if getattr(myShow, 'firstaired', None):
        FirstAired = SubElement(tv_node, "premiered")
        FirstAired.text = myShow[b'firstaired']

    # Derive <year> from the first-aired date; silently skip on parse failure.
    if getattr(myShow, "firstaired", None):
        try:
            year_text = str(
                datetime.datetime.strptime(myShow[b"firstaired"],
                                           dateFormat).year)
            if year_text:
                year = SubElement(tv_node, "year")
                year.text = year_text
        except Exception:
            pass

    if getattr(myShow, 'overview', None):
        plot = SubElement(tv_node, "plot")
        plot.text = myShow[b"overview"]

    # Indexer rating is 0-10 float; MediaBrowser wants a 0-100 int.
    if getattr(myShow, 'rating', None):
        try:
            rating = int(float(myShow[b'rating']) * 10)
        except ValueError:
            rating = 0
        if rating:
            Rating = SubElement(tv_node, "rating")
            Rating.text = str(rating)

    if getattr(myShow, 'status', None):
        Status = SubElement(tv_node, "status")
        Status.text = myShow[b'status']

    if getattr(myShow, "contentrating", None):
        mpaa = SubElement(tv_node, "mpaa")
        mpaa.text = myShow[b"contentrating"]

    if getattr(myShow, 'imdb_id', None):
        imdb_id = SubElement(tv_node, "id")
        imdb_id.attrib[b"moviedb"] = "imdb"
        imdb_id.text = myShow[b'imdb_id']

    if getattr(myShow, 'id', None):
        indexerid = SubElement(tv_node, "indexerid")
        indexerid.text = myShow[b'id']

    if getattr(myShow, 'runtime', None):
        Runtime = SubElement(tv_node, "runtime")
        Runtime.text = myShow[b'runtime']

    # _actors is the pre-fetched actor list requested via b'actors' above.
    if getattr(myShow, '_actors', None):
        cast = SubElement(tv_node, "cast")
        for actor in myShow[b'_actors']:
            if 'name' in actor and actor[b'name'].strip():
                cur_actor = SubElement(cast, "actor")
                cur_actor.text = actor[b'name'].strip()

    # Pretty-print in place before handing back the tree.
    indentXML(rootNode)

    data = ElementTree(rootNode)

    return data
def _ep_data(self, ep_obj):
    """
    Creates an elementTree XML structure for a MediaBrowser style episode.xml
    and returns the resulting data object.

    show_obj: a TVShow instance to create the NFO for

    Returns an ElementTree on success, False when the indexer is down,
    None when required episode data is missing.
    """

    # The root episode plus any episodes merged into the same file.
    eps_to_write = [ep_obj] + ep_obj.relatedEps

    indexer_lang = ep_obj.show.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        lINDEXER_API_PARMS = sickrage.INDEXER_API(
            ep_obj.show.indexer).api_params.copy()

        if indexer_lang and not indexer_lang == sickrage.INDEXER_DEFAULT_LANGUAGE:
            lINDEXER_API_PARMS[b'language'] = indexer_lang

        if ep_obj.show.dvdorder != 0:
            lINDEXER_API_PARMS[b'dvdorder'] = True

        t = sickrage.INDEXER_API(
            ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
        myShow = t[ep_obj.show.indexerid]
    except indexer_shownotfound as e:
        raise ShowNotFoundException(e.message)
    except indexer_error as e:
        sickrage.LOGGER.error(
            "Unable to connect to TVDB while creating meta files - skipping - {}"
            .format(e))
        return False

    rootNode = Element("details")
    movie = SubElement(rootNode, "movie")

    movie.attrib[b"isExtra"] = "false"
    movie.attrib[b"isSet"] = "false"
    movie.attrib[b"isTV"] = "true"

    # write an MediaBrowser XML containing info for all matching episodes
    for curEpToWrite in eps_to_write:

        try:
            myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
        except (indexer_episodenotfound, indexer_seasonnotfound):
            sickrage.LOGGER.info(
                "Unable to find episode %dx%d on %s... has it been removed? Should I delete from db?"
                % (curEpToWrite.season, curEpToWrite.episode,
                   sickrage.INDEXER_API(ep_obj.show.indexer).name))
            return None

        if curEpToWrite == ep_obj:
            # root (or single) episode

            # default to today's date for specials if firstaired is not set
            if curEpToWrite.season == 0 and not getattr(
                    myEp, 'firstaired', None):
                myEp[b'firstaired'] = str(datetime.date.fromordinal(1))

            if not (getattr(myEp, 'episodename', None) and
                    getattr(myEp, 'firstaired', None)):
                return None

            # All fields for the root episode are written onto <movie>.
            episode = movie

            if curEpToWrite.name:
                EpisodeName = SubElement(episode, "title")
                EpisodeName.text = curEpToWrite.name

            SeasonNumber = SubElement(episode, "season")
            SeasonNumber.text = str(curEpToWrite.season)

            EpisodeNumber = SubElement(episode, "episode")
            EpisodeNumber.text = str(curEpToWrite.episode)

            # <year> derived from the show's first-aired date; parse errors
            # are deliberately swallowed.
            if getattr(myShow, "firstaired", None):
                try:
                    year_text = str(
                        datetime.datetime.strptime(myShow[b"firstaired"],
                                                   dateFormat).year)
                    if year_text:
                        year = SubElement(episode, "year")
                        year.text = year_text
                except:
                    pass

            if getattr(myShow, "overview", None):
                plot = SubElement(episode, "plot")
                plot.text = myShow[b"overview"]

            if curEpToWrite.description:
                Overview = SubElement(episode, "episodeplot")
                Overview.text = curEpToWrite.description

            if getattr(myShow, 'contentrating', None):
                mpaa = SubElement(episode, "mpaa")
                mpaa.text = myShow[b"contentrating"]

            # Rating only for single-episode files (0-10 float -> 0-100 int).
            if not ep_obj.relatedEps and getattr(myEp, "rating", None):
                try:
                    rating = int((float(myEp[b'rating']) * 10))
                except ValueError:
                    rating = 0
                if rating:
                    Rating = SubElement(episode, "rating")
                    Rating.text = str(rating)

            if getattr(myEp, 'director', None):
                director = SubElement(episode, "director")
                director.text = myEp[b'director']

            if getattr(myEp, 'writer', None):
                writer = SubElement(episode, "credits")
                writer.text = myEp[b'writer']

            # Cast = pipe-separated guest stars plus the show's actor list.
            # NOTE(review): `basestring` is Python-2 only.
            if getattr(myShow, '_actors', None) or getattr(
                    myEp, 'gueststars', None):
                cast = SubElement(episode, "cast")
                if getattr(myEp, 'gueststars', None) and isinstance(
                        myEp[b'gueststars'], basestring):
                    for actor in (x.strip()
                                  for x in myEp[b'gueststars'].split('|')
                                  if x.strip()):
                        cur_actor = SubElement(cast, "actor")
                        cur_actor.text = actor
                if getattr(myShow, '_actors', None):
                    for actor in myShow[b'_actors']:
                        if 'name' in actor and actor[b'name'].strip():
                            cur_actor = SubElement(cast, "actor")
                            cur_actor.text = actor[b'name'].strip()

        else:
            # append data from (if any) related episodes
            # (relies on EpisodeName/Overview created for the root episode)
            if curEpToWrite.name:
                if not EpisodeName.text:
                    EpisodeName.text = curEpToWrite.name
                else:
                    EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name

            if curEpToWrite.description:
                if not Overview.text:
                    Overview.text = curEpToWrite.description
                else:
                    Overview.text = Overview.text + "\r" + curEpToWrite.description

    # Pretty-print in place before handing back the tree.
    indentXML(rootNode)
    data = ElementTree(rootNode)

    return data
def csvToXml(scsv, fxml):
    """Convert CSV content into an XML tree and write it to *fxml*.

    scsv: file-like object (or iterable of lines) holding CSV text; the
          first row is treated as the header row.
    fxml: destination passed straight to ElementTree.write (path or file).

    Each data row becomes <Row> under <Data>, with one child element per
    column named after its header (spaces stripped so tags stay valid).
    """
    reader = csv.reader(scsv)
    # BUGFIX: reader.next() is Python-2 only; the next() builtin works on
    # both Python 2 and 3 iterators.
    headers = [h.replace(' ', '') for h in next(reader)]
    root = Element('Data')
    for row in reader:
        eRow = Element('Row')
        root.append(eRow)
        for tag, text in zip(headers, row):
            e = Element(tag)
            e.text = text
            eRow.append(e)
    # External helper: formats the tree in place before serialization.
    pretty(root)
    et = ElementTree(root)
    et.write(fxml)
class OwlBuild():
    """Builds an OWL (RDF/XML) rendering of the schema.org vocabulary.

    The document is assembled as an ElementTree under self.dom from the
    triples in SdoTermSource.sourceGraph(): owl:Class per type,
    owl:ObjectProperty / owl:DatatypeProperty per property, and
    owl:NamedIndividual per enumeration value.
    """
    def __init__(self):
        # Counters are informational only; nothing in this class reads them.
        self.typesCount = self.propsCount = self.namedCount = 0
        self.createDom()
        self.loadGraph()

    def getContent(self):
        """Return the pretty-printed document as a str."""
        #return ET.tostring(self.dom)
        return self.prettify(self.dom).decode()

    def prettify(self, elem):
        """Pretty-print *elem* via minidom; returns UTF-8 encoded bytes."""
        # log.info("doc: %s" % ET.tostring(elem))
        doc = minidom.parseString(ET.tostring(elem))
        return doc.toprettyxml(encoding='UTF-8')

    def createDom(self):
        """Create the rdf:RDF root, ontology header and section comments."""
        self.dom = Element('rdf:RDF')
        for (k, v) in NAMESPACES.items():
            self.dom.set(k, v)
        self.dom.append(Comment("\n\tGenerated from Schema.org version: %s released: %s\n\t" % (getVersion(), getVersionDate(getVersion()))))
        self.ont = SubElement(self.dom, "owl:Ontology")
        self.ont.set("rdf:about", VOCABURI)
        info = SubElement(self.ont, "owl:versionInfo")
        info.set("rdf:datatype", "http://www.w3.org/2001/XMLSchema#string")
        info.text = getVersion()
        x = SubElement(self.ont, "rdfs:label")
        x.text = "Schema.org Vocabulary"
        x = SubElement(self.ont, "dcterms:modified")
        # NOTE(review): "#dat" looks like a truncated "#date" — confirm
        # against the intended XSD datatype before relying on it.
        x.set("rdf:datatype", "http://www.w3.org/2001/XMLSchema#dat")
        x.text = getVersionDate(getVersion())
        self.dom.append(Comment("\n\t/////////////////////\n\t/ Definitions\n\t/////////////////////\n\n\t"))

    def loadGraph(self):
        """Feed the full schema.org source graph into list()."""
        self.list(SdoTermSource.sourceGraph())

    def list(self, graph):
        """Walk *graph* and emit classes, properties and named individuals."""
        types = {}
        props = {}
        exts = []  # NOTE(review): populated nowhere — apparently vestigial
        self.dom.append(Comment("\n\t/////////////////////\n\t/ Class Definitions\n\t/////////////////////\n\t"))
        # Collect schema.org classes first so output order is sorted by URI.
        for (s, p, o) in graph.triples((None, RDF.type, RDFS.Class)):
            if s.startswith("http://schema.org"):
                types.update({s: graph.identifier})
        for t in sorted(types.keys()):
            self.outputType(t, graph)
        self.dom.append(Comment("\n\t/////////////////////\n\t/ Property Definitions\n\t/////////////////////\n\t"))
        for (s, p, o) in graph.triples((None, RDF.type, RDF.Property)):
            if s.startswith("http://schema.org"):
                props.update({s: graph.identifier})
        for p in sorted(props.keys()):
            self.outputProp(p, graph)
        self.dom.append(Comment("\n\t/////////////////////\n\t/ Named Individuals Definitions\n\t/////////////////////\n\t"))
        self.outputEnums(graph)
        # schema:True / schema:False are not enumeration members, so they
        # are emitted explicitly.
        self.outputNamedIndividuals(VOCABURI + "True", graph)
        self.outputNamedIndividuals(VOCABURI + "False", graph)

    def outputType(self, uri, graph):
        """Emit one owl:Class element for *uri* from its graph triples."""
        self.typesCount += 1
        typ = SubElement(self.dom, "owl:Class")
        typ.set("rdf:about", uri)
        ext = None
        for (p, o) in graph.predicate_objects(uri):
            if p == RDFS.label:
                l = SubElement(typ, "rdfs:label")
                l.set("xml:lang", "en")
                l.text = o
            elif p == RDFS.comment:
                c = SubElement(typ, "rdfs:comment")
                c.set("xml:lang", "en")
                c.text = Markdown.parse(o)
            elif p == RDFS.subClassOf:
                s = SubElement(typ, "rdfs:subClassOf")
                s.set("rdf:resource", o)
            elif p == URIRef(VOCABURI + "isPartOf"):  # Defined in an extension
                ext = str(o)
            elif p == RDF.type and o == URIRef(VOCABURI + "DataType"):  # A datatype
                # Datatypes additionally subclass schema:DataType.
                s = SubElement(typ, "rdfs:subClassOf")
                s.set("rdf:resource", VOCABURI + "DataType")
        typ.append(self.addDefined(uri, ext))

    def outputProp(self, uri, graph):
        """Emit an owl:ObjectProperty or owl:DatatypeProperty for *uri*.

        A property is a DatatypeProperty only if every declared rangeIncludes
        value is a schema.org datatype; otherwise it is an ObjectProperty and
        DEFAULTRANGES are appended to its range union.
        """
        self.propsCount += 1
        children = []
        domains = {}
        ranges = []
        datatypeonly = True
        ext = None
        for (p, o) in graph.predicate_objects(uri):
            if p == RDFS.label:
                l = Element("rdfs:label")
                l.set("xml:lang", "en")
                l.text = o
                children.append(l)
            elif p == RDFS.comment:
                c = Element("rdfs:comment")
                c.set("xml:lang", "en")
                c.text = Markdown.parse(o)
                children.append(c)
            elif p == RDFS.subPropertyOf:
                sub = Element("rdfs:subPropertyOf")
                subval = str(o)
                if subval == "rdf:type":  # Fixes a special case with schema:additionalType
                    subval = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
                sub.set("rdf:resource", subval)
                children.append(sub)
            elif p == INVERSEOF:
                sub = Element("owl:inverseOf")
                sub.set("rdf:resource", o)
                children.append(sub)
            elif p == SUPERSEDEDBY:
                sub = Element("schema:supersededBy")
                sub.set("rdf:resource", o)
                children.append(sub)
            elif p == DOMAININC:
                domains[o] = True
            elif p == RANGEINC:
                ranges.append(str(o))
                if str(o) not in DATATYPES:
                    datatypeonly = False
            elif p == URIRef(VOCABURI + "isPartOf"):
                ext = str(o)
        children.append(self.addDefined(uri, ext))
        if not datatypeonly:
            for r in DEFAULTRANGES:
                if r not in ranges:
                    ranges.append(r)
        # Domains/ranges are expressed as owl:unionOf collections of classes.
        if len(domains):
            d = Element("rdfs:domain")
            children.append(d)
            cl = SubElement(d, "owl:Class")
            u = SubElement(cl, "owl:unionOf")
            u.set("rdf:parseType", "Collection")
            for target in domains.keys():
                targ = SubElement(u, "owl:Class")
                targ.set("rdf:about", target)
        if len(ranges):
            r = Element("rdfs:range")
            children.append(r)
            cl = SubElement(r, "owl:Class")
            u = SubElement(cl, "owl:unionOf")
            u.set("rdf:parseType", "Collection")
            for target in ranges:
                targ = SubElement(u, "owl:Class")
                targ.set("rdf:about", target)
        if datatypeonly:
            prop = SubElement(self.dom, "owl:DatatypeProperty")
        else:
            prop = SubElement(self.dom, "owl:ObjectProperty")
        prop.set("rdf:about", uri)
        for sub in children:
            prop.append(sub)

    def addDefined(self, uri, ext=None):
        """Return an rdfs:isDefinedBy element pointing at the term's
        https documentation page (in *ext* or the core vocabulary)."""
        if not ext:
            ext = "http://schema.org"
        ext = ext.replace("http://", "https://")
        defn = Element("rdfs:isDefinedBy")
        path = "%s/%s" % (ext, os.path.basename(uri))
        defn.set("rdf:resource", path)
        return defn

    def outputEnums(self, graph):
        """Emit every member of every schema:Enumeration subclass as an
        owl:NamedIndividual typed by its parent enumeration."""
        q = """ prefix schema: <http://schema.org/>
        select Distinct ?enum ?parent where{
       ?parent rdfs:subClassOf schema:Enumeration.
        ?enum rdfs:subClassOf ?parent.
      }
      """
        enums = list(graph.query(q))
        #log.info("Enum Count %s" % len(enums))
        for row in enums:
            self.outputNamedIndividuals(row.enum, graph, parent=row.parent)

    def outputNamedIndividuals(self, idividual, graph, parent=None):
        """Emit one owl:NamedIndividual for *idividual*; when *parent* is
        given an rdfs:subClassOf link to it is added."""
        self.namedCount += 1
        typ = SubElement(self.dom, "owl:NamedIndividual")
        typ.set("rdf:about", idividual)
        ext = None
        for (p, o) in graph.predicate_objects(URIRef(idividual)):
            if p == RDFS.label:
                l = SubElement(typ, "rdfs:label")
                l.set("xml:lang", "en")
                l.text = o
            elif p == RDFS.comment:
                c = SubElement(typ, "rdfs:comment")
                c.set("xml:lang", "en")
                c.text = Markdown.parse(o)
            elif p == URIRef(VOCABURI + "isPartOf"):
                ext = str(o)
        typ.append(self.addDefined(idividual, ext))
        if parent:
            s = SubElement(typ, "rdfs:subClassOf")
            s.set("rdf:resource", parent)
def Udacity2Voc(self, dataSet_dir="example/udacity/", anno_dir="label/labels.csv", label_dir="results/", label_list=None):
    """Convert a Udacity-format CSV annotation file into Pascal-VOC XML files.

    Each CSV line holds space-separated fields:
    image xmin ymin xmax ymax ? "label" ["state"].
    One VOC XML is created per image under *label_dir*; when the file already
    exists the new bounding box is appended as an extra <object>.

    Interface unchanged; label_list defaults to self.label_list.
    """
    print(
        color.BOLD + color.RED +
        "------------------------- CSV Parsing Start-------------------------"
        + color.END)
    if label_list is None:
        label_list = self.label_list
    work_dir = getcwd() + "/" + dataSet_dir
    anno_dir = work_dir + anno_dir
    label_dir = work_dir + label_dir
    print("Input file : {}".format(anno_dir))
    # IMPROVED: the original leaked the file handle; `with` closes it.
    with open(anno_dir, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        try:
            for line in reader:
                print(
                    color.BOLD + color.RED +
                    "------------------------- CSV Parsing -------------------------"
                    + color.END)
                convertList = line[0].split(" ")
                length = len(convertList)
                image_name = convertList[0]
                xmin = convertList[1]
                ymin = convertList[2]
                xmax = convertList[3]
                ymax = convertList[4]
                label = convertList[6].split('"')[1]
                # BUGFIX: was `length is 8` — identity comparison of ints is
                # an implementation detail; use equality.
                if length == 8:
                    state = convertList[7].split('"')[1]
                    label = label + state
                # Open output result files
                img = Image.open(dataSet_dir + "JPEG/" + image_name)
                img_width = int(img.size[0])
                img_height = int(img.size[1])
                img_depth = 3  # PIL size has no channel count; assume RGB
                print("image size (width, height) : {}".format(img.size))
                print()
                print("Output : {}".format(label_dir + image_name[:-3] + "xml"))
                print()
                print("class name, index : ({})".format(label))
                result_outpath = str(label_dir + image_name[:-3] + "xml")
                if not os.path.isfile(result_outpath):
                    # First box for this image: create the annotation skeleton.
                    xml_annotation = self._voc_annotation_root(
                        image_name, label_dir, img_width, img_height, img_depth)
                else:
                    # Annotation exists: append another <object> to it.
                    xml_annotation = parse(result_outpath).getroot()
                xml_annotation.append(
                    self._voc_object(label, xmin, ymin, xmax, ymax))
                self.indent(xml_annotation)
                dump(xml_annotation)
                ElementTree(xml_annotation).write(result_outpath)
                print(
                    color.BOLD + color.RED +
                    "------------------------- CSV Parsing -------------------------"
                    + color.END)
            print(
                color.BOLD + color.RED +
                "------------------------- CSV Parsing END -------------------------"
                + color.END)
        except Exception as e:
            print(color.BOLD + color.RED + "ERROR : {}".format(e) + color.END)

def _voc_annotation_root(self, image_name, label_dir, img_width, img_height, img_depth):
    """Build the skeleton VOC <annotation> element
    (folder/filename/path/source/size/segmented)."""
    xml_annotation = Element("annotation")
    for tag, text in (("folder", "udacity"),
                      ("filename", str(image_name)),
                      ("path", str(label_dir + image_name))):
        child = Element(tag)
        child.text = text
        xml_annotation.append(child)
    xml_source = Element("source")
    xml_database = Element("database")
    xml_database.text = "Unknown"
    xml_source.append(xml_database)
    xml_annotation.append(xml_source)
    xml_size = Element("size")
    for tag, value in (("width", img_width),
                       ("height", img_height),
                       ("depth", img_depth)):
        child = Element(tag)
        child.text = str(value)
        xml_size.append(child)
    xml_annotation.append(xml_size)
    xml_segmented = Element("segmented")
    xml_segmented.text = "0"
    xml_annotation.append(xml_segmented)
    return xml_annotation

def _voc_object(self, label, xmin, ymin, xmax, ymax):
    """Build one VOC <object> element for a single bounding box."""
    xml_object = Element("object")
    for tag, text in (("name", label), ("pose", "Unspecified"),
                      ("truncated", "0"), ("difficult", "0")):
        child = Element(tag)
        child.text = text
        xml_object.append(child)
    xml_bndbox = Element("bndbox")
    for tag, value in (("xmin", xmin), ("ymin", ymin),
                       ("xmax", xmax), ("ymax", ymax)):
        child = Element(tag)
        child.text = str(value)
        xml_bndbox.append(child)
    xml_object.append(xml_bndbox)
    return xml_object
def create_node(tag, attribs, text):
    """Build and return a new Element named *tag* carrying the attribute
    dict *attribs*, with its text content set to *text*."""
    node = Element(tag, attribs)
    node.text = text
    return node
class _Widget(object): def __init__(self, w_type, name, x_pos, y_pos, width, height): self.root = Element('widget', type=w_type, version='2.0.0') name_child = SubElement(self.root, 'name') name_child.text = name shared_functions = _SharedPropertyFunctions(self.root) shared_functions.integer_property('x', x_pos) shared_functions.integer_property('y', y_pos) shared_functions.integer_property('width', width) shared_functions.integer_property('height', height) self._prop_factory = Property(self.root) def visible(self, visible): child = SubElement(self.root, 'visible') child.text = str(visible) def version(self, version): self.root.attrib['version'] = version #def class_name(self, name): # pass #def rule(self, rule): # pass #def scripts(self, script): # pass def tool_tip(self, tool_tip): child = SubElement(self.root, 'tooltip') child.text = tool_tip def find_element(self, tag): elements = self.root.findall(tag) # check to make sure there are not more than 1 elements # we don't want duplicate tags if len(elements) > 1: print( 'Warning, more than one element of the same tag! Returning a list' ) return elements elif len(elements) == 0: return None else: return elements[0] def remove_element(self, tag): element = self.find_element(tag) if element is not None: self.root.remove(element) def get_element_value(self, tag): return self.find_element(tag).text # From: https://pymotw.com/3/xml.etree.ElementTree/create.html def prettify(self, elem): """Return a pretty-printed XML string for the Element. """ rough_string = tostring(elem, 'utf-8') reparse_xml = minidom.parseString(rough_string) return reparse_xml.toprettyxml(indent=" ", newl="\n") def __str__(self): return self.prettify(self.root) def __repr__(self): return self.prettify(self.root)
from xml.etree.ElementTree import Element, dump, SubElement

# Minimal ElementTree demo: build a small <note> document and print it.
note = Element("note")
to = Element("tos")  # child node
to.text = "Tove"
note.append(to)
# SubElement creates and attaches the child in one call.
SubElement(note, "from").text = "Jani"
note.attrib["data"] = "20120104"
# dump() writes the serialized element to stdout (debugging helper).
dump(note)
def outputProp(self, uri, graph):
    """Emit an owl:ObjectProperty or owl:DatatypeProperty element for *uri*.

    Scans the property's triples in *graph* for label, comment,
    subPropertyOf, inverseOf, supersededBy, domainIncludes, rangeIncludes
    and isPartOf. The property is a DatatypeProperty only when every
    declared range is a schema.org datatype; otherwise DEFAULTRANGES are
    appended and an ObjectProperty is emitted.
    """
    self.propsCount += 1
    children = []
    domains = {}
    ranges = []
    datatypeonly = True
    ext = None
    for (p, o) in graph.predicate_objects(uri):
        if p == RDFS.label:
            l = Element("rdfs:label")
            l.set("xml:lang", "en")
            l.text = o
            children.append(l)
        elif p == RDFS.comment:
            c = Element("rdfs:comment")
            c.set("xml:lang", "en")
            c.text = Markdown.parse(o)
            children.append(c)
        elif p == RDFS.subPropertyOf:
            sub = Element("rdfs:subPropertyOf")
            subval = str(o)
            if subval == "rdf:type":  # Fixes a special case with schema:additionalType
                subval = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
            sub.set("rdf:resource", subval)
            children.append(sub)
        elif p == INVERSEOF:
            sub = Element("owl:inverseOf")
            sub.set("rdf:resource", o)
            children.append(sub)
        elif p == SUPERSEDEDBY:
            sub = Element("schema:supersededBy")
            sub.set("rdf:resource", o)
            children.append(sub)
        elif p == DOMAININC:
            # Collected as dict keys to deduplicate domain classes.
            domains[o] = True
        elif p == RANGEINC:
            ranges.append(str(o))
            if str(o) not in DATATYPES:
                datatypeonly = False
        elif p == URIRef(VOCABURI + "isPartOf"):
            # Defined in an extension vocabulary.
            ext = str(o)
    children.append(self.addDefined(uri, ext))
    if not datatypeonly:
        for r in DEFAULTRANGES:
            if r not in ranges:
                ranges.append(r)
    # Domains/ranges become owl:unionOf collections of owl:Class refs.
    if len(domains):
        d = Element("rdfs:domain")
        children.append(d)
        cl = SubElement(d, "owl:Class")
        u = SubElement(cl, "owl:unionOf")
        u.set("rdf:parseType", "Collection")
        for target in domains.keys():
            targ = SubElement(u, "owl:Class")
            targ.set("rdf:about", target)
    if len(ranges):
        r = Element("rdfs:range")
        children.append(r)
        cl = SubElement(r, "owl:Class")
        u = SubElement(cl, "owl:unionOf")
        u.set("rdf:parseType", "Collection")
        for target in ranges:
            targ = SubElement(u, "owl:Class")
            targ.set("rdf:about", target)
    if datatypeonly:
        prop = SubElement(self.dom, "owl:DatatypeProperty")
    else:
        prop = SubElement(self.dom, "owl:ObjectProperty")
    prop.set("rdf:about", uri)
    for sub in children:
        prop.append(sub)
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
    """Return the GEXF attribute id for *title*, registering a fresh
    <attribute> declaration (with optional <default>) on first use.

    Ids are cached per (edge_or_node, mode) in self.attr; declarations are
    grouped under a matching <attributes> container on the graph element.
    """
    try:
        # Fast path: the attribute was registered earlier.
        return self.attr[edge_or_node][mode][title]
    except KeyError:
        # First sighting: mint an id and remember it.
        fresh_id = str(next(self.attr_id))
        self.attr[edge_or_node][mode][title] = fresh_id
        attribute = Element("attribute",
                            **{"id": fresh_id, "title": title,
                               "type": attr_type})
        # Record a <default> child when the caller supplied one.
        default_value = default.get(title)
        if default_value is not None:
            default_element = Element("default")
            default_element.text = str(default_value)
            attribute.append(default_element)
        # Locate the <attributes> container with matching class and mode
        # (last match wins, mirroring the scan order).
        container = None
        for candidate in self.graph_element.findall("attributes"):
            if (candidate.get("class") == edge_or_node
                    and candidate.get("mode", "static") == mode):
                container = candidate
        if container is None:
            # No container yet: create one at the front of the graph element.
            container = Element("attributes",
                                **{"mode": mode, "class": edge_or_node})
            self.graph_element.insert(0, container)
        container.append(attribute)
        return fresh_id
def _ep_data(self, ep_obj):
    """
    Creates an elementTree XML structure for a WDTV style episode.xml
    and returns the resulting data object.

    ep_obj: a TVShow instance to create the NFO for

    Returns an ElementTree on success, False when the show lookup fails,
    None when required episode data is missing.
    """
    # The root episode plus any episodes merged into the same file.
    eps_to_write = [ep_obj] + ep_obj.related_episodes

    indexer_lang = ep_obj.show.lang or sickrage.app.config.indexer_default_language

    lINDEXER_API_PARMS = IndexerApi(ep_obj.show.indexer).api_params.copy()

    lINDEXER_API_PARMS['language'] = indexer_lang

    if ep_obj.show.dvdorder != 0:
        lINDEXER_API_PARMS['dvdorder'] = True

    t = IndexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
    myShow = t[ep_obj.show.indexer_id]
    if not myShow:
        return False

    rootNode = Element("details")

    # write an WDTV XML containing info for all matching episodes
    for curEpToWrite in eps_to_write:
        try:
            myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
        except (indexer_episodenotfound, indexer_seasonnotfound):
            sickrage.app.log.info(
                "Unable to find episode %dx%d on %s... has it been removed? Should I delete from db?"
                % (curEpToWrite.season, curEpToWrite.episode,
                   IndexerApi(ep_obj.show.indexer).name))
            return None

        # Specials (season 0) default to the minimum date when unaired.
        if ep_obj.season == 0 and not getattr(myEp, 'firstaired', None):
            myEp["firstaired"] = str(datetime.date.min)

        if not (getattr(myEp, 'episodename', None) and
                getattr(myEp, 'firstaired', None)):
            return None

        # Multi-episode files nest each episode in its own <details> block;
        # a single episode writes straight onto the root.
        if len(eps_to_write) > 1:
            episode = SubElement(rootNode, "details")
        else:
            episode = rootNode

        # TODO: get right EpisodeID
        episodeID = SubElement(episode, "id")
        episodeID.text = str(curEpToWrite.indexer_id)

        title = SubElement(episode, "title")
        title.text = ep_obj.pretty_name()

        if getattr(myShow, 'seriesname', None):
            seriesName = SubElement(episode, "series_name")
            seriesName.text = myShow["seriesname"]

        if curEpToWrite.name:
            episodeName = SubElement(episode, "episode_name")
            episodeName.text = curEpToWrite.name

        seasonNumber = SubElement(episode, "season_number")
        seasonNumber.text = str(curEpToWrite.season)

        episodeNum = SubElement(episode, "episode_number")
        episodeNum.text = str(curEpToWrite.episode)

        # <firstaired> element is always present but only filled for real dates.
        firstAired = SubElement(episode, "firstaired")

        if curEpToWrite.airdate > datetime.date.min:
            firstAired.text = str(curEpToWrite.airdate)

        # Derive <year> from the show's first-aired date; parse errors
        # are deliberately swallowed.
        if getattr(myShow, 'firstaired', None):
            try:
                year_text = str(
                    datetime.datetime.strptime(myShow["firstaired"],
                                               dateFormat).year)
                if year_text:
                    year = SubElement(episode, "year")
                    year.text = year_text
            except Exception:
                pass

        # Runtime is skipped for specials (season 0).
        if curEpToWrite.season != 0 and getattr(myShow, 'runtime', None):
            runtime = SubElement(episode, "runtime")
            runtime.text = myShow["runtime"]

        # Genres arrive pipe-separated; WDTV wants " / " separation.
        if getattr(myShow, 'genre', None):
            genre = SubElement(episode, "genre")
            genre.text = " / ".join([
                x.strip() for x in myShow["genre"].split('|') if x.strip()
            ])

        if getattr(myEp, 'director', None):
            director = SubElement(episode, "director")
            director.text = myEp['director']

        for actor in t.actors(int(ep_obj.show.indexer_id)):
            if not ('name' in actor and actor['name'].strip()):
                continue
            cur_actor = SubElement(episode, "actor")
            cur_actor_name = SubElement(cur_actor, "name")
            cur_actor_name.text = actor['name']
            if 'role' in actor and actor['role'].strip():
                cur_actor_role = SubElement(cur_actor, "role")
                cur_actor_role.text = actor['role'].strip()

        if curEpToWrite.description:
            overview = SubElement(episode, "overview")
            overview.text = curEpToWrite.description

    # Make it purdy
    indent_xml(rootNode)
    data = ElementTree(rootNode)

    return data
def add_edges(self, G, graph_element):
    """Append an <edges> element to *graph_element* describing every edge
    of *G*, delegating attribute/viz/spell data to the class helpers."""

    def edge_key_data(G):
        # helper function to unify multigraph and graph edge iterator
        is_multi = G.is_multigraph()
        if is_multi:
            raw_iter = G.edges(data=True, keys=True)
        else:
            raw_iter = G.edges(data=True)
        for item in raw_iter:
            if is_multi:
                u, v, key, data = item
                edge_data = data.copy()
                edge_data.update(key=key)
            else:
                u, v, data = item
                edge_data = data.copy()
            # Honor an explicit "id" attribute; otherwise mint a fresh,
            # unused id from the counter.
            edge_id = edge_data.pop("id", None)
            if edge_id is None:
                edge_id = next(self.edge_id)
                while str(edge_id) in self.all_edge_ids:
                    edge_id = next(self.edge_id)
            self.all_edge_ids.add(str(edge_id))
            yield u, v, edge_id, edge_data

    edges_element = Element("edges")
    for u, v, key, edge_data in edge_key_data(G):
        kw = {"id": str(key)}
        # Promote well-known data entries to XML attributes of <edge>.
        for field in ("label", "weight", "type"):
            if field in edge_data:
                kw[field] = str(edge_data.pop(field))
        for field in ("start", "end"):
            if field in edge_data:
                moment = edge_data.pop(field)
                kw[field] = str(moment)
                self.alter_graph_mode_timeformat(moment)
        source_id = str(G.nodes[u].get("id", u))
        target_id = str(G.nodes[v].get("id", v))
        edge_element = Element("edge",
                               source=source_id, target=target_id, **kw)
        default = G.graph.get("edge_default", {})
        # GEXF 1.1 used <slices>; later versions use <spells>.
        if self.VERSION == "1.1":
            edge_data = self.add_slices(edge_element, edge_data)
        else:
            edge_data = self.add_spells(edge_element, edge_data)
        edge_data = self.add_viz(edge_element, edge_data)
        edge_data = self.add_attributes("edge", edge_element, edge_data,
                                        default)
        edges_element.append(edge_element)
    graph_element.append(edges_element)
def outputLdml(self, parsed_json):
    """Render a keyboard layout parsed from JSON as an LDML keyboard
    document.

    parsed_json: dict with 'locale' (or 'id' as fallback), 'title',
        'mappings' (layer name -> {start_point -> mapping string}) and
        optionally 'transform'.

    Returns the DOCTYPE header plus the serialized XML as a str.
    """
    # Create a tree; fall back to the layout id when no locale is given.
    if 'locale' in parsed_json:
        attrib = {'locale': parsed_json['locale']}
    else:
        attrib = {'locale': parsed_json['id']}
    elem = Element('keyboard', attrib)

    elem.append(Element('version', {'platform': '10', 'number': '1.0'}))

    names = Element('names')
    names.append(Element('name', {'value': parsed_json['title']}))
    elem.append(names)

    elem.append(Element('settings',
                        {'fallback': 'omit', 'transformPartial': 'hide'}))

    displayMap = None

    # Get all the key mappings for each level; output one keyMap per layer.
    for layer in parsed_json['mappings']:
        try:
            modifier_value = self.layers_to_modifiers[layer]
        except KeyError:  # narrowed from a bare except
            modifier_value = 'UNKNOWN'
            # BUGFIX: the original passed `layer` as a second print argument
            # instead of %-formatting it into the message.
            print('Modifier value unknown: %s' % layer)
        if modifier_value:
            keymap = Element('keyMap', {'modifiers': modifier_value})
        else:
            keymap = Element('keyMap')

        elements = parsed_json['mappings'][layer]
        for start_point in elements:
            # Get the key mappings.
            codes = self.mapping_parse.split(elements[start_point])
            if start_point == '':
                # The whole list E00 ... A3 of ISO key positions.
                keys = []
                for row in self.maprows:
                    for col in range(self.row_base[row], self.row_max[row]):
                        keys.append('%s%02d' % (row, col))
            else:
                keys = []
                print('STARTING SOMEWHERE ELSE')

            # TODO:
            # For anything with keycaps, e.g., {{S||ᐊᐱᓯ||\u202f}},
            # add display mapping.
            index = 0
            for code in codes:
                if code and index < len(keys):
                    output = code
                    # Remove {{ }} if needed
                    if code[0] == '{' and len(code) > 1 and code[1] == '{':
                        if code[2:5] == "S||":
                            # {{S||display||output}}: keycap with display text.
                            parts = code.split('||')
                            display = parts[1]
                            output = parts[2][:-2]
                            if display:
                                # BUGFIX: test `is None`, not truthiness —
                                # an Element with no children is falsy.
                                if displayMap is None:
                                    displayMap = Element('displayMap')
                                displayMap.append(
                                    Element('display',
                                            {'mapOutput': output,
                                             'display': display}))
                        else:
                            # Get the contents only
                            output = code[2:-2]
                    if output:
                        keymap.append(Element('map', {'iso': keys[index],
                                                      'to': output}))
                    index += 1
        elem.append(keymap)

    if displayMap is not None:
        elem.append(displayMap)

    # transform items (robust to a missing 'transform' key)
    if parsed_json.get('transform'):
        transforms = Element('transforms', {'type': 'simple'})
        for key in parsed_json['transform'].keys():
            to_item = parsed_json['transform'][key]
            for part in key.split('|'):
                transforms.append(Element('transform',
                                          {'from': part, 'to': to_item}))
        elem.append(transforms)

    xml_output = ET.tostring(elem, encoding='utf-8')
    # BUGFIX: tostring(..., encoding='utf-8') returns bytes on Python 3;
    # decode before concatenating with the str DOCTYPE header.
    if isinstance(xml_output, bytes):
        xml_output = xml_output.decode('utf-8')
    # Add header info
    doctype = '<!DOCTYPE keyboard SYSTEM "../dtd/ldmlKeyboard.dtd">\n'
    return doctype + xml_output
def as_xml(self):
    """Serialize this cipher-suite scan result as an XML Element.

    The result element carries the command name/title and whether the
    protocol is supported, followed by the preferred, accepted, rejected
    and errored cipher-suite sections (always present, possibly empty).
    """
    protocol_supported = len(self.accepted_cipher_list) > 0
    result_xml = Element(self.scan_command.get_cli_argument(),
                         title=self.scan_command.get_title(),
                         isProtocolSupported=str(protocol_supported))

    # Preferred cipher (server's choice), when the server advertised one.
    preferred_xml = Element('preferredCipherSuite')
    if self.preferred_cipher:
        preferred_xml.append(
            self._format_accepted_cipher_xml(self.preferred_cipher))
    result_xml.append(preferred_xml)

    # Every cipher the server accepted.
    accepted_xml = Element('acceptedCipherSuites')
    for cipher in self.accepted_cipher_list:
        accepted_xml.append(self._format_accepted_cipher_xml(cipher))
    result_xml.append(accepted_xml)

    def failed_suites_xml(section_tag, cipher_list, message_attr):
        # Rejected and errored suites share the same element shape; only
        # the section tag and the message attribute they expose differ.
        section = Element(section_tag)
        for cipher in cipher_list:
            section.append(Element('cipherSuite', attrib={
                'name': cipher.name,
                'anonymous': str(cipher.is_anonymous),
                'connectionStatus': getattr(cipher, message_attr)
            }))
        return section

    result_xml.append(failed_suites_xml('rejectedCipherSuites',
                                        self.rejected_cipher_list,
                                        'handshake_error_message'))
    result_xml.append(failed_suites_xml('errors',
                                        self.errored_cipher_list,
                                        'error_message'))
    return result_xml
def add_attributes(self, node_or_edge, xml_obj, data, default):
    """Append an <attvalues> element describing *data* to *xml_obj*.

    *node_or_edge* selects the attribute registry ("node" or "edge");
    *default* supplies per-attribute default values.  Returns *data*
    unchanged.  An empty dict short-circuits without appending anything.
    """
    attvalues = Element("attvalues")
    if not data:
        return data

    def special_float_repr(text):
        # GEXF spells the IEEE specials differently from Python's str()
        return {"inf": "INF", "nan": "NaN", "-inf": "-INF"}.get(text, text)

    mode = "static"
    for key, value in data.items():
        # rename generic multigraph key to avoid any name conflict
        if key == "key":
            key = "networkx_key"
        val_type = type(value)
        if val_type not in self.xml_type:
            raise TypeError(
                f"attribute value type is not allowed: {val_type}")
        if isinstance(value, list):
            # dynamic data: a list of (val, start, end) triples; the first
            # triple with a non-None bound switches the mode to dynamic
            for val, start, end in value:
                val_type = type(val)
                if start is not None or end is not None:
                    mode = "dynamic"
                    self.alter_graph_mode_timeformat(start)
                    self.alter_graph_mode_timeformat(end)
                    break
            attr_id = self.get_attr_id(
                str(key), self.xml_type[val_type], node_or_edge, default, mode)
            for val, start, end in value:
                entry = Element("attvalue")
                entry.attrib["for"] = attr_id
                text = str(val)
                entry.attrib["value"] = (
                    special_float_repr(text) if val_type == float else text)
                if start is not None:
                    entry.attrib["start"] = str(start)
                if end is not None:
                    entry.attrib["end"] = str(end)
                attvalues.append(entry)
        else:
            # static data
            mode = "static"
            attr_id = self.get_attr_id(
                str(key), self.xml_type[val_type], node_or_edge, default, mode)
            entry = Element("attvalue")
            entry.attrib["for"] = attr_id
            text = str(value).lower() if isinstance(value, bool) else str(value)
            entry.attrib["value"] = (
                special_float_repr(text) if val_type == float else text)
            attvalues.append(entry)
    xml_obj.append(attvalues)
    return data
def _append_sample_attribute(attributes, c):
    """Append one SAMPLE_ATTRIBUTE (TAG/VALUE/UNIT triple) built from
    an ISA characteristic/factor-value dict to *attributes*."""
    ch = Element("SAMPLE_ATTRIBUTE")
    tag = Element("TAG")
    tag.text = c["category"]["annotationValue"]
    value = Element("VALUE")
    value.text = c["value"]["annotationValue"]
    unit = Element("UNIT")
    unit.text = c["unit"]["annotationValue"]
    ch.append(tag)
    ch.append(value)
    ch.append(unit)
    attributes.append(ch)


def do_sample_xml(sub_id):
    """Build an ENA SAMPLE_SET XML document for the given submission id.

    Returns the pretty-printed XML string, or False for submission types
    that have no sample (e.g. "ena-ant" annotation submissions).
    """
    sub = Submission().get_record(sub_id)
    dfs = [DataFile().get_record(d) for d in sub["bundle"]]
    df = dfs[0]

    sample_set = Element("SAMPLE_SET")
    sample_set.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
    sample_set.set("xsi:noNamespaceSchemaLocation",
                   "ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_5/SRA.sample.xsd")

    try:
        smp = get_sample(df)
    except Exception:
        # some submission types do not require samples such as annotation submissions
        if sub["repository"] == "ena-ant":
            return False
        # BUGFIX: the original bare except swallowed the error for every other
        # repository and then fell through to an undefined `smp` (NameError);
        # re-raise the original exception instead.
        raise

    # iterate samples to create xml
    sample = Element("SAMPLE")
    sample_alias = get_sample_ref(df)
    sample.set("alias", sample_alias)
    sample.set(
        "center_name",
        df["description"]["attributes"]["study_type"]["study_center_name"])
    sample_set.append(sample)

    sample_name = Element("SAMPLE_NAME")
    sample_title = Element("TITLE")
    sample_title.text = smp["name"]
    sample.append(sample_title)
    sample.append(sample_name)

    # get Source object for organism
    s = Source().get_record(smp["derivesFrom"][0])
    taxon_id = Element("TAXON_ID")
    # get integer portion of NCBI taxon id
    taxon_id.text = s["organism"]["termAccession"].split('_')[1]
    sample_name.append(taxon_id)
    scientific_name = Element("SCIENTIFIC_NAME")
    scientific_name.text = s["organism"]["annotationValue"]
    sample_name.append(scientific_name)
    common_name = Element("COMMON_NAME")
    sample_name.append(common_name)
    sample.append(Element("DESCRIPTION"))

    # characteristics and factor values share the same attribute layout
    attributes = Element("SAMPLE_ATTRIBUTES")
    for c in smp["characteristics"]:
        _append_sample_attribute(attributes, c)
    for c in smp["factorValues"]:
        _append_sample_attribute(attributes, c)
    sample.append(attributes)

    return prettify(sample_set)
class GEXFWriter(GEXF):
    """Writer for GEXF-format graph files.

    Builds the GEXF XML tree for a networkx graph; use the module-level
    write_gexf() function rather than instantiating this directly.
    """

    def __init__(self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft"):
        # Set up the root <gexf> element with the namespace/schema constants
        # supplied by the GEXF base class, then add the <meta> header and
        # the per-writer id counters and attribute registries.
        self.construct_types()
        self.prettyprint = prettyprint
        self.encoding = encoding
        self.set_version(version)
        self.xml = Element(
            "gexf",
            {
                "xmlns": self.NS_GEXF,
                "xmlns:xsi": self.NS_XSI,
                "xsi:schemaLocation": self.SCHEMALOCATION,
                "version": self.VERSION,
            },
        )

        # Make meta element a non-graph element
        # Also add lastmodifieddate as attribute, not tag
        meta_element = Element("meta")
        subelement_text = f"NetworkX {nx.__version__}"
        SubElement(meta_element, "creator").text = subelement_text
        meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d"))
        self.xml.append(meta_element)

        register_namespace("viz", self.NS_VIZ)

        # counters for edge and attribute identifiers
        self.edge_id = itertools.count()
        self.attr_id = itertools.count()
        self.all_edge_ids = set()
        # default attributes are stored in dictionaries,
        # keyed first by element class ("node"/"edge") then by mode
        self.attr = {}
        self.attr["node"] = {}
        self.attr["edge"] = {}
        self.attr["node"]["dynamic"] = {}
        self.attr["node"]["static"] = {}
        self.attr["edge"]["dynamic"] = {}
        self.attr["edge"]["static"] = {}

        if graph is not None:
            self.add_graph(graph)

    def __str__(self):
        """Return the serialized XML document as a string."""
        if self.prettyprint:
            self.indent(self.xml)
        s = tostring(self.xml).decode(self.encoding)
        return s

    def add_graph(self, G):
        """Append a <graph> element for *G* (nodes, edges, attributes)."""
        # first pass through G collecting edge ids
        for u, v, dd in G.edges(data=True):
            eid = dd.get("id")
            if eid is not None:
                self.all_edge_ids.add(str(eid))
        # set graph attributes
        if G.graph.get("mode") == "dynamic":
            mode = "dynamic"
        else:
            mode = "static"
        # Add a graph element to the XML
        if G.is_directed():
            default = "directed"
        else:
            default = "undirected"
        name = G.graph.get("name", "")
        graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name)
        self.graph_element = graph_element
        self.add_nodes(G, graph_element)
        self.add_edges(G, graph_element)
        self.xml.append(graph_element)

    def add_nodes(self, G, graph_element):
        """Append a <nodes> element for every node in *G*.

        Recognized node-data keys ("id", "label", "pid", "start", "end",
        "parents", "slices"/"spells", "viz") are consumed; the remainder
        is emitted through add_attributes().
        """
        nodes_element = Element("nodes")
        for node, data in G.nodes(data=True):
            node_data = data.copy()
            node_id = str(node_data.pop("id", node))
            kw = {"id": node_id}
            label = str(node_data.pop("label", node))
            kw["label"] = label
            try:
                pid = node_data.pop("pid")
                kw["pid"] = str(pid)
            except KeyError:
                pass
            try:
                start = node_data.pop("start")
                kw["start"] = str(start)
                self.alter_graph_mode_timeformat(start)
            except KeyError:
                pass
            try:
                end = node_data.pop("end")
                kw["end"] = str(end)
                self.alter_graph_mode_timeformat(end)
            except KeyError:
                pass
            # add node element with attributes
            node_element = Element("node", **kw)
            # add node element and attr subelements
            default = G.graph.get("node_default", {})
            node_data = self.add_parents(node_element, node_data)
            # GEXF 1.1 used <slices>; later versions use <spells>
            if self.VERSION == "1.1":
                node_data = self.add_slices(node_element, node_data)
            else:
                node_data = self.add_spells(node_element, node_data)
            node_data = self.add_viz(node_element, node_data)
            node_data = self.add_attributes("node", node_element, node_data, default)
            nodes_element.append(node_element)
        graph_element.append(nodes_element)

    def add_edges(self, G, graph_element):
        """Append an <edges> element for every edge in *G*."""

        def edge_key_data(G):
            # helper function to unify multigraph and graph edge iterator;
            # yields (u, v, edge_id, data) with a collision-free edge id
            if G.is_multigraph():
                for u, v, key, data in G.edges(data=True, keys=True):
                    edge_data = data.copy()
                    edge_data.update(key=key)
                    edge_id = edge_data.pop("id", None)
                    if edge_id is None:
                        edge_id = next(self.edge_id)
                        while str(edge_id) in self.all_edge_ids:
                            edge_id = next(self.edge_id)
                    self.all_edge_ids.add(str(edge_id))
                    yield u, v, edge_id, edge_data
            else:
                for u, v, data in G.edges(data=True):
                    edge_data = data.copy()
                    edge_id = edge_data.pop("id", None)
                    if edge_id is None:
                        edge_id = next(self.edge_id)
                        while str(edge_id) in self.all_edge_ids:
                            edge_id = next(self.edge_id)
                    self.all_edge_ids.add(str(edge_id))
                    yield u, v, edge_id, edge_data

        edges_element = Element("edges")
        for u, v, key, edge_data in edge_key_data(G):
            kw = {"id": str(key)}
            try:
                edge_label = edge_data.pop("label")
                kw["label"] = str(edge_label)
            except KeyError:
                pass
            try:
                edge_weight = edge_data.pop("weight")
                kw["weight"] = str(edge_weight)
            except KeyError:
                pass
            try:
                edge_type = edge_data.pop("type")
                kw["type"] = str(edge_type)
            except KeyError:
                pass
            try:
                start = edge_data.pop("start")
                kw["start"] = str(start)
                self.alter_graph_mode_timeformat(start)
            except KeyError:
                pass
            try:
                end = edge_data.pop("end")
                kw["end"] = str(end)
                self.alter_graph_mode_timeformat(end)
            except KeyError:
                pass
            # endpoints are referenced by their GEXF node id, not the
            # networkx node object
            source_id = str(G.nodes[u].get("id", u))
            target_id = str(G.nodes[v].get("id", v))
            edge_element = Element("edge", source=source_id, target=target_id, **kw)
            default = G.graph.get("edge_default", {})
            if self.VERSION == "1.1":
                edge_data = self.add_slices(edge_element, edge_data)
            else:
                edge_data = self.add_spells(edge_element, edge_data)
            edge_data = self.add_viz(edge_element, edge_data)
            edge_data = self.add_attributes("edge", edge_element, edge_data, default)
            edges_element.append(edge_element)
        graph_element.append(edges_element)

    def add_attributes(self, node_or_edge, xml_obj, data, default):
        """Append an <attvalues> element for *data* to *xml_obj*.

        Lists are treated as dynamic (val, start, end) triples; all other
        values are static.  Returns *data* unchanged.
        """
        # Add attrvalues to node or edge
        attvalues = Element("attvalues")
        if len(data) == 0:
            return data
        mode = "static"
        for k, v in data.items():
            # rename generic multigraph key to avoid any name conflict
            if k == "key":
                k = "networkx_key"
            val_type = type(v)
            if val_type not in self.xml_type:
                raise TypeError(
                    f"attribute value type is not allowed: {val_type}")
            if isinstance(v, list):
                # dynamic data
                for val, start, end in v:
                    val_type = type(val)
                    if start is not None or end is not None:
                        mode = "dynamic"
                        self.alter_graph_mode_timeformat(start)
                        self.alter_graph_mode_timeformat(end)
                        break
                attr_id = self.get_attr_id(str(k), self.xml_type[val_type],
                                           node_or_edge, default, mode)
                for val, start, end in v:
                    e = Element("attvalue")
                    e.attrib["for"] = attr_id
                    e.attrib["value"] = str(val)
                    # Handle nan, inf, -inf differently
                    if val_type == float:
                        if e.attrib["value"] == "inf":
                            e.attrib["value"] = "INF"
                        elif e.attrib["value"] == "nan":
                            e.attrib["value"] = "NaN"
                        elif e.attrib["value"] == "-inf":
                            e.attrib["value"] = "-INF"
                    if start is not None:
                        e.attrib["start"] = str(start)
                    if end is not None:
                        e.attrib["end"] = str(end)
                    attvalues.append(e)
            else:
                # static data
                mode = "static"
                attr_id = self.get_attr_id(str(k), self.xml_type[val_type],
                                           node_or_edge, default, mode)
                e = Element("attvalue")
                e.attrib["for"] = attr_id
                if isinstance(v, bool):
                    e.attrib["value"] = str(v).lower()
                else:
                    e.attrib["value"] = str(v)
                # Handle float nan, inf, -inf differently
                if val_type == float:
                    if e.attrib["value"] == "inf":
                        e.attrib["value"] = "INF"
                    elif e.attrib["value"] == "nan":
                        e.attrib["value"] = "NaN"
                    elif e.attrib["value"] == "-inf":
                        e.attrib["value"] = "-INF"
                attvalues.append(e)
        xml_obj.append(attvalues)
        return data

    def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
        """Return the GEXF id for *title*, registering an <attribute>
        declaration (with its default, if any) on first use."""
        # find the id of the attribute or generate a new id
        try:
            return self.attr[edge_or_node][mode][title]
        except KeyError:
            # generate new id
            new_id = str(next(self.attr_id))
            self.attr[edge_or_node][mode][title] = new_id
            attr_kwargs = {"id": new_id, "title": title, "type": attr_type}
            attribute = Element("attribute", **attr_kwargs)
            # add subelement for data default value if present
            default_title = default.get(title)
            if default_title is not None:
                default_element = Element("default")
                default_element.text = str(default_title)
                attribute.append(default_element)
            # new insert it into the XML
            attributes_element = None
            for a in self.graph_element.findall("attributes"):
                # find existing attributes element by class and mode
                a_class = a.get("class")
                a_mode = a.get("mode", "static")
                if a_class == edge_or_node and a_mode == mode:
                    attributes_element = a
            if attributes_element is None:
                # create new attributes element
                attr_kwargs = {"mode": mode, "class": edge_or_node}
                attributes_element = Element("attributes", **attr_kwargs)
                self.graph_element.insert(0, attributes_element)
            attributes_element.append(attribute)
            return new_id

    def add_viz(self, element, node_data):
        """Emit viz namespace subelements (color/size/thickness/shape/
        position) from node_data["viz"], if present."""
        viz = node_data.pop("viz", False)
        if viz:
            color = viz.get("color")
            if color is not None:
                # GEXF 1.1 colors have no alpha channel
                if self.VERSION == "1.1":
                    e = Element(
                        f"{{{self.NS_VIZ}}}color",
                        r=str(color.get("r")),
                        g=str(color.get("g")),
                        b=str(color.get("b")),
                    )
                else:
                    e = Element(
                        f"{{{self.NS_VIZ}}}color",
                        r=str(color.get("r")),
                        g=str(color.get("g")),
                        b=str(color.get("b")),
                        a=str(color.get("a")),
                    )
                element.append(e)

            size = viz.get("size")
            if size is not None:
                e = Element(f"{{{self.NS_VIZ}}}size", value=str(size))
                element.append(e)

            thickness = viz.get("thickness")
            if thickness is not None:
                e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness))
                element.append(e)

            shape = viz.get("shape")
            if shape is not None:
                # URLs become image shapes with a uri attribute
                if shape.startswith("http"):
                    e = Element(f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape))
                else:
                    e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape))
                element.append(e)

            position = viz.get("position")
            if position is not None:
                e = Element(
                    f"{{{self.NS_VIZ}}}position",
                    x=str(position.get("x")),
                    y=str(position.get("y")),
                    z=str(position.get("z")),
                )
                element.append(e)
        return node_data

    def add_parents(self, node_element, node_data):
        """Emit a <parents> subelement from node_data["parents"], if present."""
        parents = node_data.pop("parents", False)
        if parents:
            parents_element = Element("parents")
            for p in parents:
                e = Element("parent")
                e.attrib["for"] = str(p)
                parents_element.append(e)
            node_element.append(parents_element)
        return node_data

    def add_slices(self, node_or_edge_element, node_or_edge_data):
        """Emit a <slices> subelement (GEXF 1.1), if "slices" is present."""
        slices = node_or_edge_data.pop("slices", False)
        if slices:
            slices_element = Element("slices")
            for start, end in slices:
                e = Element("slice", start=str(start), end=str(end))
                slices_element.append(e)
            node_or_edge_element.append(slices_element)
        return node_or_edge_data

    def add_spells(self, node_or_edge_element, node_or_edge_data):
        """Emit a <spells> subelement (GEXF >= 1.2), if "spells" is present."""
        spells = node_or_edge_data.pop("spells", False)
        if spells:
            spells_element = Element("spells")
            for start, end in spells:
                e = Element("spell")
                if start is not None:
                    e.attrib["start"] = str(start)
                    self.alter_graph_mode_timeformat(start)
                if end is not None:
                    e.attrib["end"] = str(end)
                    self.alter_graph_mode_timeformat(end)
                spells_element.append(e)
            node_or_edge_element.append(spells_element)
        return node_or_edge_data

    def alter_graph_mode_timeformat(self, start_or_end):
        # If 'start' or 'end' appears, alter Graph mode to dynamic and
        # set timeformat; the timeformat is derived from the Python type
        # (str -> date, float -> double, int -> long)
        if self.graph_element.get("mode") == "static":
            if start_or_end is not None:
                if isinstance(start_or_end, str):
                    timeformat = "date"
                elif isinstance(start_or_end, float):
                    timeformat = "double"
                elif isinstance(start_or_end, int):
                    timeformat = "long"
                else:
                    raise nx.NetworkXError(
                        "timeformat should be of the type int, float or str")
                self.graph_element.set("timeformat", timeformat)
                self.graph_element.set("mode", "dynamic")

    def write(self, fh):
        # Serialize graph G in GEXF to the open fh
        if self.prettyprint:
            self.indent(self.xml)
        document = ElementTree(self.xml)
        document.write(fh, encoding=self.encoding, xml_declaration=True)

    def indent(self, elem, level=0):
        # in-place prettyprint formatter: adjusts .text/.tail whitespace
        # recursively so the serialized tree is indented
        i = "\n" + " " * level
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
def do_analysis_xml(sub_id):
    """Build an ENA ANALYSIS XML fragment for the given submission id
    and return it pretty-printed."""
    submission_record = Submission().get_record(sub_id)
    bundle_files = [DataFile().get_record(d) for d in submission_record["bundle"]]
    datafile = bundle_files[0]
    profile = Profile().get_record(datafile["profile_id"])
    study_type_attrs = datafile["description"]["attributes"]["study_type"]

    analysis_set = Element("ANALYSIS_SET")
    analysis = Element("ANALYSIS")
    alias = make_alias(submission_record)
    # NOTE(review): '_anaysis' looks like a typo for '_analysis', but the
    # alias may be matched by external systems, so it is preserved verbatim.
    analysis.set("alias", alias + "_anaysis")
    analysis.set("analysis_center", study_type_attrs["study_analysis_center_name"])
    analysis.set("broker_name", study_type_attrs["study_broker"])
    analysis_date = study_type_attrs["study_analysis_date"]
    # ad = analysis_date.split('/')
    # d = datetime.date(int(ad[2]), int(ad[1]), int(ad[0]))
    # analysis.set("anlalysis_date", d)
    # analysis_set.append(analysis)

    title = Element("TITLE")
    title.text = study_type_attrs["study_title"]
    analysis.append(title)

    description = Element("DESCRIPTION")
    description.text = study_type_attrs["study_description"]
    analysis.append(description)

    study_ref = Element("STUDY_REF")
    study_ref.set("refname", str(submission_record["_id"]))
    analysis.append(study_ref)

    # TODO - Sample is not required for annotation submissions....ENA
    # documentation saying it is is not correct. Will remove these stages
    # from the wizard at some point
    sample_ref = Element("SAMPLE_REF")
    sample_ref.set("refname", get_sample_ref(datafile))
    # analysis.append(sample_ref)

    analysis_type = Element("ANALYSIS_TYPE")
    SubElement(analysis_type, "SEQUENCE_ANNOTATION")
    analysis.append(analysis_type)

    # FILE entry: path is <sub_id>/<username>/<filename>
    files = Element("FILES")
    file_element = Element("FILE")
    file_hash = datafile["file_hash"]
    fqfn = str(sub_id) + '/' + data_utils.get_current_user().username + '/' + datafile["name"]
    file_element.set("filename", fqfn)
    file_element.set("filetype", "tab")
    file_element.set("checksum_method", "MD5")
    file_element.set("checksum", file_hash)
    file_element.set("unencrypted_checksum", file_hash)
    files.append(file_element)
    analysis.append(files)

    attrs = Element("ANALYSIS_ATTRIBUTES")
    for a in datafile["description"]["attributes"]["attach_study_samples"]["attributes"]:
        attr = Element("ANALYSIS_ATTRIBUTE")
        SubElement(attr, "TAG").text = a["name"]
        SubElement(attr, "VALUE").text = a["value"]
        attrs.append(attr)
    analysis.append(attrs)

    # Note: only the ANALYSIS element itself is returned; ANALYSIS_SET is
    # built but intentionally left unused (matching the original).
    return prettify(analysis)
def do_study_xml(sub_id):
    """Build an ENA STUDY_SET XML document for the given submission id
    and return it pretty-printed."""
    # fetch submission, its bundle's first datafile, and the owning profile
    submission_record = Submission().get_record(sub_id)
    bundle_files = [DataFile().get_record(d) for d in submission_record["bundle"]]
    datafile = bundle_files[0]
    profile = Profile().get_record(datafile["profile_id"])
    study_type_attrs = datafile["description"]["attributes"]["study_type"]

    # STUDY_SET root with the SRA schema reference
    study_set = Element("STUDY_SET")
    study_set.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
    study_set.set("xsi:noNamespaceSchemaLocation",
                  "ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_5/SRA.study.xsd")

    # STUDY aliased by the submission's mongo id
    study = Element("STUDY")
    study.set("alias", str(submission_record["_id"]))
    study.set("center_name", study_type_attrs["study_analysis_center_name"])
    study_set.append(study)

    # DESCRIPTOR: title, mapped study type, abstract
    descriptor = Element("DESCRIPTOR")
    SubElement(descriptor, "STUDY_TITLE").text = profile["title"]
    study_type = Element("STUDY_TYPE")
    existing_type = get_study_type_enumeration(study_type_attrs["study_type"])
    study_type.set("existing_study_type", existing_type)
    descriptor.append(study_type)
    SubElement(descriptor, "STUDY_ABSTRACT").text = profile["description"]
    study.append(descriptor)

    # STUDY_ATTRIBUTES: currently just the submission date
    study_attributes = Element("STUDY_ATTRIBUTES")
    study_attribute = Element("STUDY_ATTRIBUTE")
    SubElement(study_attribute, "TAG").text = "Submission Date"
    SubElement(study_attribute, "VALUE").text = datetime.datetime.now().strftime('%Y-%m-%d')
    study_attributes.append(study_attribute)
    # here we can loop to add other STUDY_ATTRIBUTES
    study.append(study_attributes)

    return prettify(study_set)
def _dump_signal(signal, node_refs, signal_element):
    """Populate *signal_element* with the KCD representation of *signal*.

    *node_refs* maps receiver node names to their numeric NodeRef ids.
    Attributes matching the KCD defaults (length 1, little-endian,
    slope 1, intercept 0) are omitted.
    """
    signal_element.set('name', signal.name)
    signal_element.set('offset', str(_start_bit(signal.start, signal.byte_order)))

    # Length (1 is the default, so it is omitted).
    if signal.length != 1:
        signal_element.set('length', str(signal.length))

    # Byte order: strip the '_endian' suffix, e.g. 'big_endian' -> 'big'.
    if signal.byte_order != 'little_endian':
        signal_element.set('endianess', signal.byte_order[:-7])

    # Comment.
    if signal.comment is not None:
        _dump_notes(signal_element, signal.comment)

    # Receivers.
    if signal.receivers:
        consumer = SubElement(signal_element, 'Consumer')
        for receiver in signal.receivers:
            SubElement(consumer, 'NodeRef', id=str(node_refs[receiver]))

    # Value element, only appended when at least one attribute was set.
    value = Element('Value')
    if signal.minimum is not None:
        value.set('min', str(signal.minimum))
    if signal.maximum is not None:
        value.set('max', str(signal.maximum))
    if signal.scale != 1:
        value.set('slope', str(signal.scale))
    if signal.offset != 0:
        value.set('intercept', str(signal.offset))
    if signal.unit is not None:
        value.set('unit', signal.unit)

    if signal.is_float:
        type_name = 'single' if signal.length == 32 else 'double'
    elif signal.is_signed:
        type_name = 'signed'
    else:
        type_name = None
    if type_name is not None:
        value.set('type', type_name)

    if value.attrib:
        signal_element.append(value)

    # Label set.
    if signal.choices:
        label_set = SubElement(signal_element, 'LabelSet')
        for choice_value, choice_name in signal.choices.items():
            SubElement(label_set, 'Label', name=str(choice_name), value=str(choice_value))
def do_submission_xml(sub_id):
    """Build an ENA SUBMISSION XML document for the given submission id
    and return it pretty-printed."""
    submission_record = Submission().get_record(sub_id)
    bundle_files = [DataFile().get_record(d) for d in submission_record["bundle"]]
    datafile = bundle_files[0]
    study_type_attrs = datafile["description"]["attributes"]["study_type"]

    submission = Element("SUBMISSION")
    # get names of files in bundle and append here
    # do alias
    alias = make_alias(submission_record)
    submission.set("alias", alias + "_sub")
    submission.set("broker_name", study_type_attrs["study_broker"])
    submission.set("center_name", study_type_attrs["study_analysis_center_name"])
    submission.set("submission_date", datetime.datetime.now().isoformat())
    submission.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
    submission.set("xsi:noNamespaceSchemaLocation",
                   "ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_5/SRA.submission.xsd")

    # CONTACTS: a fixed COPO support contact plus the profile's people
    contacts = Element("CONTACTS")
    copo_contact = Element("CONTACT")
    copo_contact.set("inform_on_error", "*****@*****.**")
    copo_contact.set("inform_on_status", "*****@*****.**")
    copo_contact.set("name", "COPO Support")
    contacts.append(copo_contact)

    for person in Person(submission_record["profile_id"]).get_people_for_profile():
        contact = Element("CONTACT")
        contact.set("name", person["firstName"] + " " + person["lastName"])
        roles = person["roles"]
        # the ISA role annotations decide which notifications each person gets
        if any(r["annotationValue"] == "SRA Inform On Status" for r in roles):
            contact.set("inform_on_status", person["email"])
        if any(r["annotationValue"] == "SRA Inform On Error" for r in roles):
            contact.set("inform_on_error", person["email"])
        contacts.append(contact)
    submission.append(contacts)

    # ACTIONS: a single ADD of the analysis XML
    actions = Element("ACTIONS")
    action = Element("ACTION")
    add = Element("ADD")
    add.set("schema", "analysis")
    add.set("source", "analysis.xml")
    action.append(add)
    actions.append(action)
    submission.append(actions)

    return prettify(submission)
def generate_cabin(self, world_ele):
    """Append an SDF <model> for this lift's cabin to *world_ele*.

    The model contains the platform link (floor visual/collision and
    generated wall sections), the prismatic actuation joint, the cabin
    doors, the lift plugin configuration and the model pose.
    """
    # materials missing for now
    lift_model_name = f'{self.name}'
    lift_model_ele = SubElement(world_ele, 'model')
    lift_model_ele.set('name', lift_model_name)

    # main cabin link for actuation
    platform_name = 'platform'
    platform = SubElement(lift_model_ele, 'link')
    platform.set('name', platform_name)
    inertial = SubElement(platform, 'inertial')
    mass = SubElement(inertial, 'mass')
    mass.text = f'{self.cabin_mass}'

    # visuals and collisions for floor and walls of cabin
    # floor box is centered half a thickness below the link origin
    floor_dims = [self.width, self.depth, self.floor_thickness]
    floor_name = 'floor'
    floor_pose = Element('pose')
    floor_pose.text = f'0 0 {-self.floor_thickness / 2} 0 0 0'
    platform.append(
        visual(floor_name, floor_pose, floor_dims, lift_material()))
    # '0x01' is the collision bitmask passed to the collision() helper
    platform.append(collision(floor_name, floor_pose, floor_dims, '0x01'))

    # Wall generation
    # get each pair of end_points on each side, generate a section of wall
    # between the pair of points
    for side, end_points in self.end_points.items():
        assert len(end_points) % 2 == 0
        for i in range(0, len(end_points), 2):
            pair = end_points[i:i + 2]
            name = f'{side}wall{i//2+1}'
            self.generate_wall(side, pair, name, platform)

    # lift cabin actuation joint (vertical prismatic joint to the world)
    lift_model_ele.append(
        joint('cabin_joint', 'prismatic', 'world', 'platform', joint_axis='z'))

    # cabin doors
    for lift_door in self.doors:
        lift_door.generate_cabin_door(
            lift_model_ele, f'CabinDoor_{self.name}_{lift_door.name}')

    # lift cabin plugin
    plugin_ele = SubElement(lift_model_ele, 'plugin')
    plugin_ele.set('name', 'lift')
    plugin_ele.set('filename', 'liblift.so')
    lift_name_ele = SubElement(plugin_ele, 'lift_name')
    lift_name_ele.text = f'{self.name}'
    # one <floor> per served level, pairing each cabin door with the
    # matching shaft door on that level
    for level_name, door_names in self.level_doors.items():
        floor_ele = SubElement(plugin_ele, 'floor')
        floor_ele.set('name', f'{level_name}')
        floor_ele.set('elevation', f'{self.level_elevation[level_name]}')
        for door in self.doors:
            if door.name in door_names:
                door_pair_ele = SubElement(floor_ele, 'door_pair')
                door_pair_ele.set('cabin_door',
                                  f'CabinDoor_{self.name}_{door.name}')
                door_pair_ele.set(
                    'shaft_door',
                    f'ShaftDoor_{self.name}_{level_name}_{door.name}')
    reference_floor_ele = SubElement(plugin_ele, 'reference_floor')
    reference_floor_ele.text = f'{self.reference_floor_name}'
    # forward any extra plugin parameters verbatim
    for param_name, param_value in self.params.items():
        ele = SubElement(plugin_ele, param_name)
        ele.text = f'{param_value}'
    cabin_joint_ele = SubElement(plugin_ele, 'cabin_joint_name')
    cabin_joint_ele.text = 'cabin_joint'

    # pose
    model_pose = SubElement(lift_model_ele, 'pose')
    model_pose.text = f'{self.x} {self.y} 0 0 0 {self.yaw}'
Titulo = titulo_wrapper.h1 #Sinopsis sinopsis_w = soup.find('div', class_='summary_text') sinopsis = sinopsis_w.text #Temporadas temporadas_w = soup.find('div', class_='seasons-and-year-nav') temporadas = temporadas_w.a.text #Capitulos capitulos_wrapper = soup.find('div', class_='bp_description') capitulos = capitulos_wrapper.find('span', class_='bp_sub_heading') ############################################################################################### root = Element('Serie') tree = ElementTree(root) titlo = Element('Titulo') sinsis = Element('sinopsis') tempdas = Element('temporadas') caps = Element('capitulos') root.append(titlo) root.append(sinsis) root.append(tempdas) root.append(caps) titlo.text = Titulo.text sinsis.text = sinopsis caps.text = capitulos.text
def _extensions2gpx(self, parent, extensions):
    """Serialize *extensions* into a new <extensions> child of *parent*.

    The dict is converted by self._dict2gpx; returns *parent* for chaining.
    """
    extensions_element = self._dict2gpx(Element(ns + "extensions"), extensions)
    parent.append(extensions_element)
    return parent
def handle_item(item: ET.Element) -> Dict[str, Optional[DataItem]]:
    """Convert an <item> element into a dict keyed by its <field> children.

    When the item carries a 'key' attribute, it is copied into the result
    under the 'key' key (overriding any field of the same name).
    """
    result = {name: value for name, value in map(handle_field, item.findall('field'))}
    if 'key' in item.attrib:
        result['key'] = item.attrib['key']
    return result