def create_localization_launch_file():
    """Build the localization ROS launch XML and write it to ./res/localization/localization.launch.

    Returns True once the file has been written.
    """
    launch = Element("launch")

    # voxel_grid_filter: downsample incoming point clouds.
    launch.append(Comment("voxel_grid_filter"))
    voxel_include = SubElement(
        launch, "include",
        {"file": "$(find points_downsampler)/launch/points_downsample.launch"})
    SubElement(voxel_include, "arg", {
        "name": "node_name",
        "value": "voxel_grid_filter"
    })

    # nmea2tfpose: GNSS pose source.
    launch.append(Comment("nmea2tfpose"))
    SubElement(launch, "include",
               {"file": "$(find gnss_localizer)/launch/nmea2tfpose.launch"})

    # ndt matching: scan-to-map localization.
    launch.append(Comment("ndt matching"))
    ndt_include = SubElement(
        launch, "include",
        {"file": "$(find ndt_localizer)/launch/ndt_matching.launch"})
    for arg_name, arg_value in (("use_openmp", "false"),
                                ("get_height", "true")):
        SubElement(ndt_include, "arg", {"name": arg_name, "value": arg_value})

    with open("./res/localization/localization.launch", "w") as f:
        f.write(prettify(launch))
    return True
def _generate_powershell_xml(coverage_file: str) -> Element:
    """Generate a PowerShell coverage report XML element from the specified coverage file and return it."""
    coverage_info = read_json_file(coverage_file)
    content_root = data_context().content.root
    is_ansible = data_context().content.is_ansible

    packages: dict[str, dict[str, dict[str, int]]] = {}

    # Group the per-file coverage results by the package they belong to.
    for path, results in coverage_info.items():
        filename = os.path.splitext(os.path.basename(path))[0]

        if filename.startswith('Ansible.ModuleUtils'):
            package = 'ansible.module_utils'
        elif is_ansible:
            package = 'ansible.modules'
        else:
            rel_path = path[len(content_root) + 1:]
            plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
            package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)

        if package not in packages:
            packages[package] = {}

        packages[package][path] = results

    elem_coverage = Element('coverage')
    elem_coverage.append(
        Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
    elem_coverage.append(
        Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))

    elem_sources = SubElement(elem_coverage, 'sources')

    elem_source = SubElement(elem_sources, 'source')
    elem_source.text = data_context().content.root

    elem_packages = SubElement(elem_coverage, 'packages')

    total_lines_hit = 0
    total_line_count = 0

    for package_name, package_data in packages.items():
        lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)

        total_lines_hit += lines_hit
        total_line_count += line_count

    # Per the Cobertura DTD, 'lines-valid' is the total number of measurable
    # lines and 'lines-covered' the number actually hit (line-rate is their
    # ratio). The original code had the two values swapped.
    elem_coverage.attrib.update({
        'branch-rate': '0',
        'branches-covered': '0',
        'branches-valid': '0',
        'complexity': '0',
        'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
        'lines-covered': str(total_lines_hit),
        'lines-valid': str(total_line_count),
        'timestamp': str(int(time.time())),
        'version': get_ansible_version(),
    })

    return elem_coverage
def append_datastrip_structure(datastrip: Element):
    """
    Create the structure for the DATASTRIP folder as defined in
    'S2_PDI_Level-2A_Datastrip_Structure.xsd' and append it to the datastrip
    element of the xml tree.

    :param datastrip: Element for the DATASTRIP folder
    :return: None
    """
    folder = SubElement(datastrip, 'DATASTRIP_NAME', attrib={'type': 'folder'})
    folder.append(
        Comment(
            'Naming convention from https://sentinel.esa.int/documents/247904/685211/Sentinel-2-Products-Specification-Document at p74, p438'
        ))

    # The folder holds the metadata file plus a QI_DATA quality subfolder.
    SubElement(folder, 'DataStrip_Metadata_File', attrib={'type': 'file'})
    quality_folder = SubElement(folder, 'QI_DATA', attrib={'type': 'folder'})
    quality_report = SubElement(quality_folder, 'OLQC', attrib={'type': 'file'})
    quality_report.append(Comment('OLQC reports XML formatted'))
def __init__(self, top):
    """
    Set the initial parameters in the domain file.

    :param top: name of the top element in the xml file
    :return: None
    """
    self.top = Element(top)
    self.top.append(
        Comment('Assignment 3 - Development of a Spoken Dialogue System'))

    # Optional sections: initial state variables and rule-parameter priors.
    self.initial_state = Element('initialstate')
    self.initial_state.append(
        Comment('(optional) initial state variables '))
    self.top.append(self.initial_state)

    self.parameters = Element('parameters')
    self.parameters.append(
        Comment('(optional) prior distributions for rule parameters'))
    self.top.append(self.parameters)

    # Settings block wiring up the Nuance speech plugin and its credentials.
    self.settings = Element('settings')
    for tag, text in (
            ('modules', 'opendial.plugins.NuanceSpeech'),
            ('id', 'NMDPTRIAL_garash8420141029160511'),
            ('key',
             'd89a8e437c74b8cc9b73e0eef132adce26889142e1ffd000c4729adcaaa95c8244eac5c05e7e8cf9f16de7e29f95b1c5'
             '1d4207fe41d7fe054998e1f4423d7686'),
            ('lang', 'nor-NOR'),
    ):
        SubElement(self.settings, tag).text = text
    self.top.append(self.settings)
def __init__(self, nml_boiler=False):
    """Create empty ComponentType/Dynamics elements.

    :param nml_boiler: when True, add hardcoded test constants and a voltage
        requirement to the ComponentType (extends 'baseIonChannel').
    """
    self.comp_type = Element('ComponentType')
    self.dynamics = Element('Dynamics')

    if not nml_boiler:
        return

    self.comp_type.set('extends', 'baseIonChannel')
    self.comp_type.append(
        Comment('The defs below are hardcoded for testing purposes!'))
    for tag, attrib in (
            ('Constant', {'dimension': 'voltage', 'name': 'MV', 'value': '1mV'}),
            ('Constant', {'dimension': 'time', 'name': 'MS', 'value': '1ms'}),
            ('Requirement', {'name': 'v', 'dimension': 'voltage'}),
    ):
        SubElement(self.comp_type, tag, attrib=attrib)
    self.comp_type.append(Comment('End of hardcoded defs!'))
def insert_comments(xml, insert_position, text):
    """Insert an auto-generated marker comment into ``xml`` at ``insert_position``.

    Returns the tree together with the next insert position so callers can
    chain successive inserts.
    """
    note = Comment(text="{0} automatically inserted by beast_tools".format(text))
    note.tail = '\n\n\t'
    xml.insert(insert_position, note)
    return xml, insert_position + 1
def list(self, graph):
    """Emit class, property and named-individual definitions from ``graph``.

    Only subjects under http://schema.org are emitted; each group is preceded
    by a banner comment appended to ``self.dom``.
    """
    banner = "\n\t/////////////////////\n\t/ %s\n\t/////////////////////\n\t"
    exts = []

    self.dom.append(Comment(banner % "Class Definitions"))
    classes = {}
    for subject, _, _ in graph.triples((None, RDF.type, RDFS.Class)):
        if subject.startswith("http://schema.org"):
            classes[subject] = graph.identifier
    for term in sorted(classes.keys()):
        self.outputType(term, graph)

    self.dom.append(Comment(banner % "Property Definitions"))
    properties = {}
    for subject, _, _ in graph.triples((None, RDF.type, RDF.Property)):
        if subject.startswith("http://schema.org"):
            properties[subject] = graph.identifier
    for term in sorted(properties.keys()):
        self.outputProp(term, graph)

    self.dom.append(Comment(banner % "Named Individuals Definitions"))
    self.outputEnums(graph)
    self.outputNamedIndividuals("http://schema.org/True", graph)
    self.outputNamedIndividuals("http://schema.org/False", graph)
def InitializePatchXMLFileVars():
    """Reset the module-level PatchesXML tree to a fresh 'patches' root with header comments."""
    global PatchesXML

    PatchesXML = Element('patches')
    PatchesXML.append(Comment('NOTE: This is an ** Autogenerated file **'))
    PatchesXML.append(
        Comment(
            'NOTE: Patching is in little endian format, i.e. 0xAABBCCDD will look like DD CC BB AA in the file or on disk'
        ))
def create_motion_launch_file():
    """Build the motion-planning ROS launch XML and write it to ./res/motion/motion.launch.

    Returns True once the file has been written.
    """
    launch = Element("launch")

    launch.append(Comment("Vehicle Contorl"))
    SubElement(launch, "include", {
        "file": "$(find runtime_manager)/scripts/vehicle_socket.launch",
    })

    launch.append(Comment("path_select"))
    SubElement(launch, "node", {
        "pkg": "lattice_planner",
        "type": "path_select",
        "name": "path_select"
    })

    # pure_pursuit: publish the follower config once, then include its launch.
    launch.append(Comment("pure_pursuit"))
    params = "{header: {seq: 0, stamp: {secs: 0, nsecs: 0}, frame_id: ''}, param_flag: 0, velocity: 5.0, lookahead_distance: 4.0, lookahead_ratio: 2.0, minimum_lookahead_distance: 6.0, displacement_threshold: 0.0, relative_angle_threshold: 0}"
    SubElement(
        launch, "node", {
            "pkg": "rostopic",
            "type": "rostopic",
            "name": "rostopic",
            "args": "pub /config/waypoint_follower autoware_msgs/ConfigWaypointFollower '" + params + "'"
        })
    pure_pursuit = SubElement(
        launch, "include", {
            "file": "$(find waypoint_follower)/launch/pure_pursuit.launch",
        })
    for arg_name in ("is_linear_interpolation", "publishes_for_steering_robot"):
        SubElement(pure_pursuit, "arg", {"name": arg_name, "value": "true"})

    launch.append(Comment("twist_filter"))
    SubElement(launch, "include", {
        "file": "$(find waypoint_follower)/launch/twist_filter.launch",
    })

    launch.append(Comment("marker downsampler"))
    SubElement(
        launch, "node", {
            "pkg": "marker_downsampler",
            "type": "app.py",
            "name": "marker_downsampler"
        })

    with open("./res/motion/motion.launch", "w") as f:
        f.write(prettify(launch))
    return True
def CreateRawProgramXMLFile():
    # Build the rawprogram XML ('data' root): one <program> entry for the
    # primary GPT followed by one per entry in PartitionCollection, then
    # pretty-print it to RAW_PROGRAM on disk. (Python 2 module: print
    # statements, long-int hex suffix handling below.)
    # Relies on module globals: NumPhyPartitions, PartitionCollection,
    # RAW_PROGRAM and the Prettify() helper.
    global opfile, PartitionCollection, RawProgramXML
    print "\nMaking \"%s\"" % RAW_PROGRAM
    RawProgramXML = Element('data')
    RawProgramXML.append(Comment("NOTE: This is an ** Autogenerated file **"))
    RawProgramXML.append(Comment('NOTE: Sector size is 512bytes'))
    # GUID Partitioning Table: the primary GPT occupies the first 34 sectors
    # (34 * 512 bytes = 17 KB) starting at sector 0.
    SubElement(
        RawProgramXML, 'program', {
            'SECTOR_SIZE_IN_BYTES': str(512),
            'file_sector_offset': str(0),
            'filename': '',
            'label': "PrimaryGPT",
            'num_partition_sectors': str(34),
            'partofsingleimage': "true",
            'physical_partition_number': str(0),
            'readbackverify': "false",
            'size_in_KB': str(17.0),
            'sparse': "false",
            'start_byte_hex': str(hex(0)),
            'start_sector': str(0)
        })
    # Physical Partition entries: copy each partition's attributes verbatim.
    for i in range(NumPhyPartitions):
        SubElement(
            RawProgramXML, 'program', {
                'SECTOR_SIZE_IN_BYTES':
                str(PartitionCollection[i].SECTOR_SIZE_IN_BYTES),
                'file_sector_offset':
                str(PartitionCollection[i].file_sector_offset),
                'filename': PartitionCollection[i].filename,
                'label': PartitionCollection[i].label,
                'num_partition_sectors':
                str(PartitionCollection[i].num_partition_sectors),
                'partofsingleimage':
                PartitionCollection[i].partofsingleimage,
                'physical_partition_number':
                str(PartitionCollection[i].physical_partition_number),
                'readbackverify': PartitionCollection[i].readbackverify,
                'size_in_KB': str(PartitionCollection[i].size_in_KB),
                'sparse': PartitionCollection[i].sparse,
                # rstrip('L') drops the long-integer suffix Python 2's hex()
                # appends for values above sys.maxint.
                'start_byte_hex':
                hex(PartitionCollection[i].start_byte_hex).rstrip('L'),
                'start_sector': str(PartitionCollection[i].start_sector)
            })
    opfile = open(RAW_PROGRAM, "w")
    opfile.write(Prettify(RawProgramXML))
    opfile.close()
    print "\"%s\" Created" % RAW_PROGRAM
def save_file(self, file_name):
    """Write the collected track segments as a GPX 1.1 file.

    :param file_name: destination path. When None a timestamped name is
        generated; relative names are placed under ../new_data and a .gpx
        suffix is appended when the name has no extension.
    """
    out_file = file_name
    if file_name is None:
        out_file = "new_gpx_" + SlTrace.getTs()
    if not os.path.isabs(out_file):
        out_file = os.path.basename(out_file)
        out_file = os.path.join("..", "new_data", out_file)
    # Append .gpx when the name carries no extension at all.
    pm = re.match(r'^.*\.[^.]+$', out_file)
    if pm is None:
        out_file += ".gpx"
    out_file = os.path.abspath(out_file)
    SlTrace.lg(f"Output file: {out_file}")
    # Root attributes mirror those emitted by GDAL's GPX driver.
    gpx_attr = {'version' : "1.1",
                'creator' : "GDAL 3.0.4",
                'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance",
                'xmlns:ogr' : "http://osgeo.org/gdal",
                'xmlns' : "http://www.topografix.com/GPX/1/1",
                'xsi:schemaLocation' : "http://www.topografix.com/GPX/1/1",
                }
    gpx_top = Element('gpx', gpx_attr)
    generated_on = str(datetime.datetime.now())
    comment = Comment(f"Created {generated_on} via surveyor.py by crs")
    gpx_top.append(comment)
    comment = Comment(f"Source code: GitHub raysmith619/PlantInvasion")
    gpx_top.append(comment)
    n_seg = 0   # segments written
    n_pt = 0    # points written
    for track_segment in self.get_segments():
        trk = SubElement(gpx_top, 'trk')
        ###gpx_top.append(trk)
        trkseg = SubElement(trk, 'trkseg')  # We only have one trkseg per trk
        ###trk.append(trkseg)
        n_seg += 1
        for seg_point in track_segment.get_points():
            trkpt = SubElement(trkseg, 'trkpt',
                               {'lat' : str(seg_point.lat),
                                'lon' : str(seg_point.long),
                                })
            ###trkseg.append(trkpt)
            n_pt += 1
    SlTrace.lg(f"GPX File: {n_seg} segments {n_pt} points")
    pretty_str = prettify(gpx_top)
    if SlTrace.trace("gpx_output"):
        SlTrace.lg(pretty_str)
    # NOTE(review): trace flag 'gpx_rough_outupt' looks misspelled
    # ('output') — confirm against the flags used by callers.
    if SlTrace.trace("gpx_rough_outupt"):
        rough_string = ElementTree.tostring(gpx_top, 'utf-8')
        SlTrace.lg(f"rough_string:{rough_string}")
    try:
        fout = open(out_file, "w")
        fout.write(pretty_str)
        fout.close()
    except IOError as e:
        err_msg = f"Error {repr(e)} in creating GPXFile {out_file}"
        SlTrace.lg(err_msg)
        SlTrace.report(err_msg)
def insert_to_generic_sections(xml, values, section, id_name, level = 2):
    """Wrap ``values`` in begin/end marker comments and insert them into the
    element whose id matches ``id_name`` within the given ``section``.

    Returns the (possibly modified) xml tree.
    """
    marker = "{0} {1} values automatically inserted by insertobeast.py"
    values.insert(0, Comment(text=marker.format("Begin", section)))
    values.append(Comment(text=marker.format("End", section)))

    # get_level() maps the numeric depth + section name to an XPath-like query.
    for candidate in xml.findall(get_level(level, section)):
        if candidate.get('id') == id_name:
            for position, value in enumerate(values):
                candidate.insert(position, value)
    return xml
def generate_xml(self, part_colors=None):
    """Serialize this part set as pretty-printed XML, without the XML declaration."""
    root = Element("PartSet")

    for idx, part_name in enumerate(BCS_PART_LIST):
        part_element = SubElement(root, "Part", idx=str(idx))
        part_element.append(Comment(part_name.capitalize()))
        try:
            part = self.parts[part_name]
        except KeyError:
            part_element.append(Comment("This entry is empty."))
        else:
            part.generate_xml(part_element, part_colors)

    print(tostring(root))
    # Strip the '<?xml ...?>' declaration (plus trailing newline) that
    # toprettyxml prepends.
    dom = xml.dom.minidom.parseString(tostring(root))
    declaration = xml.dom.minidom.Document().toxml()
    return dom.toprettyxml()[len(declaration) + 1:]
def allCitiesAndEventsXML():
    """Return an XML response listing every city together with its events."""
    cities = session.query(City).all()

    top = Element('allEvents')
    top.append(Comment('XML Response with all cities and events'))

    # One <event> node per city, with the city's events nested under it.
    for c in cities:
        event = SubElement(top, 'event')
        SubElement(event, 'id').text = str(c.id)
        SubElement(event, 'city').text = c.name
        SubElement(event, 'state').text = c.state

        eventInfo = SubElement(event, 'eventInfo')
        for e in c.events:
            en = SubElement(eventInfo, 'event_name')
            en.text = e.name
            SubElement(en, 'description').text = e.description
            SubElement(en, 'event_date').text = str(e.event_date)
            SubElement(en, 'event_url').text = e.event_url
            SubElement(en, 'user_id').text = str(e.user_id)

    return app.response_class(tostring(top), mimetype='application/xml')
def buildXMl():
    """Write test.xml describing French name usage parsed from better_name_list.

    Fix: the output file is now opened with a context manager so the handle is
    flushed and closed even if writing fails (the original never closed it).
    """
    data = Element('data')
    data.append(Comment('All french names usage between ???? - 2017'))
    names = SubElement(data, 'names')

    # better_name_list rows: [gender_code, name, [(year, uses), ...]]
    for index, nameList in enumerate(better_name_list):
        name = SubElement(names, 'name', {
            'id': str(index),
            'name': nameList[1]
        })
        SubElement(
            name, 'gender',
            {'gender': 'male' if nameList[0] == '1' else 'female'})
        usages = SubElement(name, 'usages')
        for yearList in nameList[2]:
            SubElement(usages, 'year', {
                'year': yearList[0],
                'uses': yearList[1]
            })

    with open("test.xml", "w") as myfile:
        myfile.write(str(prettify(data)))
def export():
    """Read ../0014/student.xls and export its rows to student.xml as a dict literal."""
    # Open the Excel workbook to read its data
    wb = xlrd.open_workbook(r'../0014/student.xls')
    # Get a worksheet by name
    table = wb.sheet_by_name(u'student')
    data = dict()
    # Loop over every row of the table
    for i in range(table.nrows):
        # Get a whole row's or column's values (as a list)
        # print(table.row_values(i))
        # Access a single cell by row/column index
        # print(table.row(i)[2].value)
        # Fetch this row's cells
        row = table.row(i)
        print(row)
        value_list = list()
        # First cell is the key (student id); remaining cells become the value list.
        key = row[0].value
        print(key)
        for i1 in row[1:]:
            value = i1.value
            print(value)
            value_list.append(value)
        data[key] = value_list
    print(data)
    root = Element('root')
    # Runtime comment text (kept verbatim): student info table
    # "id" : [name, math, chinese, english]
    comment = Comment('学生信息表"id" : [名字, 数学, 语文, 英文]')
    child = SubElement(root, 'students')
    child.append(comment)
    # The whole dict is stored as its repr() string, not as nested elements.
    child.text = str(data)
    tree = ElementTree(root)
    tree.write('student.xml', encoding='utf8')
def create_root(self):
    """Build the skeleton 'annotation' tree (PASCAL VOC style) with blank
    placeholder leaves and store it on ``self.root``.

    Fix: the original created a Comment('Generated for PyMOTW') node that was
    never appended to the tree; that dead local has been removed.
    """
    self.root = Element('annotation')

    # Placeholder leaves, filled in later by the caller.
    for tag in ("folder", "filename", "source", "owner"):
        SubElement(self.root, tag).text = " "

    size = SubElement(self.root, "size")
    for tag in ("width", "height", "depth"):
        SubElement(size, tag).text = " "

    segmented = SubElement(self.root, "segmented")
    segmented.text = "0"
def generateopml(request):
    """Render every Channel as a podcast outline in OPML 2.0 (Podkicker flavour)."""
    generated_on = str(datetime.datetime.now())

    root = Element('opml')
    root.set('version', '2.0')
    root.set('xmlns:pk', 'http://www.podkicker.com/backup.dtd')
    root.append(Comment('Generated by you2rss'))

    head = SubElement(root, 'head')
    SubElement(head, 'title').text = 'You2Rss podcasts'
    SubElement(head, 'dateCreated').text = generated_on
    SubElement(head, 'dateModified').text = generated_on

    body = SubElement(root, 'body')
    # One outline entry per channel, pointing at this site's RSS view.
    for channel in Channel.objects.all():
        feed_url = ''.join([
            'http://',
            get_current_site(request).domain,
            reverse('you2rss:rssfile', args=(channel.channel_id, ))
        ])
        SubElement(
            body, 'outline', {
                'text': channel.title_text,
                'xmlUrl': feed_url,
                'pk:autodownload': "0",
                'pk:cachelimit': "0",
                'pk:notify': "0",
            })

    return HttpResponse(tostring(root), content_type="text/x-opml")
def create_root(self):
    """Build the skeleton 'annotation' tree (LabelMe style, with an
    'imagesize' block) and store it on ``self.root``.

    Fix: the original created a Comment('Generated for PyMOTW') node that was
    never appended to the tree; that dead local has been removed.
    """
    self.root = Element('annotation')

    # Placeholder leaves, filled in later by the caller.
    for tag in ("folder", "filename", "source", "owner"):
        SubElement(self.root, tag).text = " "

    imagesize = SubElement(self.root, "imagesize")
    for tag in ("nrows", "ncols", "depth"):
        SubElement(imagesize, tag).text = " "

    segmented = SubElement(self.root, "segmented")
    segmented.text = "0"
def set_text(element, content): content = escape(content, entities={'\r\n': '<br />'}) # retain html tags in text content = content.replace("\n", "<br />") # replace new line for *nix system content = content.replace("<br />", "<br />\n") # add the line break in source to make it readable # trick to add CDATA for element tree lib element.append(Comment(' --><![CDATA[' + content.replace(']]>', ']]]]><![CDATA[>') + ']]><!-- '))
def buildxmlwithoutelement(pic_name, weight_num, height_num):
    """Build a minimal VOC-style annotation (filename + size only) for one
    image, pretty-print it to the Annotations directory, and return the root.

    :param pic_name: image file name, e.g. 'foo.jpg' (extension stripped for the xml name)
    :param weight_num: image width in pixels
        # NOTE(review): 'weight' is presumably a typo for 'width' — confirm with callers
    :param height_num: image height in pixels
    """
    top = Element('annotation')
    comment = Comment('Generated for BataFoundation - louis0815')
    top.append(comment)
    filename = SubElement(top, 'filename')
    filename.text = str(pic_name)
    sizePic = SubElement(top, 'size')
    weight = SubElement(sizePic, 'width')
    height = SubElement(sizePic, 'height')
    depth = SubElement(sizePic, 'depth')
    weight.text = '%d' % (weight_num)
    height.text = '%d' % (height_num)
    depth.text = '3'  # hardcoded: assumes 3-channel (RGB) images
    # Hardcoded, machine-specific output directory.
    xmlfilepath = str(
        '/Users/insisterlouis/Workspaces/HardhatDetector/data/test/Annotations/'
        + (re.split(r'[.]', pic_name)[0]) + '.xml')
    xml_object = minidom.parseString(
        etree.ElementTree.tostring(top, encoding="utf-8")).toprettyxml(indent="    ")
    # NOTE(review): toprettyxml returns str; writing it to a "wb" handle only
    # works on Python 2 — confirm the target interpreter before porting.
    with open(xmlfilepath, "wb") as writter:
        writter.write(xml_object)
    return top
def getItemsXML(expedition_id, category_id):
    """
    Endpoint to return an XML List of all items associated with a certain
    expedition and category
    :param expedition_id: id of the expedition to filter on
    :param category_id: id of the category to filter on
    """
    items = session.query(Item).filter_by(expedition_id=expedition_id,
                                          category_id=category_id).all()
    root = Element('allItems')
    comment = Comment('XML Endpoint Listing '
                      'all Item for a specific Category and Expedition')
    root.append(comment)
    # One <expedition> node per matching item; category and item details are
    # nested beneath it.
    for i in items:
        ex = SubElement(root, 'expedition')
        ex.text = i.expedition.title
        category_name = SubElement(ex, 'category_name')
        category_description = SubElement(category_name,
                                          'category_description')
        category_picture = SubElement(category_name, 'category_picture')
        category_name.text = i.category.name
        category_description.text = i.category.description
        category_picture.text = i.category.picture
        item_name = SubElement(category_name, 'item_name')
        item_decription = SubElement(item_name, 'item_description')
        item_picture = SubElement(item_name, 'item_picture')
        item_name.text = i.name
        item_decription.text = i.description
        item_picture.text = i.picture
    # Python 2 print statement — debug output of the serialized tree.
    print tostring(root)
    return app.response_class(tostring(root), mimetype='application/xml')
def xmlGenerator(self):
    """Collect USD prices for self.coins from the two fetched API payloads and
    write them to an hourly-timestamped XML file.

    Prices for all but the last coin come from the first payload
    (self.getToken1); the remaining coin's price comes from the second
    (self.getToken2).
    """
    operators = 1
    prices = []
    data1 = json.loads(self.getToken1.content)
    data2 = json.loads(self.getToken2.content)

    def prettify(elem):
        # Round-trip through minidom purely to obtain indented XML text.
        rough_string = ElementTree.tostring(elem, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        return reparsed.toprettyxml(indent="  ")

    cryptocurrency = Element("cryptocurrency")
    # NOTE(review): the +/-48 offset arithmetic below assumes data1 holds
    # paired entries 48 positions apart — confirm against the API payload
    # layout before refactoring.
    for addPrices in range(len(self.coins) - 1):
        if addPrices < operators:
            prices.append(
                data1[addPrices]["market_data"]["current_price"]["usd"])
            operators += 48
            if operators > addPrices:
                prices.append(data1[addPrices + operators]["market_data"]
                              ["current_price"]["usd"])
                operators -= 48
        else:
            prices.append(data2["price_usd"])
    # One comment + one element per collected price, tagged by coin name.
    for crypto in range(len(prices)):
        cryptocurrency.append(Comment(self.coins[crypto] + 'coin'))
        SubElement(cryptocurrency,
                   self.coins[crypto]).text = str(prices[crypto])
    # File name is the current date/hour, so one file per hour.
    with open(
            str(datetime.datetime.now().strftime("%Y%m%d%H")) + ".xml",
            "w") as writeFile:
        writeFile.write(prettify(cryptocurrency))
def categoryGroceryXML(category_id):
    """Return an XML response for one category and every grocery item in it."""
    category = session.query(Category).filter_by(id=category_id).all()
    items = session.query(GroceryItem).filter_by(category_id=category_id).all()

    top = Element('Category')
    top.append(
        Comment('XML Response with a single category with all items within it'))

    # One <category> node per matching category row, items nested inside it.
    for c in category:
        grocery_category = SubElement(top, 'category')
        SubElement(grocery_category, 'id').text = str(c.id)
        SubElement(grocery_category, 'name').text = str(c.name)

        grocery_item = SubElement(grocery_category, 'item')
        for i in items:
            SubElement(grocery_item, 'id').text = str(i.id)
            SubElement(grocery_item, 'name').text = str(i.name)
            SubElement(grocery_item, 'description').text = str(i.description)
            SubElement(grocery_item, 'price').text = str(i.price)

    return app.response_class(tostring(top), mimetype='application/xml')
def to_tree(self):
    """Convert this node (and its children, recursively) to an ElementTree Element.

    Childless nodes that are not self-closing get a placeholder comment so
    the serializer emits an explicit closing tag.
    """
    node = Element(self.type, self.serializable_attrs())
    for child in self.children:
        node.append(child.to_tree())
    if not (self.is_startend or self.children):
        node.append(Comment(' '))
    return node
def main(world_name, saveToPath):
    """Build a Gazebo SDF world from the current Blender scene and write it to saveToPath."""
    root = Element('sdf')
    root.set('version', '1.4')
    root.append(Comment('Generated by blend2world.py'))

    world = SubElement(root, 'world', {"name": world_name})
    addHeader(world)
    # add ground plane
    addGroundPlane(world)

    # Static black walls enclosing the arena.
    addModel(world, "rightsidewall", "0 8.1 4 0 0 0", "file://blackwall.dae")
    addModel(world, "backsidewall", "-6 0.5 4 0 0 1.57", "file://blackwall.dae",
             "1.2833 1 1")
    addModel(world, "leftsidewall", "0 -7.1 4 0 0 0", "file://blackwall.dae")

    print("blend2world main function")

    # One SDF model per mesh object in the Blender scene.
    for scene_object in bpy.data.objects:
        if scene_object.type == "MESH":
            addModel(
                world, scene_object.name,
                locrot2string(scene_object.location,
                              scene_object.rotation_euler, scene_object.name),
                getMesh(scene_object.name))

    indent(root)
    ET.dump(root)
    ElementTree(root).write(saveToPath)
def gps2xml(path):
    """Walk device folders under ``path``, collect their GPS fixes grouped by
    calendar date, and write the result to <path>/xml/gps.xml."""
    gpsdict = defaultdict(list)
    generated_on = str(datetime.now())
    root = Element('root')
    comment = Comment('Generated for tool analysis')
    root.append(comment)
    # body = SubElement(root,'body')

    for dirpath, dirs, files in os.walk(path):
        device_id = dirpath.split('/')[-1]
        # Folder names longer than 8 chars are treated as device ids.
        if len(device_id) <= 8:
            continue
        gpsdict = gps.gpsdata(dirpath)
        node = SubElement(root, 'device', {'id': device_id})
        if len(gpsdict) == 0:
            continue
        current_day = 0
        date = None
        for timestamp, coords in sorted(gpsdict.items()):
            # Start a new <date> group whenever the calendar day changes.
            if current_day == 0 or current_day != timestamp.date():
                current_day = timestamp.date()
                date = SubElement(node, 'date', {'value': str(current_day)})
            details = SubElement(date, 'details',
                                 {'timestamp': str(timestamp.time())})
            SubElement(details, 'latitude').text = coords[0]
            SubElement(details, 'longitude').text = coords[1]

    out_dir = os.path.join(path, 'xml')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    fname = os.path.join(out_dir, 'gps.xml')
    f = open(fname, 'w')
    f.write(prettify(root))
    f.close()
def writeCover(self, path):
    """Write the EPUB cover XHTML page (title, subtitle, author, translator) to ``path``."""
    root = Element(
        'html', {
            'xmlns': 'http://www.w3.org/1999/xhtml',
            'xmlns:epub': 'http://www.idpf.org/2007/ops',
            'lang': 'ja',
            'xml:lang': 'ja'
        })
    root.append(Comment(self.xmlComment))
    head = SubElement(root, 'head')
    SubElement(head, 'title').text = self.title
    body = SubElement(root, 'body')
    div = SubElement(body, 'div', {'id': 'main'})
    SubElement(div, 'h1', {'id': 'book-title'}).text = self.title
    # Subtitle is optional.
    if not self.subTitle is None:
        SubElement(div, 'h2', {'id': 'book-subtitle'}).text = self.subTitle
    SubElement(div, 'p', {'id': 'book-author'}).text = self.creator
    # '[訳]' prefix marks the translator credit.
    SubElement(div, 'p', {
        'id': 'book-translator'
    }).text = u'[訳] ' + self.translator
    # Pretty-print, then swap the default declaration for an XHTML-friendly
    # one plus an HTML5 doctype.
    rough_string = ElementTree.tostring(root, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    contents = reparsed.toprettyxml()
    contents = contents.replace(
        '<?xml version="1.0" ?>',
        '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE html>')
    # NOTE(review): writing encoded bytes to a text-mode handle implies
    # Python 2 — confirm before porting.
    f = open(path, 'w')
    f.write(contents.encode('utf-8'))
    f.close()
def generate_xml(self, root):
    """Append a PhysicsObject element describing this physics part to ``root``."""
    physics = SubElement(root, "PhysicsObject")

    for field_name in self.data.__fields__:
        # Skip count/offset bookkeeping fields and explicitly ignored ones.
        if 'num' in field_name or 'offset' in field_name:
            continue
        if field_name in BCS_PHYSICS_XML_IGNORE:
            continue
        xml_name = BCS_PHYSICS_XML_TRANSLATION.get(field_name, field_name).upper()
        if xml_name.startswith("U_"):
            value = hex(self[field_name])
        elif field_name.startswith("model") and self[field_name] < 10000:
            value = "10000"
        else:
            value = str(self[field_name])
        SubElement(physics, xml_name, value=value)

    # Add file names
    physics.append(Comment("MODEL, EMM, EMB, EAN, BONE, SCD"))
    model_names = {
        name: get_costume_creator_name(self[name])
        for name in BCS_PHYSICS_XML_NAMES
    }
    SubElement(
        physics, "STR_28",
        value=
        f'{model_names["emd_name"] or "NULL"}, {model_names["emm_name"] or "NULL"}, '
        f'{model_names["emb_name"] or "NULL"}, {model_names["ean_name"] or "NULL"}, '
        f'{self.bone_name or "NULL"}, {model_names["scd_name"] or "NULL"}')
def Button_OnClick2():
    # Scrape every URL listed in links.txt, extract each <div class="section">
    # that contains a <p>, and write the collected text to '<entry2>.xml'.
    # Python 2 code (urllib2, print statements); entry2 is a Tk entry widget
    # supplying the project/root-element name.
    if len(entry2.get()) != 0:
        top = Element(entry2.get())
        comment = Comment('Extracted Function Document for' + entry2.get())
        top.append(comment)
        with open('links.txt') as f:
            for visited_link in f:
                try:
                    html_page = urllib2.urlopen(visited_link)
                    print(visited_link)
                    soup = BeautifulSoup(html_page)
                    # soup = BeautifulSoup(open('test.html'))
                    for tag in soup.findAll('div', attrs={"class": "section"}):
                        if tag.find('p') is not None:
                            # tag_id = u''.join(tag.get('id')).encode('utf-8').strip()
                            print tag.get('id')
                            # The div's id becomes the element tag; its first
                            # paragraph becomes the element text.
                            div_id = SubElement(top, tag.get('id'))
                            # tag_id_text = u''.join(tag.find('p').text).encode('utf-8').strip()
                            print tag.find('p').text
                            div_id.text = tag.find('p').text
                        else:
                            continue
                except urllib2.HTTPError as err:
                    # Log and skip pages that fail to fetch.
                    print err.code
                    print visited_link
                    continue
                except urllib2.URLError as urlerr:
                    print urlerr.message
                    continue
        tree = ElementTree(top)
        tree.write(entry2.get() + '.xml')
    else:
        entry2.insert(0, "Enter Project Name")
def searchComment(query):
    """Build a Comment datastore query from a dict of filters.

    The special key 'ancestor' sets the query's ancestor; every other
    key/value pair is applied as a filter. Returns the query object.
    """
    q = Comment.all()
    for key in query:
        if key == 'ancestor':
            q.ancestor(query[key])
        else:
            q.filter(key, query[key])
    return q
def insertComment(commenter, commenter_id, item_id, content):
    """Create and persist a Comment entity under ``item_id``; return the saved entity."""
    new_comment = Comment(
        parent=item_id,
        commenter=commenter,
        commenter_id=commenter_id,
        content=content,
    )
    new_comment.put()
    return new_comment
def insert_comments(xml, insert_position, text):
    """Insert a beast_tools marker comment into ``xml`` at the given position.

    Returns the tree plus the position immediately after the new comment.
    NOTE(review): duplicates an identical helper defined earlier in this file.
    """
    marker = Comment(
        text="{0} automatically inserted by beast_tools".format(text))
    marker.tail = "\n\n\t"
    xml.insert(insert_position, marker)
    next_position = insert_position + 1
    return xml, next_position