def writeNav(self, path):
    """Write the EPUB navigation document (table of contents) to *path*.

    Builds an XHTML <nav epub:type="toc"> listing every entry of
    self.contents (xhtml path -> title), pretty-prints it, and saves it
    UTF-8 encoded with an HTML5 doctype.
    """
    root = Element('html', {'xmlns':'http://www.w3.org/1999/xhtml', 'xmlns:epub':'http://www.idpf.org/2007/ops', 'lang':'ja', 'xml:lang':'ja'})
    root.append(Comment(self.xmlComment))
    head = SubElement(root, 'head')
    SubElement(head, 'link', {'href':'nav.css', 'rel':'stylesheet', 'type':'text/css'})
    # Extra stylesheet only for vertical-writing (tategaki) books.
    if self.isVertical:
        SubElement(head, 'link', {'href':'vertical.css', 'rel':'stylesheet', 'type':'text/css'})
    SubElement(head, 'title').text = u'目次'  # "Table of contents"
    body = SubElement(root, 'body')
    section = SubElement(body, 'section', {'class':'frontmatter TableOfContents'})
    nav = SubElement(section, 'nav', {'epub:type':'toc', 'id':'toc'})
    SubElement(nav, 'h1').text = u'目次'
    ol = SubElement(nav, 'ol')
    xhtmls = self.contents.keys()
    for xhtml in xhtmls:
        li = SubElement(ol, 'li')
        # Link by bare file name; the nav document sits next to the content files.
        filename = os.path.basename(xhtml)
        SubElement(li, 'a', {'href':filename}).text = self.contents[xhtml]
    # Round-trip through minidom for pretty-printing, then swap in an XML
    # declaration with an explicit encoding plus the HTML5 doctype.
    rough_string = ElementTree.tostring(root, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    contents = reparsed.toprettyxml()
    contents = contents.replace('<?xml version="1.0" ?>', '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE html>')
    # NOTE(review): Python 2 style I/O (encoded bytes written without a
    # context manager) — assumes this project runs on Python 2.
    f = open(path, 'w')
    f.write(contents.encode('utf-8'))
    f.close()
def process_task(self, target, command, args):
    """Probe *target* for TLS/zlib compression support (CRIME attack surface).

    Raises RuntimeError when the local OpenSSL build lacks zlib support.
    Returns a PluginBase.PluginResult with text lines and an XML node.
    """
    OUT_FORMAT = ' {0:<35}{1}'.format
    sslConn = create_sslyze_connection(target, self._shared_settings)
    # Make sure OpenSSL was built with support for compression to avoid false negatives
    if 'zlib compression' not in sslConn.get_available_compression_methods():
        raise RuntimeError('OpenSSL was not built with support for zlib / compression. Did you build nassl yourself ?')
    try:
        # Perform the SSL handshake
        sslConn.connect()
        compName = sslConn.get_current_compression_method()
    except ClientAuthenticationError:
        # The server asked for a client cert; the handshake still went far
        # enough to know the negotiated compression method.
        compName = sslConn.get_current_compression_method()
    finally:
        sslConn.close()
    # Text output
    if compName:
        compTxt = 'Supported'
    else:
        compTxt = 'Disabled'
    cmdTitle = 'Compression'
    txtOutput = [self.PLUGIN_TITLE_FORMAT(cmdTitle)]
    txtOutput.append(OUT_FORMAT("DEFLATE Compression:", compTxt))
    # XML output: a <compressionMethod> child is only emitted when enabled.
    xmlOutput = Element(command, title=cmdTitle)
    if compName:
        xmlNode = Element('compressionMethod', type="DEFLATE")
        xmlOutput.append(xmlNode)
    return PluginBase.PluginResult(txtOutput, xmlOutput)
def process_task(self, target, command):
    """Connect to the target server and collect the CAs it advertises as
    acceptable for client certificates (if any)."""
    (_, _, _, ssl_version) = target
    ssl_conn = create_sslyze_connection(target, self._shared_settings, ssl_version)
    ca_list = []
    try:
        # Initiate the handshake; a ClientCertificateRequested error is the
        # signal that the server sent a list of acceptable client CAs.
        ssl_conn.connect()
    except ClientCertificateRequested:
        ca_list = ssl_conn.get_client_CA_list()
    finally:
        ssl_conn.close()

    text_output = [self.PLUGIN_TITLE_FORMAT(self.CMD_TITLE)]
    provided = "True" if ca_list else "False"
    xml_output = Element(command, title=self.CMD_TITLE, isProvided=provided)
    for ca in ca_list:
        text_output.append(self.FIELD_FORMAT('', str(ca)))
        ca_node = Element('ca')
        ca_node.text = ca
        xml_output.append(ca_node)
    return PluginBase.PluginResult(text_output, xml_output)
def _prepare_xml_request(self, module, leads): root = Element(module) # Row counter no = 1 for lead in leads: row = Element("row", no=str(no)) root.append(row) assert type(lead) == dict, "Leads must be dictionaries inside a list, got:" + str(type(lead)) for key, value in lead.items(): # <FL val="Lead Source">Web Download</FL> # <FL val="First Name">contacto 1</FL> fl = Element("FL", val=key) if type(value) == dict: # If it's an attached module, accept multiple groups mod_attach_no = 1 for module_key, module_value in value.items(): # The first group defines the module name, yank that and iterate through the contents for mod_item in module_value: mod_fl = SubElement(fl, module_key, no=str(mod_attach_no)) for mod_item_key, mod_item_value in mod_item.items(): attach_fl = SubElement(mod_fl, "FL", val=mod_item_key) attach_fl.text = mod_item_value mod_attach_no += 1 elif type(value) not in [str, unicode]: fl.text = str(value) else: fl.text = value row.append(fl) no += 1 return root
def process_task(self, target, command, args):
    """Check whether *target* returns an HTTP Strict Transport Security header."""
    if self._shared_settings['starttls']:
        raise Exception('Cannot use --hsts with --starttls.')
    hsts_supported = self._get_hsts_header(target)
    if hsts_supported:
        # _get_hsts_header() returns the raw header value (falsy when absent);
        # keep the value and collapse the flag to a boolean.
        hsts_timeout = hsts_supported
        hsts_supported = True
    # Text output
    cmd_title = 'HTTP Strict Transport Security'
    txt_result = [self.PLUGIN_TITLE_FORMAT(cmd_title)]
    if hsts_supported:
        txt_result.append(self.FIELD_FORMAT("OK - HSTS header received:", hsts_timeout))
    else:
        txt_result.append(self.FIELD_FORMAT("NOT SUPPORTED - Server did not send an HSTS header.", ""))
    # XML output: <hsts title=...><hsts sentHstsHeader=... [hstsHeaderValue=...]/></hsts>
    xml_hsts_attr = {'sentHstsHeader': str(hsts_supported)}
    if hsts_supported:
        xml_hsts_attr['hstsHeaderValue'] = hsts_timeout
    xml_hsts = Element('hsts', attrib = xml_hsts_attr)
    xml_result = Element('hsts', title = cmd_title)
    xml_result.append(xml_hsts)
    return PluginBase.PluginResult(txt_result, xml_result)
def xml_response(results, mytype):
    """Wrap *results* in a <data> document (one <mytype> child per entry) and
    return it as a text/xml Response."""
    root = Element('data')
    for item in results:
        node = Element(mytype)
        node.text = item
        root.append(node)
    body = '<?xml version="1.0" encoding="UTF-8"?>' + tostring(root)
    return Response(body, mimetype='text/xml')
def as_xml(self) -> Element:
    """Serialize this compression scan result as an XML element tree."""
    supported = "True" if self.compression_name else "False"
    result_node = Element(self.scan_command.get_cli_argument(), title=self.scan_command.get_title())
    result_node.append(Element('compressionMethod', type="DEFLATE", isSupported=supported))
    return result_node
def insert_math(block, line):
    """Convert an AsciiMath string to MathML and embed it into *block*.

    block : XSL-FO block Element that receives the formula.
    line  : AsciiMath source text.
    """
    # fo:instream-foreign-object carries non-FO (here: MathML) content inline.
    instream = Element(ns + 'instream-foreign-object')
    block.append(instream)
    # scriptlevel -2 enlarges sub/superscripts — presumably for print
    # legibility; TODO confirm intent.
    math_obj = asciitomathml.asciitomathml.AsciiMathML(mstyle={'scriptlevel': '-2'})
    math_obj.parse_string(line)
    math_tree = math_obj.get_tree()
    instream.append(math_tree)
def make_fo_tree():
    """Build the skeleton of an XSL-FO document (an A4 simple-page-master plus
    an empty region-body flow) and return the fo:root Element."""
    fo = Element(ns + 'root')
    fo.set('font-family', 'STIXGeneral,CharisSIL')
    lo_master = Element(ns + 'layout-master-set')
    fo.append(lo_master)
    # US-letter-sized page named "A4" with half-inch margins.
    sim_page_ms = Element(ns + 'simple-page-master', **{'master-name':'A4', 'page-height':'11in', 'page-width':'8.5in', 'margin-top':'0.5in', 'margin-left':'0.5in', 'margin-bottom':'.5in', 'margin-right':'.5in' } )
    lo_master.append(sim_page_ms)
    reg_bd = Element(ns + 'region-body', **{'margin-top':'1in'})
    sim_page_ms.append(reg_bd)
    ps = Element(ns + 'page-sequence', **{'master-reference':'A4'})
    fo.append(ps)
    flow = Element(ns + 'flow', **{'flow-name':'xsl-region-body'})
    ps.append(flow)
    return fo
    # NOTE(review): unreachable debug statement after return — looks like a
    # leftover (it may have been a module-level statement before the file was
    # reflowed). Confirm and remove.
    print(tostring(fo))
def as_xml(self):
    """XML representation of the error to be used in HTTP response.

    This XML format follows the IIIF Image API v1.0 specification, see
    <http://iiif.io/api/image/1.0/#error>
    """
    # One newline between elements when pretty output was requested.
    sep = "\n" if self.pretty_xml else ""
    root = Element('error', {'xmlns': I3F_NS})
    root.text = sep
    param_el = Element('parameter', {})
    param_el.text = self.parameter
    param_el.tail = sep
    root.append(param_el)
    if self.text:
        text_el = Element('text', {})
        text_el.text = self.text
        text_el.tail = sep
        root.append(text_el)
    # Serialize; Python < 2.7 lacks the xml_declaration keyword.
    buf = io.BytesIO()
    doc = ElementTree(root)
    if sys.version_info < (2, 7):
        doc.write(buf, encoding='UTF-8')
    else:
        doc.write(buf, encoding='UTF-8', xml_declaration=True, method='xml')
    return buf.getvalue().decode('utf-8')
def dict_to_xml(tag, d):
    """Turn a flat dict into <tag><key>str(value)</key>…</tag>."""
    root = Element(tag)
    for name, value in d.items():
        node = Element(name)
        node.text = str(value)
        root.append(node)
    return root
def write_header_elements(self, g):
    """Build the <graphHeader> element for graph *g*.

    Emits tag usage, then <dependencies> and <annotationSpaces> (always
    present, possibly empty), and a <roots> element only when roots exist.
    """
    graph_header = Element('graphHeader')
    graph_header.append(self.render_tag_usage(g))

    dependencies = SubElement(graph_header, 'dependencies')
    for dependency in (g.header.depends_on or []):
        if dependency:
            SubElement(dependencies, 'dependsOn', {'f.id': dependency})

    annotation_spaces = SubElement(graph_header, 'annotationSpaces')
    for aspace in (g.annotation_spaces or []):
        SubElement(annotation_spaces, 'annotationSpace', {'as.id': aspace.as_id})

    roots = g.header.roots
    if roots:
        roots_element = SubElement(graph_header, 'roots')
        for root_id in roots:
            if root_id:
                SubElement(roots_element, 'root').text = root_id

    return graph_header
def render_documentheader(self, standoffheader):
    """Create the documentHeader Element.

    Parameters
    ----------
    standoffheader : object
        Provides .version, .filedesc, .profiledesc and .datadesc.

    Returns
    -------
    documentheader : Element
        Primary element of the primary data document header.
    """
    now = datetime.datetime.now()
    pubDate = now.strftime("%Y-%m-%d")
    documentheader = Element('documentHeader',
                             {"xmlns": "http://www.xces.org/ns/GrAF/1.0/",
                              "xmlns:xlink": "http://www.w3.org/1999/xlink",
                              # Random id so repeated exports do not collide.
                              "docId": "PoioAPI-" + str(random.randint(1, 1000000)),
                              "version": standoffheader.version,
                              "creator": getpass.getuser(),
                              "date.created": pubDate})
    filedesc = self.render_filedesc(standoffheader.filedesc)
    profiledesc = self.render_profiledesc(standoffheader.profiledesc)
    datadesc = self.render_datadesc(standoffheader.datadesc)
    # Re-home the first two children of the data description under the
    # profile description. BUG FIX: Element.getchildren() was removed in
    # Python 3.9 — Elements support direct sequence indexing instead.
    profiledesc.append(datadesc[0])
    profiledesc.append(datadesc[1])
    documentheader.append(filedesc)
    documentheader.append(profiledesc)
    return documentheader
def _command_resum_rate(self, target): """ Performs 100 session resumptions with the server in order to estimate the session resumption rate. """ # Create a thread pool and process the jobs NB_THREADS = 20 MAX_RESUM = 100 thread_pool = ThreadPool() for _ in xrange(MAX_RESUM): thread_pool.add_job((self._resume_with_session_id, (target, ))) thread_pool.start(NB_THREADS) # Format session ID results (txt_resum, xml_resum) = self._format_resum_id_results(thread_pool, MAX_RESUM) # Text output cmd_title = 'Resumption Rate with Session IDs' txt_result = [self.PLUGIN_TITLE_FORMAT(cmd_title)+' '+ txt_resum[0]] txt_result.extend(txt_resum[1:]) # XML output xml_result = Element('resum_rate', title = cmd_title) xml_result.append(xml_resum) thread_pool.join() return PluginBase.PluginResult(txt_result, xml_result)
def sort_time(source):
    """Sort the source Element elements along their time (for annotations)
    and id (for relations).

    Returns a new Element
    """
    # New container carrying over the source tag and attributes.
    dest=Element(source.tag)
    dest.attrib.update(source.attrib)
    antag=tag('annotation')
    reltag=tag('relation')
    rel=[ e for e in source if e.tag == reltag ]
    rel.sort(cmp_id)  # Python 2 cmp-style sort
    an=[ e for e in source if e.tag == antag ]
    # Pre-parse begin times
    for a in an:
        f=a.find(tag('millisecond-fragment'))
        if f is not None:
            # Cache the parsed begin time on the element (Python 2 long).
            a._begin = long(f.attrib['begin'])
        else:
            print "Error: cannot find begin time for ", a.attrib['id']
            a._begin = 0
    an.sort(cmp_time)
    # Annotations (time-ordered) first, relations (id-ordered) last.
    for e in an:
        dest.append(e)
    for e in rel:
        dest.append(e)
    return dest
def as_xml(self):
    """Return this compression scan result as an XML Element."""
    is_supported = "True" if self.compression_name else "False"
    node = Element(self.plugin_command, title=self.COMMAND_TITLE)
    node.append(Element('compressionMethod', type="DEFLATE", isSupported=is_supported))
    return node
def makePayload(parms):
    """
    this method takes a dictionary of parameters and inserts them into the XML payload
    which is POSTed to the report service.

    :param parms: a dict of parameters and values for the iReport (a sequence
                  of (key, value) pairs is also accepted)
    :return: string, XML doc really, containing payload with parameters
    """
    result = fromstring('''<?xml version="1.0"?>
<ns2:invocationContext xmlns:ns2="http://collectionspace.org/services/common/invocable" xmlns:ns3="http://collectionspace.org/services/jaxb">
<mode>nocontext</mode>
<docType>CollectionObjectTenant15</docType>
<params/>
</ns2:invocationContext>''')
    p = result.find('params')
    # BUG FIX: iterating a dict directly yields only its keys, so the
    # documented dict input crashed on tuple unpacking. Use .items() for
    # dicts; sequences of (key, value) pairs keep working unchanged.
    pairs = parms.items() if isinstance(parms, dict) else parms
    for k, v in pairs:
        e = Element('param')
        key = Element('key')
        val = Element('value')
        key.text = k
        val.text = v
        e.append(key)
        e.append(val)
        p.append(e)
    return tostring(result)
def getItemsXML(expedition_id, category_id):
    """
    Endpoint to return an XML List of all items associated with a certain
    expedition and category
    :param expedition_id:
    :param category_id:
    """
    items = session.query(Item).filter_by(
        expedition_id=expedition_id, category_id=category_id).all()
    root = Element('allItems')
    comment = Comment('XML Endpoint Listing '
                      'all Item for a specific Category and Expedition')
    root.append(comment)
    for i in items:
        # One <expedition> node per item, nesting category and item details.
        ex = SubElement(root, 'expedition')
        ex.text = i.expedition.title
        category_name = SubElement(ex, 'category_name')
        category_description = SubElement(category_name, 'category_description')
        category_picture = SubElement(category_name, 'category_picture')
        category_name.text = i.category.name
        category_description.text = i.category.description
        category_picture.text = i.category.picture
        item_name = SubElement(category_name, 'item_name')
        item_decription = SubElement(item_name, 'item_description')
        item_picture = SubElement(item_name, 'item_picture')
        item_name.text = i.name
        item_decription.text = i.description
        item_picture.text = i.picture
    # NOTE(review): Python 2 debug print left in; consider removing.
    print tostring(root)
    return app.response_class(tostring(root), mimetype='application/xml')
def _get_metadata_node(location): node = Element('location_data') for key, value in location.metadata.items(): element = Element(key) element.text = value node.append(element) return node
def process_task(self, target, command, args):
    """Check client-initiated and secure renegotiation support on *target*."""
    ctSSL_initialize()
    try:
        (can_reneg, is_secure) = self._test_renegotiation(target)
    finally:
        ctSSL_cleanup()  # always release ctSSL state, even on failure

    # Text output
    cmd_title = 'Session Renegotiation'
    RENEG_FORMAT = ' {0:<35} {1}'
    txt_result = [self.PLUGIN_TITLE_FORMAT.format(cmd_title)]
    txt_result.append(RENEG_FORMAT.format('Client-initiated Renegotiations:', 'Honored' if can_reneg else 'Rejected'))
    txt_result.append(RENEG_FORMAT.format('Secure Renegotiation: ', 'Supported' if is_secure else 'Not supported'))

    # XML output
    xml_reneg = Element('sessionRenegotiation',
                        attrib={'canBeClientInitiated': str(can_reneg), 'isSecure': str(is_secure)})
    xml_result = Element(command, title=cmd_title)
    xml_result.append(xml_reneg)
    return PluginBase.PluginResult(txt_result, xml_result)
def execute(self, mappings, source):
    """Writes the given language code/name mappings to Android XML resource files.

    source = string indicating source of the data, for example, 'cldr'
    mappings = list of dictionaries containing mappings"""
    # In order to be able to to localize a particular, limited set of words across multiple
    # languages, here we define a list of language codes to support for every resource file
    # generated. Where localized language names are missing, a place holder is printed. If
    # ['*'] is specified, then all available language code/name pairs are generated.
    COVERAGE_LIST = ['*']
    # Get language names in English as a dict for inclusion in XML comments
    english_pairs = {}
    for entry in mappings:
        for k, v in entry.iteritems():  # Python 2 dict iteration
            if k == 'en':
                english_pairs = v
                break
    for entry in mappings:
        for k, v in entry.iteritems():
            # One Android values-<code> resource directory per language.
            dir = os.path.join(os.path.dirname(__file__), self.get_directory(source) + "../" + source + "-android/values-" + k)
            if not os.path.exists(dir):
                os.makedirs(dir)
            with open(dir + "/arrays.xml", "w") as f:
                top = Element('resources')
                # Comment the file with the English name of the language when known.
                if k in english_pairs.keys():
                    top_comment = ElementTree.Comment(' ' + english_pairs[k].decode('utf-8') + ' (' + k + ') ')
                else:
                    top_comment = ElementTree.Comment(' ' + k + ' ')
                top.append(top_comment)
                child = SubElement(top, 'string-array')
                child.attrib['name'] = 'languages_all'
                if '*' not in COVERAGE_LIST:
                    # Iterate through only those codes in COVERAGE_LIST
                    for lang_code in COVERAGE_LIST:
                        if lang_code in english_pairs.keys():
                            comment = ElementTree.Comment(' ' + lang_code + ' - ' + english_pairs[lang_code].decode('utf-8') + ' ')
                        else:
                            comment = ElementTree.Comment(' ' + lang_code + ' ')
                        child.append(comment)
                        entry = SubElement(child, 'item')
                        # Placeholder when no localized name exists for this code.
                        if lang_code in v.keys():
                            entry.text = v[lang_code].decode('utf-8')
                        else:
                            entry.text = "UNDEFINED"
                else:
                    # Iterate through all available language codes
                    for lang_code, lang_name in sorted(v.iteritems()):
                        if lang_code in english_pairs.keys():
                            comment = ElementTree.Comment(' ' + lang_code + ' - ' + english_pairs[lang_code].decode('utf-8') + ' ')
                        else:
                            comment = ElementTree.Comment(' ' + lang_code + ' ')
                        child.append(comment)
                        entry = SubElement(child, 'item')
                        entry.text = lang_name.decode('utf-8')
                f.write(self.prettify(top))
def _saveTracks(self, tracks): element = Element("tracks") for track in tracks: track_element = self._saveTrack(track) element.append(track_element) return element
def _build_gpx_rte(self, route):
    """Render one GPX <rte> element: properties, extensions, then the
    individual route points as <rtept> children."""
    rte = Element(ns + "rte")
    rte = self._dict2gpx(rte, route.properties)
    rte = self._extensions2gpx(rte, route.extensions)
    for point in route.rtepts:
        rte.append(self._build_gpx_wpt(point, tag="rtept"))
    return rte
def insertTimes(currList, listElem, origin):
    """Insert each stop child of *listElem* into *currList*, keeping the list
    ordered by distance from *origin* and merging stops that share both the
    same distance and the same title.

    Returns the (mutated) currList.
    """
    for stop in listElem.getchildren():
        # Get stop distance from origin
        sList = list(Stop.objects.filter(tag=stop.get('tag')))
        stop_point = sList[0].get_point()
        flag = False
        #Add title to the stop dictionary
        stop_title = sList[0].get_title().__str__()
        stop_list_node = Element('stop_list')
        stop_list_node.append(stop)
        appendItem = {'stop':stop_list_node, 'title':stop_title, 'distance':vincenty(stop_point, origin).mi, 'point': stop_point, 'tag':stop.get('tag'), 'route':stop.get('route')}
        #print 'Append: {0}'.format(appendItem)
        #Where does it go
        for curr in currList:
            #print 'Stop: {0}'.format(stop)
            if curr['distance'] == vincenty(stop_point, origin).mi:
                #If distance is the same, && Location is the same ( Same stop title)
                # Then add stop the list of curr['stop']
                if curr['title'] == stop_title:
                    curr['stop'].append(stop)
                    # NOTE(review): flag is not set and the loop not broken in
                    # this merge branch, so the same stop may also be appended
                    # as a new entry below — confirm whether the duplication
                    # is intended.
                #Otherwise insert into list as new entry before current.
                else:
                    currList.insert(currList.index(curr), appendItem)
                    flag = True
                    break
            # If current in list is closer than new stop, add new stop before current and break.
            elif curr['distance'] > stop_point.distance(origin):
                currList.insert(currList.index(curr), appendItem)
                flag = True
                break
        if flag is False:
            currList.append(appendItem)
    return currList
def to_xml(self):
    """Serialize this tier as a <tierDtos> element tree (instance counts,
    identity, image/flavour/keypair/floating-ip, plus any product children)."""
    tier_dtos = Element("tierDtos")
    simple_fields = [
        ("minimumNumberInstances", self.tier_num_min),
        ("initialNumberInstances", self.tier_num_initial),
        ("maximumNumberInstances", self.tier_num_max),
        ("name", self.name),
        ("image", self.tier_image),
        ("flavour", self.tier_flavour),
        ("keypair", self.tier_keypair),
        ("floatingip", self.tier_floatingip),
    ]
    for tag, value in simple_fields:
        SubElement(tier_dtos, tag).text = value
    if self.products:
        for product in self.products:
            tier_dtos.append(product.to_product_xml_env())
    return tier_dtos
def visitClass(self, node, parent = None):
    """Visitor callback for a class definition node (legacy `compiler` AST).

    Emits a <class> element with name/line/endline attributes and an optional
    <doc> child, attaches it under *parent*, then recurses into the class body.
    """
    # build the class element
    element = Element('class')
    element.set('name', node.name)
    element.set('line', str(node.lineno))
    # get the end of the class
    def walk(n,e):
        # Deepest lineno found anywhere under n is the class's last line.
        for c in n.getChildNodes():
            if c.lineno > e:
                e = c.lineno
            e = walk(c,e)
        return e
    endline = walk(node, node.lineno)
    element.set('endline', str(endline))
    # add class docs
    if node.doc:
        docElement = Element('doc')
        docElement.text = node.doc
        element.append(docElement)
    # add ourselves to the hierarchy
    self.append(parent, element)
    # walk our children, now we are the parent
    self.walkChildren(node, element)
def server_scan_completed(self, server_scan_result: CompletedServerScan) -> None:
    """Append one <target> node (server info plus every plugin's XML output)
    to the aggregated results tree."""
    server_info = server_scan_result.server_info
    target_attrib = {
        'host': server_info.hostname,
        'port': str(server_info.port),
        'tlsWrappedProtocol': self.TLS_PROTOCOL_XML_TEXT[server_info.tls_wrapped_protocol],
    }
    tunnel_settings = server_info.http_tunneling_settings
    if tunnel_settings:
        # Scanning through an HTTP proxy: record the tunnel endpoint instead of an IP.
        target_attrib['httpsTunnelHostname'] = tunnel_settings.hostname
        target_attrib['httpsTunnelPort'] = str(tunnel_settings.port)
    elif server_info.ip_address:
        # Direct connection: we know the resolved IP.
        target_attrib['ip'] = server_info.ip_address
    else:
        raise RuntimeError('Should never happen')

    server_scan_node = Element('target', attrib=target_attrib)
    # Deterministic ordering keeps the XML output diff-able between runs.
    server_scan_result.plugin_result_list.sort(key=lambda result: result.scan_command.get_cli_argument())
    for plugin_result in server_scan_result.plugin_result_list:
        server_scan_node.append(plugin_result.as_xml())
    self._xml_results_node.append(server_scan_node)
def convert_to_xml(self):
    """Aggregate every suite's XML under a single <testsuites> root."""
    root = Element('testsuites')
    for suite in self.suites:
        root.append(suite.convert_to_xml())
    return root
def to_xml(self):
    """Serialize the survey to an ElementTree.

    To persist it: tree.write("output.xml", encoding="ISO-8859-1", method="xml")
    """
    survey = Element('survey')
    SubElement(survey, 'title').text = self.title
    SubElement(survey, 'description').text = self.description
    SubElement(survey, 'maxNumberRespondents').text = str(self.maxNumberRespondents)
    SubElement(survey, 'duration').text = str(self.duration)
    # Consents first, then the question sections, mirroring display order.
    for consent in self.consents:
        survey.append(consent.to_xml())
    for section in self.sections:
        survey.append(section.to_xml())
    return ET.ElementTree(survey)
def _saveTimelineObjects(self, timeline_objects): element = Element("timeline-objects") for timeline_object in timeline_objects: timeline_object_element = self._saveTimelineObject(timeline_object) element.append(timeline_object_element) return element
def main():
    """Variable-audit entry point.

    Reads the UEFI variables listed in an input XML, probes read and write
    access for each, restores any variable the test write modified, records
    the per-variable results as a <FromOs> node, and writes the annotated
    XML out. Returns 0 on success, a negative code on bad arguments.
    """
    parser = argparse.ArgumentParser(description='Variable Audit Tool')
    #Output debug log
    parser.add_argument("-l", dest="OutputLog", help="Create an output log file: ie -l out.txt", default=None)
    parser.add_argument("--OutputXml", dest="OutputXml", help="Output Xml file that contains final results", default=None)
    parser.add_argument("--InputXml", dest="InputXml", help="Input Xml file", default=None)
    #Turn on dubug level logging
    parser.add_argument("--debug", action="store_true", dest="debug", help="turn on debug logging level for file log", default=False)
    options = parser.parse_args()

    #setup file based logging if outputReport specified
    if(options.OutputLog):
        if(len(options.OutputLog) < 2):
            logging.critical("the output log file parameter is invalid")
            return -2
        else:
            #setup file based logging
            filelogger = logging.FileHandler(filename=options.OutputLog, mode='w')
            if(options.debug):
                filelogger.setLevel(logging.DEBUG)
            else:
                filelogger.setLevel(logging.INFO)
            filelogger.setFormatter(formatter)
            logging.getLogger('').addHandler(filelogger)
    logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p" ))

    #Check for required input parameters
    if(not options.InputXml) or (not os.path.isfile(options.InputXml)):
        logging.critical("No Input Xml file specified")
        return -1
    if(not options.OutputXml):
        logging.critical("Output Xml file path not specified")
        return -2

    Uefi = UefiVariable()
    #read in XML file as doc
    XmlFile = ET.parse(options.InputXml)
    XmlRoot = XmlFile.getroot()
    for var in XmlRoot.findall("Variable"):
        name = var.get("Name")
        guid = var.get("Guid")
        # Probe read access first, then write access (the write clobbers data).
        (ReadStatus, Data, ReadErrorString) = Uefi.GetUefiVar(name, guid)
        (WriteSuccess, ErrorCode, WriteErrorString)= Uefi.SetUefiVar(name, guid)
        if(WriteSuccess != 0):
            # The test write succeeded, so put the original data back.
            logging.info("Must Restore Var %s:%s" % (name, guid))
            (RestoreSuccess, RestoreEC, RestoreErrorString) = Uefi.SetUefiVar(name, guid, Data)
            if (RestoreSuccess == 0):
                logging.critical("Restoring failed for Var %s:%s 0x%X ErrorCode: 0x%X %s" % (name, guid, RestoreSuccess, RestoreEC, RestoreErrorString))
        #append
        #<FromOs>
        #<ReadStatus>0x0 Success</ReadStatus>
        #<WriteStatus>0x8000000000000002 Invalid Parameter</WriteStatus>
        ele = Element("FromOs")
        rs = Element("ReadStatus")
        ws = Element("WriteStatus")
        rs.text = "0x%lX" % (ReadStatus)
        if(ReadErrorString is not None):
            rs.text = rs.text + " %s" % ReadErrorString
        ws.text = "0x%lX" % ErrorCode
        if(WriteErrorString is not None):
            ws.text = ws.text + " %s" % WriteErrorString
        ele.append(rs)
        ele.append(ws)
        var.append(ele)
    XmlFile.write(options.OutputXml)
    return 0
from xml.etree.ElementTree import Element, dump, SubElement

# Build a tiny XML document by hand to compare Element.append with SubElement.
note = Element('note')
to = Element('to')  # child node
to.text = "Tove"
note.append(to)
from_tag = SubElement(note, "from_tag")
from_tag.text = "Jani"
# SubElement creates a tag and attaches it as a child node in one step:
# here it adds a child named "from_tag" under the "note" tag and sets its
# text to "Jani" — the same result as creating an Element and appending it.
dump(note)
def main():
    """SSLyze command-line entry point (Python 2): discover plugins, parse
    arguments, fan scan tasks out to worker processes, then aggregate and
    print/serialize the results."""
    #--PLUGINS INITIALIZATION--
    start_time = time()
    print '\n\n\n' + _format_title('Registering available plugins')
    sslyze_plugins = PluginsFinder()
    available_plugins = sslyze_plugins.get_plugins()
    available_commands = sslyze_plugins.get_commands()
    print ''
    for plugin in available_plugins:
        print ' ' + plugin.__name__
    print '\n\n'
    # Create the command line parser and the list of available options
    sslyze_parser = CommandLineParser(available_plugins, PROJECT_VERSION)
    try:
        # Parse the command line
        (command_list, target_list, shared_settings) = sslyze_parser.parse_command_line()
    except CommandLineParsingError as e:
        print e.get_error_msg()
        return

    #--PROCESSES INITIALIZATION--
    # Three processes per target from MIN_PROCESSES up to MAX_PROCESSES
    nb_processes = max(MIN_PROCESSES, min(MAX_PROCESSES, len(target_list)*3))
    if command_list.https_tunnel:
        nb_processes = 1 # Let's not kill the proxy

    task_queue = JoinableQueue() # Processes get tasks from task_queue and
    result_queue = JoinableQueue() # put the result of each task in result_queue

    # Spawn a pool of processes, and pass them the queues
    process_list = []
    for _ in xrange(nb_processes):
        priority_queue = JoinableQueue() # Each process gets a priority queue
        p = WorkerProcess(priority_queue, task_queue, result_queue, available_commands, \
                          shared_settings)
        p.start()
        process_list.append((p, priority_queue)) # Keep track of each process and priority_queue

    #--TESTING SECTION--
    # Figure out which hosts are up and fill the task queue with work to do
    print _format_title('Checking host(s) availability')

    targets_OK = []
    targets_ERR = []
    # Each server gets assigned a priority queue for aggressive commands
    # so that they're never run in parallel against this single server
    cycle_priority_queues = cycle(process_list)
    target_results = ServersConnectivityTester.test_server_list(target_list, shared_settings)
    for target in target_results:
        if target is None:
            break # None is a sentinel here
        # Send tasks to worker processes
        targets_OK.append(target)
        (_, current_priority_queue) = cycle_priority_queues.next()
        for command in available_commands:
            if getattr(command_list, command):
                args = command_list.__dict__[command]
                if command in sslyze_plugins.get_aggressive_commands():
                    # Aggressive commands should not be run in parallel against
                    # a given server so we use the priority queues to prevent this
                    current_priority_queue.put( (target, command, args) )
                else:
                    # Normal commands get put in the standard/shared queue
                    task_queue.put( (target, command, args) )

    # Once the sentinel was hit, the remaining iterator entries are exceptions.
    for exception in target_results:
        targets_ERR.append(exception)

    print ServersConnectivityTester.get_printable_result(targets_OK, targets_ERR)
    print '\n\n'

    # Put a 'None' sentinel in the queue to let the each process know when every
    # task has been completed
    for (proc, priority_queue) in process_list:
        task_queue.put(None) # One sentinel in the task_queue per proc
        priority_queue.put(None) # One sentinel in each priority_queue

    # Keep track of how many tasks have to be performed for each target
    task_num=0
    for command in available_commands:
        if getattr(command_list, command):
            task_num+=1

    # --REPORTING SECTION--
    processes_running = nb_processes
    # XML output
    xml_output_list = []
    # Each host has a list of results
    result_dict = {}
    for target in targets_OK:
        result_dict[target] = []

    # If all processes have stopped, all the work is done
    while processes_running:
        result = result_queue.get()
        if result is None: # Getting None means that one process was done
            processes_running -= 1
        else: # Getting an actual result
            (target, command, plugin_result) = result
            result_dict[target].append((command, plugin_result))
            if len(result_dict[target]) == task_num: # Done with this target
                # Print the results and update the xml doc
                print _format_txt_target_result(target, result_dict[target])
                if shared_settings['xml_file']:
                    xml_output_list.append(_format_xml_target_result(target, result_dict[target]))
        result_queue.task_done()

    # --TERMINATE--
    # Make sure all the processes had time to terminate
    task_queue.join()
    result_queue.join()
    #[process.join() for process in process_list] # Causes interpreter shutdown errors
    exec_time = time()-start_time

    # Output XML doc to a file if needed
    if shared_settings['xml_file']:
        result_xml_attr = {'httpsTunnel':str(shared_settings['https_tunnel_host']),
                           'totalScanTime' : str(exec_time),
                           'defaultTimeout' : str(shared_settings['timeout']),
                           'startTLS' : str(shared_settings['starttls'])}
        result_xml = Element('results', attrib = result_xml_attr)
        # Sort results in alphabetical order to make the XML files (somewhat) diff-able
        xml_output_list.sort(key=lambda xml_elem: xml_elem.attrib['host'])
        for xml_element in xml_output_list:
            result_xml.append(xml_element)
        xml_final_doc = Element('document', title = "SSLyze Scan Results",
                                SSLyzeVersion = PROJECT_VERSION,
                                SSLyzeWeb = PROJECT_URL)
        # Add the list of invalid targets
        xml_final_doc.append(ServersConnectivityTester.get_xml_result(targets_ERR))
        # Add the output of the plugins
        xml_final_doc.append(result_xml)
        # Hack: Prettify the XML file so it's (somewhat) diff-able
        xml_final_pretty = minidom.parseString(tostring(xml_final_doc, encoding='UTF-8'))
        with open(shared_settings['xml_file'],'w') as xml_file:
            xml_file.write(xml_final_pretty.toprettyxml(indent=" ", encoding="utf-8" ))

    print _format_title('Scan Completed in {0:.2f} s'.format(exec_time))
# stduent Element에서 subelement인 practicable_computer_languages 생성 practicable = SubElement(student, "practicable_computer_languages") while True: print("사용 가능한 컴퓨터 언어를 입력하세요.") add_lang_name = input(" > 언어 이름(종료는 'Enter' 입력): ") if add_lang_name == '': break add_lang_period = input(" > 학습 기간(년/개월 단위): ") add_lang_level = input(" > 수준(상,중,하): ") # 개발언어, 수준의 속성을 가지는 language Element 생성하고 값 삽입 language = Element("language", name=add_lang_name, level=add_lang_level) # 개발기간의 속성을 가지는 period Element 생성하고 값 삽입 period = Element("period", value=add_lang_period) language.append( period) # period Element를 language Element의 하위 element로 연결 practicable.append( language ) # language Element를 practicable_computer_lanuguages의 하위 element로 연결 root.append(student) ElementTree(root).write("students_info.xml") elif menu_input == 3: # 조회 while True: read_menu_input = input( "<조회 서브 메뉴>\n1. 개별 학생 조회\n2. 전체 학생 조회\n3. 상위 메뉴\n4. 종료\n메뉴 입력 : " ) if read_menu_input == '1': while True: saved_number = []
def handle(self, *args, **options):
    """Export the author-interaction graph as a GEXF XML document.

    Emits one <node> per distinct author (of newsitems and comments) and one
    <edge> per comment, pointing either at the commenter (for replies) or at
    the author of the commented newsitem.  The document is written to the
    file object in ``options['output']``, which is closed afterwards.
    """
    # BUGFIX: the original called super(...).handle(self, *args, **options),
    # passing the instance twice (the super() proxy already binds self).
    super(Command, self).handle(*args, **options)
    if len(self.selected_dbs) != 1:
        self.stdout.write(
            self.style.ERROR(
                'You need to select exactly one database for this command')
        )
        return
    selected_db = self.selected_dbs[0]

    nodes = Element('nodes')
    edges = Element('edges')
    authors = set()  # author ids already emitted as <node> elements

    self.stdout.write("Processing NewsItems")
    queryset = Newsitem.objects.using(selected_db).only('id').all()
    self.pbar_setup(maxval=queryset.count())
    for newsitem in queryset_iterator(queryset, chunksize=10000):
        author_id = "A{}".format(newsitem.idauthor)
        # BUGFIX: guard against duplicates — the original appended one <node>
        # per newsitem, so authors with several items appeared repeatedly
        # (the comment loop below already had this guard).
        if author_id not in authors:
            nodes.append(
                Element('node', attrib={
                    'id': author_id,
                    'label': "Author {}".format(newsitem.idauthor),
                }))
            authors.add(author_id)
        self.pbar_increment()
    self.pbar_destroy()

    self.stdout.write("Processing Comments")
    queryset = Comment.objects.using(selected_db)\
        .all()
    edge_id = 0
    self.pbar_setup(maxval=queryset.count())
    for comment in queryset_iterator(queryset, chunksize=10000):
        author_id = "A{}".format(comment.authorid)
        if author_id not in authors:
            nodes.append(
                Element('node', attrib={
                    'id': author_id,
                    'label': "Author {}".format(comment.authorid),
                }))
            authors.add(author_id)
        if comment.parent_id:
            # Reply to another comment.
            # NOTE(review): this targets the commenter itself (self-loop);
            # presumably it should target the parent comment's author —
            # confirm the intended graph semantics.
            target_id = author_id
        else:
            try:
                target_id = "A{}".format(comment.newsitem.idauthor)
            except Newsitem.DoesNotExist:
                # Orphan comment: no newsitem to link to, skip the edge.
                continue
        edges.append(
            Element('edge', attrib={
                'id': str(edge_id),
                'source': author_id,
                'target': target_id,
                'type': 'directed'
            }))
        edge_id += 1
        self.pbar_increment()
    self.pbar_destroy()

    self.stdout.write("Finishing...")
    graph = Element('graph', attrib={
        'mode': 'dynamic',
        'defaultedgetype': 'directed',
    })
    graph.append(nodes)
    graph.append(edges)

    meta = Element(
        'meta',
        attrib={'lastmodifieddate': datetime.date.today().isoformat()})
    # BUGFIX: Element() has no `text` keyword — the original's
    # Element('creator', text="...") produced an XML *attribute* named
    # "text" instead of element text.  Set the .text property instead.
    creator = Element('creator')
    creator.text = "Iris Steenhout"
    meta.append(creator)
    description = Element('description')
    description.text = "Newsitems and it's comments."
    meta.append(description)

    gexf = Element('gexf', attrib={
        'xmlns': 'http://www.gexf.net/1.2draft',
        'version': '1.2',
    })
    gexf.append(meta)
    gexf.append(graph)

    self.stdout.write("Writing...")
    options['output'].write(tostring(gexf, encoding="UTF-8"))
    options['output'].close()
    self.stdout.write(self.style.SUCCESS('Command executed succesfully'))
# -*- coding: utf-8 -*-
"""Two ways to create an xml file:
tree = ElementTree.parse("path")
tree = ElementTree.ElementTree(root Element object)
"""
from xml.etree.ElementTree import Element, ElementTree
from xml.etree.ElementTree import tostring

# Root element with one attribute and some text content.
e = Element('Data')
e.set('name', 'abc')
e.text = '123'
# tostring() produces the raw serialization, with no '\n' or '\t':
# b'<Data name="abc">123</Data>' — hard to read for large documents.
print( tostring(e) )

# Build a small subtree: Data > Row > Open.
e3 = Element('Open')
e3.text = '8.80'
e2 = Element('Row')
e2.append(e3)
e.append(e2)
print(tostring(e))

# An ElementTree only needs the root node; write() serializes to a file.
et = ElementTree(e)
et.write('demo02.xml')
def etree(cls):
    """Build a MFACHALLENGERS aggregate holding two MFACHALLENGE children."""
    parent = Element("MFACHALLENGERS")
    for _ in range(2):
        parent.append(MfachallengeTestCase.etree)
    return parent
def process_task(self, target, command, arg):
    """Scan *target*'s certificate: content, trust-path validation against
    every available trust store, hostname match, and OCSP stapling.

    Returns a PluginBase.PluginResult carrying both the text report and the
    XML report.  Python 2 code (uses dict.iteritems()).
    """
    # Select the text formatter based on the requested level of detail.
    if arg == 'basic':
        txt_output_generator = self._get_basic_text
    elif arg == 'full':
        txt_output_generator = self._get_full_text
    else:
        raise Exception("PluginCertInfo: Unknown command.")

    (host, _, _, _) = target
    thread_pool = ThreadPool()

    # A user-supplied CA file is treated as one more trust store.
    if 'ca_file' in self._shared_settings and self._shared_settings[
            'ca_file']:
        AVAILABLE_TRUST_STORES[self._shared_settings['ca_file']] = (
            'Custom --ca_file', 'N/A')

    for (store_path, _) in AVAILABLE_TRUST_STORES.iteritems():
        # Try to connect with each trust store
        thread_pool.add_job((self._get_cert, (target, store_path)))

    # Start processing the jobs
    thread_pool.start(len(AVAILABLE_TRUST_STORES))

    # Store the results as they come
    x509_cert_chain = []
    (verify_dict, verify_dict_error, x509_cert, ocsp_response) = ({}, {},
                                                                  None, None)

    for (job, result) in thread_pool.get_result():
        (_, (_, store_path)) = job
        (x509_cert_chain, verify_str, ocsp_response) = result
        # Store the returned verify string for each trust store
        x509_cert = x509_cert_chain[
            0]  # First cert is always the leaf cert
        store_info = AVAILABLE_TRUST_STORES[store_path]
        verify_dict[store_info] = verify_str

    if x509_cert is None:
        # This means none of the connections were successful. Get out
        # by re-raising the first recorded connection error.
        for (job, exception) in thread_pool.get_error():
            raise exception

    # Store thread pool errors
    # NOTE(review): get_error() is consumed a second time here; this only
    # works if the pool yields remaining errors on repeat calls — confirm.
    for (job, exception) in thread_pool.get_error():
        (_, (_, store_path)) = job
        error_msg = str(
            exception.__class__.__name__) + ' - ' + str(exception)
        store_info = AVAILABLE_TRUST_STORES[store_path]
        verify_dict_error[store_info] = error_msg

    thread_pool.join()

    # Results formatting
    # Text output - certificate info
    text_output = [self.PLUGIN_TITLE_FORMAT('Certificate - Content')]
    text_output.extend(txt_output_generator(x509_cert))

    # Text output - trust validation
    text_output.extend(
        ['', self.PLUGIN_TITLE_FORMAT('Certificate - Trust')])

    # Hostname validation
    if self._shared_settings['sni']:
        text_output.append(
            self.FIELD_FORMAT("SNI enabled with virtual domain:",
                              self._shared_settings['sni']))
    # TODO: Use SNI name for validation when --sni was used
    host_val_dict = {
        X509_NAME_MATCHES_SAN: 'OK - Subject Alternative Name matches',
        X509_NAME_MATCHES_CN: 'OK - Common Name matches',
        X509_NAME_MISMATCH: 'FAILED - Certificate does NOT match ' + host
    }
    text_output.append(
        self.FIELD_FORMAT("Hostname Validation:",
                          host_val_dict[x509_cert.matches_hostname(host)]))

    # Path validation that was successful
    # NOTE(review): `verify_str in 'ok'` is a substring test — it is also
    # true for '', 'o' and 'k', not just 'ok'.  Confirm this is intended.
    for ((store_name, store_version), verify_str) in verify_dict.iteritems():
        verify_txt = 'OK - Certificate is trusted' if (verify_str in 'ok') \
            else 'FAILED - Certificate is NOT Trusted: ' + verify_str

        # EV certs - Only Mozilla supported for now
        # NOTE(review): store_info here is whatever the *last* iteration of
        # the result loop left behind, not this entry's store — looks buggy.
        if (verify_str in 'ok') and ('Mozilla' in store_info):
            if self._is_ev_certificate(x509_cert):
                verify_txt += ', Extended Validation'
        text_output.append(
            self.FIELD_FORMAT(
                self.TRUST_FORMAT(store_name=store_name,
                                  store_version=store_version), verify_txt))

    # Path validation that ran into errors
    for ((store_name, store_version),
         error_msg) in verify_dict_error.iteritems():
        verify_txt = 'ERROR: ' + error_msg
        text_output.append(
            self.FIELD_FORMAT(
                self.TRUST_FORMAT(store_name=store_name,
                                  store_version=store_version), verify_txt))

    # Print the Common Names within the certificate chain
    cns_in_cert_chain = []
    for cert in x509_cert_chain:
        cert_identity = self._extract_subject_cn_or_oun(cert)
        cns_in_cert_chain.append(cert_identity)
    text_output.append(
        self.FIELD_FORMAT('Certificate Chain Received:',
                          str(cns_in_cert_chain)))

    # Text output - OCSP stapling
    text_output.extend(
        ['', self.PLUGIN_TITLE_FORMAT('Certificate - OCSP Stapling')])
    text_output.extend(self._get_ocsp_text(ocsp_response))

    # XML output
    xml_output = Element(command, argument=arg,
                         title='Certificate Information')

    # XML output - certificate chain:  always return the full certificate
    # for each cert in the chain
    cert_chain_xml = Element('certificateChain')

    # First add the leaf certificate
    cert_chain_xml.append(
        self._format_cert_to_xml(x509_cert_chain[0], 'leaf',
                                 self._shared_settings['sni']))

    # Then add every other cert in the chain
    for cert in x509_cert_chain[1:]:
        cert_chain_xml.append(
            self._format_cert_to_xml(cert, 'intermediate',
                                     self._shared_settings['sni']))
    xml_output.append(cert_chain_xml)

    # XML output - trust
    trust_validation_xml = Element('certificateValidation')

    # Hostname validation
    is_hostname_valid = 'False' if (x509_cert.matches_hostname(host) ==
                                    X509_NAME_MISMATCH) else 'True'
    host_validation_xml = Element(
        'hostnameValidation',
        serverHostname=host,
        certificateMatchesServerHostname=is_hostname_valid)
    trust_validation_xml.append(host_validation_xml)

    # Path validation - OK
    for ((store_name, store_version), verify_str) in verify_dict.iteritems():
        path_attrib_xml = {
            'usingTrustStore': store_name,
            'trustStoreVersion': store_version,
            'validationResult': verify_str
        }

        # EV certs - Only Mozilla supported for now
        # NOTE(review): same stale store_info issue as in the text section.
        if (verify_str in 'ok') and ('Mozilla' in store_info):
            path_attrib_xml['isExtendedValidationCertificate'] = str(
                self._is_ev_certificate(x509_cert))

        trust_validation_xml.append(
            Element('pathValidation', attrib=path_attrib_xml))

    # Path validation - Errors
    for ((store_name, store_version),
         error_msg) in verify_dict_error.iteritems():
        path_attrib_xml = {
            'usingTrustStore': store_name,
            'trustStoreVersion': store_version,
            'error': error_msg
        }
        trust_validation_xml.append(
            Element('pathValidation', attrib=path_attrib_xml))

    xml_output.append(trust_validation_xml)

    # XML output - OCSP Stapling
    if ocsp_response is None:
        ocsp_attr_xml = {'isSupported': 'False'}
    else:
        ocsp_attr_xml = {'isSupported': 'True'}
    ocsp_xml = Element('ocspStapling', attrib=ocsp_attr_xml)
    if ocsp_response:
        # Verify the stapled response against the Mozilla store.
        try:
            ocsp_resp_trusted = str(
                ocsp_response.verify(MOZILLA_STORE_PATH))
        except OpenSSLError as e:
            if 'certificate verify error' in str(e):
                ocsp_resp_trusted = 'False'
            else:
                raise

        ocsp_resp_attr_xml = {
            'isTrustedByMozillaCAStore': ocsp_resp_trusted
        }
        ocsp_resp_xmp = Element('ocspResponse', attrib=ocsp_resp_attr_xml)
        for (key, value) in ocsp_response.as_dict().items():
            ocsp_resp_xmp.append(_keyvalue_pair_to_xml(key, value))
        ocsp_xml.append(ocsp_resp_xmp)
    xml_output.append(ocsp_xml)

    return PluginBase.PluginResult(text_output, xml_output)
def add_edges(self, G, graph_element):
    """Serialize every edge of *G* into an <edges> element appended to
    *graph_element*, assigning each edge a unique string id."""

    def _claim_edge_id(edge_data):
        # Use an explicit 'id' from the data, otherwise draw fresh ids
        # from the counter until one is not already taken.
        eid = edge_data.pop("id", None)
        if eid is None:
            eid = next(self.edge_id)
            while str(eid) in self.all_edge_ids:
                eid = next(self.edge_id)
        self.all_edge_ids.add(str(eid))
        return eid

    def _iter_edges(G):
        # Yield (u, v, id, data) uniformly for graphs and multigraphs.
        if G.is_multigraph():
            for u, v, key, data in G.edges(data=True, keys=True):
                edge_data = data.copy()
                edge_data.update(key=key)
                yield u, v, _claim_edge_id(edge_data), edge_data
        else:
            for u, v, data in G.edges(data=True):
                edge_data = data.copy()
                yield u, v, _claim_edge_id(edge_data), edge_data

    edges_element = Element("edges")
    for u, v, key, edge_data in _iter_edges(G):
        kw = {"id": str(key)}
        if "weight" in edge_data:
            kw["weight"] = str(edge_data.pop("weight"))
        if "type" in edge_data:
            kw["type"] = str(edge_data.pop("type"))
        # 'start'/'end' spells may force the graph into dynamic mode.
        for bound in ("start", "end"):
            if bound in edge_data:
                value = edge_data.pop(bound)
                kw[bound] = str(value)
                self.alter_graph_mode_timeformat(value)
        source_id = str(G.nodes[u].get("id", u))
        target_id = str(G.nodes[v].get("id", v))
        edge_element = Element("edge", source=source_id, target=target_id, **kw)
        default = G.graph.get("edge_default", {})
        if self.VERSION == "1.1":
            edge_data = self.add_slices(edge_element, edge_data)
        else:
            edge_data = self.add_spells(edge_element, edge_data)
        edge_data = self.add_viz(edge_element, edge_data)
        edge_data = self.add_attributes("edge", edge_element, edge_data, default)
        edges_element.append(edge_element)
    graph_element.append(edges_element)
def createXML(sources, startFrame, endFrame, fps, timecode, audioRate,
              ardourBasename, audiosFolder):
    '''Creates full Ardour XML to be written to a file.

    Returns (Session, sources): the root <Session> Element and the list of
    source dicts.  Mutates the module-level idCounter.

    NOTE(review): the `sources` parameter is immediately overwritten by
    getAudioTimeline() below — confirm the parameter is still needed.
    '''
    global idCounter
    sources, repeated, tracks, idCounter = getAudioTimeline(audioRate, fps)
    # Unique track names, descending order.
    tracks = sorted(set(tracks))[::-1]
    sampleFormat = checkSampleFormat()
    # Frame numbers are 1-based; convert to 0-based sample positions.
    ardourStart = toSamples((startFrame - 1), audioRate, fps)
    ardourEnd = toSamples((endFrame - 1), audioRate, fps)

    ######## ------------------------------------------------------------------
    ######## STATIC XML SECTIONS
    ######## ------------------------------------------------------------------
    Session = Element("Session")  # XML root = Session
    # NOTE(review): `tree` is never used afterwards — dead assignment?
    tree = ElementTree(Session)

    # Create Session Elements + Attributes
    # The fixed order matters: sections are addressed below by index
    # (Session[0]=Config, Session[4]=Locations, Session[10]=Click,
    #  Session[12]=TempoMap).
    xmlSections = [
        "Config", "Metadata", "Sources", "Regions", "Locations", "Bundles",
        "Routes", "Playlists", "UnusedPlaylists", "RouteGroups", "Click",
        "Speakers", "TempoMap", "ControlProtocols", "Extra"
    ]
    for section in xmlSections:
        Session.append(Element(section))

    # Create Option, IO, Tempo and Meter + Attributes
    for counter in range(
            valLength(atOption(audiosFolder, sampleFormat, timecode))):
        Option = SubElement(Session[0], "Option")  # Session > Config > Option
        createSubElementsMulti(Option,
                               atOption(audiosFolder, sampleFormat, timecode),
                               counter)

    Location = SubElement(Session[4], "Location")  # Session > Locations > Location
    IO = SubElement(Session[10], "IO")  # Session > Click > IO
    Tempo = SubElement(Session[12], "Tempo")  # Session > TempoMap > Tempo
    Meter = SubElement(Session[12], "Meter")  # Session > TempoMap > Meter

    # Each createSubElements() call consumes the current idCounter value;
    # the counter is bumped between consumers to keep ids unique.
    createSubElements(Session, atSession(audioRate, ardourBasename, idCounter))
    createSubElements(Location, atLocation(ardourStart, ardourEnd, idCounter))
    idCounter += 1
    createSubElements(IO, atIO(idCounter))
    idCounter += 1
    createSubElements(Tempo, atTempo())
    createSubElements(Meter, atMeter())

    Port = ""
    for counter in range(valLength(atPort())):
        Port = SubElement(IO, "Port")  # Session > Click > IO > Port
        createSubElementsMulti(Port, atPort(), counter)

    ######## ------------------------------------------------------------------
    ######## DYNAMIC XML SECTIONS
    ######## ------------------------------------------------------------------
    # create sources and sources' regions
    for source in sources:
        createAudioSources(Session, source)

    # create another sources' entry for stereo files
    # (mono sources flagged channels == 1 get a second, offset id)
    stereoSources = []
    for source in sources:
        if (source['channels'] == 1):
            source['id'] = int(source['id'] + idCounter)
            createAudioSources(Session, source, 1)
            stereoSources.append(source)
            idCounter += 1

    # create playlists (tracks)
    for track in tracks:
        createPlaylists(Session, idCounter, track)

    # correct reference to master-source-0 and source-0 in repeated audios
    for rep in repeated:
        for sour in sources:
            if (rep['name'] == sour['name']):
                rep['sourceID'] = sour['id']

    # create playlists regions (timeline)
    for audio in (sources + repeated):
        track = tracks.index(audio['track'])
        if (audio['channels'] == 0):
            createPlaylistRegions(Session, idCounter, audio, track)
        else:
            # Stereo: wire the second channel to the duplicated source id.
            for stereos in stereoSources:
                if (audio['name'] == stereos['name']):
                    audio['master-source-1'] = stereos['id']
                    audio['source-1'] = stereos['id']
                    createPlaylistRegions(Session, idCounter, audio, track)

    Session.set('id-counter', str(idCounter))

    return Session, sources
def VOCify(self, outputdir, imgh=540, imgw=960):
    """Export the tagged dataset to Pascal-VOC layout under *outputdir*.

    Recreates outputdir with train/valid image and annotation folders,
    copies each tagged image into train_img and writes one VOC XML
    annotation per image into train_annot.

    :param outputdir: destination root; wiped and recreated.
    :param imgh: image height written into the <size> block.
    :param imgw: image width written into the <size> block.
    :return: list of image paths that were tagged but missing on disk.
    """
    import copy  # local import: used to deep-copy the annotation template

    untaggedImg = []  # tagged but missing image files

    train_img_dir = "{}/{}".format(outputdir, "train_img")
    train_annot_dir = "{}/{}".format(outputdir, "train_annot")
    valid_img_dir = "{}/{}".format(outputdir, "valid_img")
    valid_annot_dir = "{}/{}".format(outputdir, "valid_annot")

    # Start from a clean output tree (ignore_errors: outputdir may not exist).
    shutil.rmtree(outputdir, ignore_errors=True)
    mkdir(outputdir)
    mkdir(train_img_dir)
    mkdir(train_annot_dir)
    mkdir(valid_img_dir)
    mkdir(valid_annot_dir)

    for tag in self.tags:
        stem = "ID{}".format(str(tag["id"]).zfill(10))
        imgname = "{}/{}.jpg".format(self.picdir, stem)
        xmlname = "{}/{}.xml".format(train_annot_dir, stem)
        if imgname not in self.imgs:
            # BUGFIX: was `break`, which aborted the whole export on the
            # first missing image; record it and keep processing instead.
            untaggedImg.append(imgname)
            continue

        shutil.copyfile(imgname, train_img_dir + "/{}.jpg".format(stem))
        tagdata = self.client.find({'_id': tag['_id']}).next()
        print(tagdata['id'])

        # BUGFIX: deep-copy the template; the original reused self.defTree
        # directly, so every annotation accumulated the <object> elements
        # of all previously processed images.
        newtag = copy.deepcopy(self.defTree)
        newtag.find("folder").text = "train_img"
        newtag.find("filename").text = "{}.jpg".format(stem)
        newtag.find("path").text = train_img_dir + "/{}.jpg".format(stem)
        newtag.find("size").find("width").text = str(imgw)
        newtag.find("size").find("height").text = str(imgh)

        for i in range(len(tagdata["xpos"])):
            newobj = Element("object")
            for field, value in (("name", "person" + str(i)),
                                 ("pose", "unspecified"),
                                 ("truncated", "0"),
                                 ("difficult", "0")):
                child = Element(field)
                child.text = value
                newobj.append(child)

            # Tags store box centers + width/height; VOC wants corners.
            half_w = tagdata["w"][i] / 2
            half_h = tagdata["h"][i] / 2
            BB = Element("bndbox")
            for corner, value in (("xmin", tagdata["xpos"][i] - half_w),
                                  ("ymin", tagdata["ypos"][i] - half_h),
                                  ("xmax", tagdata["xpos"][i] + half_w),
                                  ("ymax", tagdata["ypos"][i] + half_h)):
                c = Element(corner)
                c.text = str(value)
                BB.append(c)
            newobj.append(BB)

            indent(newobj)
            newtag.getroot().append(newobj)

        newtag.write(xmlname, xml_declaration=True, method="xml")
    return untaggedImg
def add_attributes(self, node_or_edge, xml_obj, data, default):
    """Append an <attvalues> block describing *data* to *xml_obj*.

    List values are treated as dynamic (spell-bounded) data; everything
    else is static.  Returns *data* unchanged (empty dicts add nothing).
    """
    attvalues = Element("attvalues")
    if len(data) == 0:
        return data

    def _formatted(value, value_type):
        # Stringify, mapping float specials to their GEXF spellings.
        text = str(value)
        if value_type == float:
            text = {"inf": "INF", "nan": "NaN", "-inf": "-INF"}.get(text, text)
        return text

    mode = "static"
    for k, v in data.items():
        # rename generic multigraph key to avoid any name conflict
        if k == "key":
            k = "networkx_key"
        val_type = type(v)
        if val_type not in self.xml_type:
            raise TypeError("attribute value type is not allowed: %s" % val_type)
        if isinstance(v, list):
            # dynamic data: any bounded spell flips the whole graph dynamic
            for val, start, end in v:
                val_type = type(val)
                if start is not None or end is not None:
                    mode = "dynamic"
                    self.alter_graph_mode_timeformat(start)
                    self.alter_graph_mode_timeformat(end)
                    break
            attr_id = self.get_attr_id(
                str(k), self.xml_type[val_type], node_or_edge, default, mode
            )
            for val, start, end in v:
                e = Element("attvalue")
                e.attrib["for"] = attr_id
                e.attrib["value"] = _formatted(val, val_type)
                if start is not None:
                    e.attrib["start"] = str(start)
                if end is not None:
                    e.attrib["end"] = str(end)
                attvalues.append(e)
        else:
            # static data
            mode = "static"
            attr_id = self.get_attr_id(
                str(k), self.xml_type[val_type], node_or_edge, default, mode
            )
            e = Element("attvalue")
            e.attrib["for"] = attr_id
            if isinstance(v, bool):
                e.attrib["value"] = str(v).lower()
            else:
                e.attrib["value"] = _formatted(v, val_type)
            attvalues.append(e)
    xml_obj.append(attvalues)
    return data
# ElementTree_csv_to_xml.py import csv from xml.etree.ElementTree import (Element, SubElement, Comment) import datetime from ElementTree_pretty import prettify generated_on = str(datetime.datetime.now()) # Configura un attributo con set() root = Element('opml') root.set('version', '1.0') root.append(Comment('Generato da ElementTree_csv_to_xml.py per PyMOTW-it')) head = SubElement(root, 'head') title = SubElement(head, 'title') title.text = 'My Podcasts' dc = SubElement(head, 'dateCreated') dc.text = generated_on dm = SubElement(head, 'dateModified') dm.text = generated_on body = SubElement(root, 'body') with open('podcasts.csv', 'rt') as f: current_group = None reader = csv.reader(f) for row in reader: group_name, podcast_name, xml_url, html_url = row if current_group is None or group_name != current_group.text:
class GEXFWriter(GEXF):
    """Serializer turning a NetworkX graph into a GEXF XML document.

    Not used directly: call the module's write_gexf() function instead.
    Supports GEXF 1.1 (slices) and 1.2 (spells), static and dynamic graphs,
    viz extensions, and typed node/edge attributes.
    """

    def __init__(
        self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft"
    ):
        # set_version() configures NS_GEXF/SCHEMALOCATION/VERSION constants.
        self.prettyprint = prettyprint
        self.encoding = encoding
        self.set_version(version)
        # Document root: <gexf> with namespace and schema declarations.
        self.xml = Element(
            "gexf",
            {
                "xmlns": self.NS_GEXF,
                "xmlns:xsi": self.NS_XSI,
                "xsi:schemaLocation": self.SCHEMALOCATION,
                "version": self.VERSION,
            },
        )

        # Make meta element a non-graph element
        # Also add lastmodifieddate as attribute, not tag
        meta_element = Element("meta")
        subelement_text = "NetworkX {}".format(nx.__version__)
        SubElement(meta_element, "creator").text = subelement_text
        meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d"))
        self.xml.append(meta_element)

        register_namespace("viz", self.NS_VIZ)

        # counters for edge and attribute identifiers
        self.edge_id = itertools.count()
        self.attr_id = itertools.count()
        self.all_edge_ids = set()
        # default attributes are stored in dictionaries,
        # keyed first by element class ("node"/"edge") then by mode.
        self.attr = {}
        self.attr["node"] = {}
        self.attr["edge"] = {}
        self.attr["node"]["dynamic"] = {}
        self.attr["node"]["static"] = {}
        self.attr["edge"]["dynamic"] = {}
        self.attr["edge"]["static"] = {}

        if graph is not None:
            self.add_graph(graph)

    def __str__(self):
        """Return the (optionally pretty-printed) XML as a string."""
        if self.prettyprint:
            self.indent(self.xml)
        s = tostring(self.xml).decode(self.encoding)
        return s

    def add_graph(self, G):
        """Serialize graph *G* as a <graph> element under the root."""
        # first pass through G collecting edge ids
        for u, v, dd in G.edges(data=True):
            eid = dd.get("id")
            if eid is not None:
                self.all_edge_ids.add(str(eid))
        # set graph attributes
        if G.graph.get("mode") == "dynamic":
            mode = "dynamic"
        else:
            mode = "static"

        # Add a graph element to the XML
        if G.is_directed():
            default = "directed"
        else:
            default = "undirected"
        name = G.graph.get("name", "")
        graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name)
        self.graph_element = graph_element
        self.add_nodes(G, graph_element)
        self.add_edges(G, graph_element)
        self.xml.append(graph_element)

    def add_nodes(self, G, graph_element):
        """Append a <nodes> element with one <node> per node of *G*."""
        nodes_element = Element("nodes")
        for node, data in G.nodes(data=True):
            node_data = data.copy()

            node_id = str(node_data.pop("id", node))
            kw = {"id": node_id}
            label = str(node_data.pop("label", node))
            kw["label"] = label
            try:
                pid = node_data.pop("pid")
                kw["pid"] = str(pid)
            except KeyError:
                pass
            try:
                # 'start'/'end' spells may flip the graph into dynamic mode.
                start = node_data.pop("start")
                kw["start"] = str(start)
                self.alter_graph_mode_timeformat(start)
            except KeyError:
                pass
            try:
                end = node_data.pop("end")
                kw["end"] = str(end)
                self.alter_graph_mode_timeformat(end)
            except KeyError:
                pass

            # add node element with attributes
            node_element = Element("node", **kw)

            # add node element and attr subelements
            default = G.graph.get("node_default", {})
            node_data = self.add_parents(node_element, node_data)
            if self.VERSION == "1.1":
                node_data = self.add_slices(node_element, node_data)
            else:
                node_data = self.add_spells(node_element, node_data)
            node_data = self.add_viz(node_element, node_data)
            node_data = self.add_attributes("node", node_element, node_data, default)
            nodes_element.append(node_element)
        graph_element.append(nodes_element)

    def add_edges(self, G, graph_element):
        """Append an <edges> element with one <edge> per edge of *G*."""

        def edge_key_data(G):
            # helper function to unify multigraph and graph edge iterator
            if G.is_multigraph():
                for u, v, key, data in G.edges(data=True, keys=True):
                    edge_data = data.copy()
                    edge_data.update(key=key)
                    edge_id = edge_data.pop("id", None)
                    if edge_id is None:
                        # draw fresh ids until one is unused
                        edge_id = next(self.edge_id)
                        while str(edge_id) in self.all_edge_ids:
                            edge_id = next(self.edge_id)
                        self.all_edge_ids.add(str(edge_id))
                    yield u, v, edge_id, edge_data
            else:
                for u, v, data in G.edges(data=True):
                    edge_data = data.copy()
                    edge_id = edge_data.pop("id", None)
                    if edge_id is None:
                        edge_id = next(self.edge_id)
                        while str(edge_id) in self.all_edge_ids:
                            edge_id = next(self.edge_id)
                        self.all_edge_ids.add(str(edge_id))
                    yield u, v, edge_id, edge_data

        edges_element = Element("edges")
        for u, v, key, edge_data in edge_key_data(G):
            kw = {"id": str(key)}
            try:
                edge_weight = edge_data.pop("weight")
                kw["weight"] = str(edge_weight)
            except KeyError:
                pass
            try:
                edge_type = edge_data.pop("type")
                kw["type"] = str(edge_type)
            except KeyError:
                pass
            try:
                start = edge_data.pop("start")
                kw["start"] = str(start)
                self.alter_graph_mode_timeformat(start)
            except KeyError:
                pass
            try:
                end = edge_data.pop("end")
                kw["end"] = str(end)
                self.alter_graph_mode_timeformat(end)
            except KeyError:
                pass
            # GEXF references nodes by their serialized ids.
            source_id = str(G.nodes[u].get("id", u))
            target_id = str(G.nodes[v].get("id", v))
            edge_element = Element("edge", source=source_id, target=target_id, **kw)
            default = G.graph.get("edge_default", {})
            if self.VERSION == "1.1":
                edge_data = self.add_slices(edge_element, edge_data)
            else:
                edge_data = self.add_spells(edge_element, edge_data)
            edge_data = self.add_viz(edge_element, edge_data)
            edge_data = self.add_attributes("edge", edge_element, edge_data, default)
            edges_element.append(edge_element)
        graph_element.append(edges_element)

    def add_attributes(self, node_or_edge, xml_obj, data, default):
        """Append an <attvalues> block for *data* to *xml_obj*.

        List values are dynamic (spell-bounded) data; others are static.
        """
        # Add attrvalues to node or edge
        attvalues = Element("attvalues")
        if len(data) == 0:
            return data
        mode = "static"
        for k, v in data.items():
            # rename generic multigraph key to avoid any name conflict
            if k == "key":
                k = "networkx_key"
            val_type = type(v)
            if val_type not in self.xml_type:
                raise TypeError("attribute value type is not allowed: %s" % val_type)
            if isinstance(v, list):
                # dynamic data
                for val, start, end in v:
                    val_type = type(val)
                    if start is not None or end is not None:
                        mode = "dynamic"
                        self.alter_graph_mode_timeformat(start)
                        self.alter_graph_mode_timeformat(end)
                        break
                attr_id = self.get_attr_id(
                    str(k), self.xml_type[val_type], node_or_edge, default, mode
                )
                for val, start, end in v:
                    e = Element("attvalue")
                    e.attrib["for"] = attr_id
                    e.attrib["value"] = str(val)
                    # Handle nan, inf, -inf differently
                    if val_type == float:
                        if e.attrib["value"] == "inf":
                            e.attrib["value"] = "INF"
                        elif e.attrib["value"] == "nan":
                            e.attrib["value"] = "NaN"
                        elif e.attrib["value"] == "-inf":
                            e.attrib["value"] = "-INF"
                    if start is not None:
                        e.attrib["start"] = str(start)
                    if end is not None:
                        e.attrib["end"] = str(end)
                    attvalues.append(e)
            else:
                # static data
                mode = "static"
                attr_id = self.get_attr_id(
                    str(k), self.xml_type[val_type], node_or_edge, default, mode
                )
                e = Element("attvalue")
                e.attrib["for"] = attr_id
                if isinstance(v, bool):
                    e.attrib["value"] = str(v).lower()
                else:
                    e.attrib["value"] = str(v)
                    # Handle float nan, inf, -inf differently
                    if val_type == float:
                        if e.attrib["value"] == "inf":
                            e.attrib["value"] = "INF"
                        elif e.attrib["value"] == "nan":
                            e.attrib["value"] = "NaN"
                        elif e.attrib["value"] == "-inf":
                            e.attrib["value"] = "-INF"
                attvalues.append(e)
        xml_obj.append(attvalues)
        return data

    def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
        """Return the declared id for *title*, declaring it on first use."""
        # find the id of the attribute or generate a new id
        try:
            return self.attr[edge_or_node][mode][title]
        except KeyError:
            # generate new id
            new_id = str(next(self.attr_id))
            self.attr[edge_or_node][mode][title] = new_id
            attr_kwargs = {"id": new_id, "title": title, "type": attr_type}
            attribute = Element("attribute", **attr_kwargs)
            # add subelement for data default value if present
            default_title = default.get(title)
            if default_title is not None:
                default_element = Element("default")
                default_element.text = str(default_title)
                attribute.append(default_element)
            # new insert it into the XML
            attributes_element = None
            for a in self.graph_element.findall("attributes"):
                # find existing attributes element by class and mode
                a_class = a.get("class")
                a_mode = a.get("mode", "static")
                if a_class == edge_or_node and a_mode == mode:
                    attributes_element = a
            if attributes_element is None:
                # create new attributes element
                attr_kwargs = {"mode": mode, "class": edge_or_node}
                attributes_element = Element("attributes", **attr_kwargs)
                # declarations must precede the <nodes>/<edges> they describe
                self.graph_element.insert(0, attributes_element)
            attributes_element.append(attribute)
            return new_id

    def add_viz(self, element, node_data):
        """Emit viz-namespace subelements (color/size/thickness/shape/position)
        from the 'viz' entry of *node_data*, consuming it."""
        viz = node_data.pop("viz", False)
        if viz:
            color = viz.get("color")
            if color is not None:
                # GEXF 1.1 has no alpha channel on colors.
                if self.VERSION == "1.1":
                    e = Element(
                        "{%s}color" % self.NS_VIZ,
                        r=str(color.get("r")),
                        g=str(color.get("g")),
                        b=str(color.get("b")),
                    )
                else:
                    e = Element(
                        "{%s}color" % self.NS_VIZ,
                        r=str(color.get("r")),
                        g=str(color.get("g")),
                        b=str(color.get("b")),
                        a=str(color.get("a")),
                    )
                element.append(e)

            size = viz.get("size")
            if size is not None:
                e = Element("{%s}size" % self.NS_VIZ, value=str(size))
                element.append(e)

            thickness = viz.get("thickness")
            if thickness is not None:
                e = Element("{%s}thickness" % self.NS_VIZ, value=str(thickness))
                element.append(e)

            shape = viz.get("shape")
            if shape is not None:
                # URLs become image shapes.
                if shape.startswith("http"):
                    e = Element(
                        "{%s}shape" % self.NS_VIZ, value="image", uri=str(shape)
                    )
                else:
                    e = Element("{%s}shape" % self.NS_VIZ, value=str(shape))
                element.append(e)

            position = viz.get("position")
            if position is not None:
                e = Element(
                    "{%s}position" % self.NS_VIZ,
                    x=str(position.get("x")),
                    y=str(position.get("y")),
                    z=str(position.get("z")),
                )
                element.append(e)
        return node_data

    def add_parents(self, node_element, node_data):
        """Emit a <parents> block from the 'parents' entry, consuming it."""
        parents = node_data.pop("parents", False)
        if parents:
            parents_element = Element("parents")
            for p in parents:
                e = Element("parent")
                e.attrib["for"] = str(p)
                parents_element.append(e)
            node_element.append(parents_element)
        return node_data

    def add_slices(self, node_or_edge_element, node_or_edge_data):
        """GEXF 1.1: emit a <slices> block from the 'slices' entry."""
        slices = node_or_edge_data.pop("slices", False)
        if slices:
            slices_element = Element("slices")
            for start, end in slices:
                e = Element("slice", start=str(start), end=str(end))
                slices_element.append(e)
            node_or_edge_element.append(slices_element)
        return node_or_edge_data

    def add_spells(self, node_or_edge_element, node_or_edge_data):
        """GEXF 1.2: emit a <spells> block from the 'spells' entry; spell
        bounds may flip the graph into dynamic mode."""
        spells = node_or_edge_data.pop("spells", False)
        if spells:
            spells_element = Element("spells")
            for start, end in spells:
                e = Element("spell")
                if start is not None:
                    e.attrib["start"] = str(start)
                    self.alter_graph_mode_timeformat(start)
                if end is not None:
                    e.attrib["end"] = str(end)
                    self.alter_graph_mode_timeformat(end)
                spells_element.append(e)
            node_or_edge_element.append(spells_element)
        return node_or_edge_data

    def alter_graph_mode_timeformat(self, start_or_end):
        # If 'start' or 'end' appears, alter Graph mode to dynamic and
        # set timeformat (str -> date, float -> double, int -> long).
        if self.graph_element.get("mode") == "static":
            if start_or_end is not None:
                if isinstance(start_or_end, str):
                    timeformat = "date"
                elif isinstance(start_or_end, float):
                    timeformat = "double"
                elif isinstance(start_or_end, int):
                    timeformat = "long"
                else:
                    raise nx.NetworkXError(
                        "timeformat should be of the type int, float or str"
                    )
                self.graph_element.set("timeformat", timeformat)
                self.graph_element.set("mode", "dynamic")

    def write(self, fh):
        # Serialize graph G in GEXF to the open fh
        if self.prettyprint:
            self.indent(self.xml)
        document = ElementTree(self.xml)
        document.write(fh, encoding=self.encoding, xml_declaration=True)

    def indent(self, elem, level=0):
        # in-place prettyprint formatter: adjusts .text/.tail whitespace
        # recursively so the serialized tree is indented two spaces per level
        i = "\n" + "  " * level
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent(elem, level + 1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
elif element.tag == "attr": user_partition_entries[NumPartitions - 1]["which_flash"] = int( element.text) #if element.keys(): # for name, value in element.items(): # print "\tKEY '%s' = '%s'" % (name,value) #if element.text: # print element.text ## Create RAWPROGRAM NewXML = Element( RootTag ) ## elem = tree.getroot(), typically it's '<data>' and type(root.getroot().tag) is <type 'str'> NewXML.append(Comment('NOTE: This is an ** Autogenerated file **')) NewXML.append(Comment('BlockSize = %d KB' % block_size_in_kb)) NewXML.append(Comment('PageSize = %d KB' % page_size_in_kb)) NewXML.append(Comment('NUM_PARTITION_SECTORS = %d' % total_page_count)) # elem = Element("erase") # elem.attrib["num_partition_sectors"] = str(4294967295) # elem.attrib["start_sector"] = str(0) # NewXML.append(elem) NumPartitions = len(user_partition_entries) ## NumSectorsUsed = 0 sys_parti_buffer = struct.pack(sys_parti_header_format, FLASH_PART_MAGIC1, FLASH_PART_MAGIC2, FLASH_PARTITION_VERSION, NumPartitions)
from xml.etree.ElementTree import Element, SubElement, Comment, tostring top = Element('top') comment = Comment('Generated for PyMOTW') top.append(comment) child = SubElement(top, 'child') child.text = 'This child contains text.' child_with_tail = SubElement(top, 'child_with_tail') child_with_tail.text = 'This child has regular text.' child_with_tail.tail = 'And "tail" text.' child_with_entity_ref = SubElement(top, 'child_with_entity_ref') child_with_entity_ref.text = 'This & that' print tostring(top)
def _types_to_fixture(location_db, type, locs):
    """Wrap the fixtures for every location in *locs* inside a parent
    element named after the location type."""
    parent = Element('%ss' % type.code)  # hacky pluralization
    parent.extend(
        _location_to_fixture(location_db, loc, type) for loc in locs
    )
    return parent
class _FakeIncrementalFileWriter(object): """Replacement for _IncrementalFileWriter of lxml. Uses ElementTree to build xml in memory.""" def __init__(self, output_file): self._element_stack = [] self._top_element = None self._file = output_file self._have_root = False @contextmanager def element(self, tag, attrib=None, nsmap=None, **_extra): """Create a new xml element using a context manager. The elements are written when the top level context is left. This is for code compatibility only as it is quite slow. """ # __enter__ part self._have_root = True if attrib is None: attrib = {} self._top_element = Element(tag, attrib=attrib, **_extra) self._top_element.text = '' self._top_element.tail = '' self._element_stack.append(self._top_element) yield # __exit__ part el = self._element_stack.pop() if self._element_stack: parent = self._element_stack[-1] parent.append(self._top_element) self._top_element = parent else: self._write_element(el) self._top_element = None def write(self, arg): """Write a string or subelement.""" if isinstance(arg, str): # it is not allowed to write a string outside of an element if self._top_element is None: raise LxmlSyntaxError() if len(self._top_element) == 0: # element has no children: add string to text self._top_element.text += arg else: # element has children: add string to tail of last child self._top_element[-1].tail += arg else: if self._top_element is not None: self._top_element.append(arg) elif not self._have_root: self._write_element(arg) else: raise LxmlSyntaxError() def _write_element(self, element): xml = tostring(element) self._file.write(xml) def __enter__(self): pass def __exit__(self, type, value, traceback): # without root the xml document is incomplete if not self._have_root: raise LxmlSyntaxError()
def root(self):
    """Build a SECLISTRQ aggregate holding two SECRQ test fixtures."""
    seclistrq = Element("SECLISTRQ")
    for _ in range(2):
        seclistrq.append(SecrqTestCase().root)
    return seclistrq
class vFeedXML(object): ''' Produce the vFeed XML format The XML file is the flagship feature of the vFeed Concept ''' def __init__(self, cveID): self.cve_url = config.gbVariables['cve_url'] self.redhat_oval_url = config.gbVariables['redhat_oval_url'] self.cwe_url = config.gbVariables['cwe_url'] self.capec_url = config.gbVariables['capec_url'] self.osvdb_url = config.gbVariables['osvdb_url'] self.milw0rm_url = config.gbVariables['milw0rm_url'] self.ms_bulletin_url = config.gbVariables['ms_bulletin_url'] self.ms_kb_url = config.gbVariables['ms_kb_url'] #Invoking the vFeed api with CVE object self.cveID = cveID.upper() self.vfeed = vFeed(cveID) # Calling all available methods self.cveInfo = self.vfeed.get_cve() self.cveRef = self.vfeed.get_refs() self.SCIP_id = self.vfeed.get_scip() self.CERTVN_id = self.vfeed.get_certvn() self.IAVM_id = self.vfeed.get_iavm() self.OSVDB_id = self.vfeed.get_osvdb() self.CPE_id = self.vfeed.get_cpe() self.CWE_id = self.vfeed.get_cwe() self.CAPEC_id = self.vfeed.get_capec() self.Risk = self.vfeed.get_risk() self.cvssScore = self.vfeed.get_cvss() self.MS_id = self.vfeed.get_ms() self.KB_id = self.vfeed.get_kb() self.AIXAPAR_id = self.vfeed.get_aixapar() self.REDHAT_id, self.BUGZILLA_id = self.vfeed.get_redhat() self.DEBIAN_id = self.vfeed.get_debian() self.FEDORA_id = self.vfeed.get_fedora() self.SUSE_id = self.vfeed.get_suse() self.GENTOO_id = self.vfeed.get_gentoo() self.UBUNTU_id = self.vfeed.get_ubuntu() self.CISCO_id = self.vfeed.get_cisco() self.MANDRIVA_id = self.vfeed.get_mandriva() self.OVAL_id = self.vfeed.get_oval() self.NESSUS_id = self.vfeed.get_nessus() self.OPENVAS_id = self.vfeed.get_openvas() self.EDB_id = self.vfeed.get_edb() self.SAINT_id = self.vfeed.get_saint() self.MSF_id = self.vfeed.get_msf() self.MILWORM_id = self.vfeed.get_milw0rm() self.SNORT_id = self.vfeed.get_snort() def export(self): ''' exporting data to the vFeed XML format Output : CVE_xxxx_xxx_.xml file ''' # define id self.vfeedid = 
self.cveID.replace('self.cveID', 'vFeed') self.vfeedfile = self.cveID.replace('-', '_') + '.xml' # define generation time self.generated_on = strftime("%a, %d %b %Y %H:%M:%S", gmtime()) # define the vFeed XML attributes self.root = Element('vFeed') self.root.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance") self.root.set('xmlns:meta', "http://www.toolswatch.org/vfeed/") self.root.set('xmlns', "http://www.toolswatch.org/vfeed/") self.root.set( 'xsi:schemaLocation', "http://www.toolswatch.org/vfeed/ http://www.toolswatch.org/vfeed/vFeed.xsd" ) self.root.append(Comment('#####################################')) self.root.append(Comment(config.product['__title__'])) self.root.append(Comment('Generated by vFeedApi.py')) self.head = SubElement(self.root, 'release') self.project_name = SubElement(self.head, 'name') self.project_name.text = 'vFeed XML for %s' % self.cveID self.project_version = SubElement(self.head, 'version') self.project_version.text = config.product['__build__'] self.project_author = SubElement(self.head, 'author') self.project_author.text = config.author['__name__'] self.project_url = SubElement(self.head, 'url') self.project_url.text = config.author['__website__'] self.date_generated = SubElement(self.head, 'date_generated') self.date_generated.text = self.generated_on # Exporting Vulnerability Summary self.root.append(Comment('#####################################')) self.root.append(Comment('Entry ID')) self.entry_head = SubElement(self.root, 'entry', { 'exported': self.vfeedfile, 'id': self.vfeedid, }) self.vul_summary_date = SubElement( self.entry_head, 'date', { 'published': self.cveInfo['published'], 'modified': self.cveInfo['modified'], }) self.vul_summary = SubElement(self.entry_head, 'summary') self.vul_summary.text = self.cveInfo['summary'] self.vul_summary_ref = SubElement(self.entry_head, 'cve_ref') self.vul_summary_ref.text = self.cve_url + self.cveID # Exporting references as they come from NVD XML self.entry_head.append( 
Comment('#####################################')) self.entry_head.append(Comment('Official References')) self.references_head = SubElement(self.entry_head, 'references') for i in range(0, len(self.cveRef)): self.source_head = SubElement(self.references_head, 'ref', { 'url': self.cveRef[i]['link'], 'source': self.cveRef[i]['id'], }) self.entry_head.append( Comment('#####################################')) self.entry_head.append(Comment('vFeed Mapped References')) self.mappedrefs_head = SubElement(self.entry_head, 'crossReferences') # Exporting extra SCIP ref from Mapping for i in range(0, len(self.SCIP_id)): self.source_head = SubElement( self.mappedrefs_head, 'ref', { 'url': self.SCIP_id[i]['link'], 'id': self.SCIP_id[i]['id'], 'source': "SCIP", }) # Exporting extra CERT VN ref from Mapping for i in range(0, len(self.CERTVN_id)): self.source_head = SubElement( self.mappedrefs_head, 'ref', { 'url': self.CERTVN_id[i]['link'], 'id': self.CERTVN_id[i]['id'], 'source': "CERT-VN", }) # Exporting IAVM ref from Mapping for i in range(0, len(self.IAVM_id)): self.source_head = SubElement( self.mappedrefs_head, 'ref', { 'vmskey': self.IAVM_id[i]['key'], 'id': self.IAVM_id[i]['id'], 'title': self.IAVM_id[i]['title'], 'source': "DISA/IAVM", }) # Exporting OSVDB ref from Mapping for i in range(0, len(self.OSVDB_id)): self.source_head = SubElement( self.mappedrefs_head, 'ref', { 'id': self.OSVDB_id[i]['id'], 'url': self.osvdb_url + self.OSVDB_id[i]['id'], 'source': "OSVDB", }) # Exporting Targets CPEs ids if self.CPE_id: self.entry_head.append( Comment('#####################################')) self.entry_head.append( Comment('Vulnerable Targets according to CPE')) self.vulnerabletargets_head = SubElement( self.entry_head, 'vulnerableTargets', { 'totalCPE': str(len(self.CPE_id)), }) for i in range(0, len(self.CPE_id)): self.cpe_head = SubElement(self.vulnerabletargets_head, 'cpe', { 'id': self.CPE_id[i]['id'], }) # Exporting Risk Scoring self.entry_head.append( 
Comment('#####################################')) self.entry_head.append(Comment('Risk Scoring Evaluation')) self.riskscoring_head = SubElement(self.entry_head, 'riskScoring') self.risk_head = SubElement(self.riskscoring_head, 'severityLevel', { 'status': self.Risk['severitylevel'], }) self.risk_head = SubElement( self.riskscoring_head, 'cvss', { 'base': self.cvssScore['base'], 'impact': self.cvssScore['impact'], 'exploit': self.cvssScore['exploit'], }) self.risk_head = SubElement( self.riskscoring_head, 'cvssVector', { 'AV': self.cvssScore['access_vector'], 'AC': self.cvssScore['access_complexity'], 'Au': self.cvssScore['authentication'], 'C': self.cvssScore['confidentiality_impact'], 'I': self.cvssScore['integrity_impact'], 'A': self.cvssScore['availability_impact'], }) self.risk_head = SubElement(self.riskscoring_head, 'topVulnerable', { 'status': str(self.Risk['topvulnerable']), }) self.risk_head = SubElement(self.riskscoring_head, 'topAlert', { 'status': str(self.Risk['topAlert']), }) self.risk_head = SubElement(self.riskscoring_head, 'pciCompliance', { 'status': self.Risk['pciCompliance'], }) # Exporting Patch Management self.entry_head.append( Comment('#####################################')) self.entry_head.append(Comment('Patch Management')) self.patchmanagement_head = SubElement(self.entry_head, 'patchManagement') ## Exporting Microsoft MS Patches for i in range(0, len(self.MS_id)): self.patch_head = SubElement( self.patchmanagement_head, 'patch', { 'id': self.MS_id[i]['id'], 'title': self.MS_id[i]['title'], 'source': 'microsoft', 'url': self.ms_bulletin_url + self.MS_id[i]['id'], }) ## Exporting Microsoft KB Patches for i in range(0, len(self.KB_id)): self.patch_head = SubElement( self.patchmanagement_head, 'patch', { 'id': self.KB_id[i]['id'], 'title': self.KB_id[i]['title'], 'source': 'microsoft KB', 'url': self.ms_kb_url + self.KB_id[i]['id'], }) ## Exporting IBM AIXAPAR Patches for i in range(0, len(self.AIXAPAR_id)): self.patch_head = 
SubElement(self.patchmanagement_head, 'patch', { 'id': self.AIXAPAR_id[i]['id'], 'source': 'IBM', }) ## Exporting REDHAT Patches for i in range(0, len(self.REDHAT_id)): self.patch_head = SubElement( self.patchmanagement_head, 'patch', { 'id': self.REDHAT_id[i]['id'], 'title': self.REDHAT_id[i]['title'], 'source': 'REDHAT', }) for i in range(0, len(self.BUGZILLA_id)): self.patch_head = SubElement( self.patchmanagement_head, 'patch', { 'date_issue': self.BUGZILLA_id[i]['date_issue'], 'id': self.BUGZILLA_id[i]['id'], 'title': self.BUGZILLA_id[i]['title'], 'source': 'BUGZILLA', }) ## Exporting SUSE Patches for i in range(0, len(self.SUSE_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.SUSE_id[i]['id'], 'source': 'SUSE', }) ## Exporting DEBIAN Patches for i in range(0, len(self.DEBIAN_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.DEBIAN_id[i]['id'], 'source': 'DEBIAN', }) ## Exporting MANDRIVA Patches for i in range(0, len(self.MANDRIVA_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.MANDRIVA_id[i]['id'], 'source': 'MANDRIVA', }) ## Exporting CISCO Patches for i in range(0, len(self.CISCO_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.CISCO_id[i]['id'], 'source': 'CISCO', }) ## Exporting UBUNTU Patches for i in range(0, len(self.UBUNTU_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.UBUNTU_id[i]['id'], 'source': 'UBUNTU', }) ## Exporting GENTOO Patches for i in range(0, len(self.GENTOO_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.GENTOO_id[i]['id'], 'source': 'GENTOO', }) ## Exporting FEDORA Patches for i in range(0, len(self.FEDORA_id)): self.patch_head = SubElement(self.patchmanagement_head, 'patch', { 'id': self.FEDORA_id[i]['id'], 'source': 'FEDORA', }) # Attack and Weaknesses Patterns if self.CWE_id: self.entry_head.append( 
Comment('#####################################')) self.entry_head.append( Comment( 'Attack and Weaknesses Categories. Useful when performing classification of threats' )) self.attackclassification_head = SubElement( self.entry_head, 'attackPattern') for i in range(0, len(self.CWE_id)): self.cwe_id_url = self.CWE_id[i]['id'].split("CWE-") self.attackPattern_head = SubElement( self.attackclassification_head, 'cwe', { 'standard': 'CWE - Common Weakness Enumeration', 'id': self.CWE_id[i]['id'], 'title': self.CWE_id[i]['title'], 'url': self.cwe_url + self.cwe_id_url[1] }) for i in range(len(self.CWE_id), len(self.CAPEC_id) + len(self.CWE_id)): self.attackPattern_head = SubElement( self.attackclassification_head, 'capec', { 'standard': 'CAPEC - Common Attack Pattern Enumeration and Classification', 'relatedCWE': self.CAPEC_id[i]['cwe'], 'id': self.CAPEC_id[i]['id'], 'url': self.capec_url + self.CAPEC_id[i]['id'] }) # Exporting Assessment, security tests and exploitation self.entry_head.append( Comment('#####################################')) self.entry_head.append( Comment( 'Assessment and security Tests. 
The IDs and source could be leveraged to test the vulnerability' )) self.securitytest_head = SubElement(self.entry_head, 'assessment') ## Exporting OVAL ids for i in range(0, len(self.OVAL_id)): self.ovalChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Local Security Testing', 'id': self.OVAL_id[i]['id'], 'utility': "OVAL Interpreter", 'file': self.OVAL_id[i]['file'], }) for i in range(0, len(self.REDHAT_id)): self.ovalChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Local Security Testing', 'id': self.REDHAT_id[i]['oval'], 'utility': "OVAL Interpreter", 'file': self.redhat_oval_url + self.REDHAT_id[i]['oval'].split( 'oval:com.redhat.rhsa:def:')[1] + '.xml', }) ## Exporting Nessus attributes for i in range(0, len(self.NESSUS_id)): self.nessusChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Remote Security Testing', 'id': self.NESSUS_id[i]['id'], 'name': self.NESSUS_id[i]['name'], 'family': self.NESSUS_id[i]['family'], 'file': self.NESSUS_id[i]['file'], 'utility': "Nessus Vulnerability Scanner", }) ## Exporting OpenVAS attributes for i in range(0, len(self.OPENVAS_id)): self.openvasChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Remote Security Testing', 'id': self.OPENVAS_id[i]['id'], 'name': self.OPENVAS_id[i]['name'], 'family': self.OPENVAS_id[i]['family'], 'file': self.OPENVAS_id[i]['file'], 'utility': "OpenVAS Vulnerability Scanner", }) ## Exporting EDB ids for i in range(0, len(self.EDB_id)): self.exploitChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Exploitation', 'utility': "exploit-db", 'id': self.EDB_id[i]['id'], 'file': self.EDB_id[i]['file'], }) ## Exporting Milw0rm ids for i in range(0, len(self.MILWORM_id)): self.exploitChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Exploitation', 'utility': "milw0rm", 'id': self.MILWORM_id[i]['id'], 'file': self.milw0rm_url + self.MILWORM_id[i]['id'], }) ## Exporting SAINT ids for i in 
range(0, len(self.SAINT_id)): self.exploitChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Exploitation', 'utility': "saintExploit", 'id': self.SAINT_id[i]['id'], 'title': self.SAINT_id[i]['title'], 'file': self.SAINT_id[i]['file'], }) ## Exporting MSF - Metasploit ids for i in range(0, len(self.MSF_id)): self.exploitChecks_head = SubElement( self.securitytest_head, 'check', { 'type': 'Exploitation', 'utility': "Metasploit", 'id': self.MSF_id[i]['id'], 'title': self.MSF_id[i]['title'], 'script': self.MSF_id[i]['file'], }) # Exporting Defense rules self.entry_head.append( Comment('#####################################')) self.entry_head.append( Comment( 'Defense and IDS rules. The IDs and source could be leveraged to deploy effective rules' )) self.defense_head = SubElement(self.entry_head, 'defense') ## Exporting Snort Rules for i in range(0, len(self.SNORT_id)): self.snortRules_head = SubElement( self.defense_head, 'rule', { 'type': 'Defense', 'utility': "Snort", 'id': self.SNORT_id[i]['id'], 'signature': self.SNORT_id[i]['signature'], 'classtype': self.SNORT_id[i]['classtype'], }) self.xmlfile = open(self.vfeedfile, 'w+') print '[info] vFeed xml file %s exported for %s' % (self.vfeedfile, self.cveID) print >> self.xmlfile, self.prettify(self.root) def prettify(self, elem): """Return a pretty-printed XML string for the Element. This function found on internet. So thanks to its author whenever he is. """ rough_string = ElementTree.tostring(elem, 'UTF-8') reparsed = minidom.parseString(rough_string) return reparsed.toprettyxml(indent=" ")
def Udacity2Voc(self, dataSet_dir="example/udacity/", anno_dir="label/labels.csv", label_dir="results/", label_list=None):
    """Convert Udacity CSV annotations into Pascal-VOC style XML files.

    One XML per image is produced under *label_dir*; when an image already
    has an annotation file, the new bounding box is appended to it.

    :param dataSet_dir: dataset root, relative to the current directory
    :param anno_dir: CSV annotation file path, relative to dataSet_dir
    :param label_dir: output directory for the generated XML files
    :param label_list: optional label list; defaults to self.label_list
    """
    print(color.BOLD + color.RED + "------------------------- CSV Parsing Start-------------------------" + color.END)

    if label_list is None:
        label_list = self.label_list

    work_dir = getcwd() + "/" + dataSet_dir
    anno_dir = work_dir + anno_dir
    label_dir = work_dir + label_dir
    print("Input file : {}".format(anno_dir))

    def _text_element(tag, text):
        # Helper: leaf element with string content.
        node = Element(tag)
        node.text = str(text)
        return node

    def _object_element(label, xmin, ymin, xmax, ymax):
        # Helper: one VOC <object> entry for a bounding box.
        obj = Element("object")
        obj.append(_text_element("name", label))
        obj.append(_text_element("pose", "Unspecified"))
        obj.append(_text_element("truncated", "0"))
        obj.append(_text_element("difficult", "0"))
        bndbox = Element("bndbox")
        bndbox.append(_text_element("xmin", xmin))
        bndbox.append(_text_element("ymin", ymin))
        bndbox.append(_text_element("xmax", xmax))
        bndbox.append(_text_element("ymax", ymax))
        obj.append(bndbox)
        return obj

    try:
        # 'with' guarantees the CSV handle is closed (it previously leaked).
        with open(anno_dir, 'r', encoding='utf-8') as f:
            for line in csv.reader(f):
                print(color.BOLD + color.RED + "------------------------- CSV Parsing -------------------------" + color.END)
                convertList = line[0].split(" ")
                length = len(convertList)

                image_name = convertList[0]
                xmin = convertList[1]
                ymin = convertList[2]
                xmax = convertList[3]
                ymax = convertList[4]
                label = convertList[6].split('"')[1]
                # FIX: '==' instead of 'is' -- identity comparison of ints
                # relies on CPython small-int caching, not on the value.
                if length == 8:
                    state = convertList[7].split('"')[1]
                    label = label + state

                # Open the image only to read its dimensions.
                img = Image.open(dataSet_dir + "JPEG/" + image_name)
                img_width = int(img.size[0])
                img_height = int(img.size[1])
                img_depth = 3  # VOC convention: 3-channel RGB

                print("image size (width, height) : {}".format(img.size))
                print()
                print("Output : {}".format(label_dir + image_name[:-3] + "xml"))
                print()
                print("class name, index : ({})".format(label))

                result_outpath = str(label_dir + image_name[:-3] + "xml")
                if not os.path.isfile(result_outpath):
                    # First box for this image: build the annotation skeleton.
                    xml_annotation = Element("annotation")
                    xml_annotation.append(_text_element("folder", "udacity"))
                    xml_annotation.append(_text_element("filename", image_name))
                    xml_annotation.append(_text_element("path", label_dir + image_name))
                    xml_source = Element("source")
                    xml_source.append(_text_element("database", "Unknown"))
                    xml_annotation.append(xml_source)
                    xml_size = Element("size")
                    xml_size.append(_text_element("width", img_width))
                    xml_size.append(_text_element("height", img_height))
                    xml_size.append(_text_element("depth", img_depth))
                    xml_annotation.append(xml_size)
                    xml_annotation.append(_text_element("segmented", "0"))
                else:
                    # Annotation exists: append another object to it.
                    xml_annotation = parse(result_outpath).getroot()

                xml_annotation.append(_object_element(label, xmin, ymin, xmax, ymax))
                self.indent(xml_annotation)
                dump(xml_annotation)
                ElementTree(xml_annotation).write(result_outpath)
                print(color.BOLD + color.RED + "------------------------- CSV Parsing -------------------------" + color.END)

        print(color.BOLD + color.RED + "------------------------- CSV Parsing END -------------------------" + color.END)
    except Exception as e:
        print(color.BOLD + color.RED + "ERROR : {}".format(e) + color.END)
def test_xml_find(self):
    """xml_find("bar") must select the child element with that tag."""
    parent = Element("parent")
    for tag in ("foo", "bar"):
        parent.append(Element(tag))
    result = validate(xml_find("bar"), parent)
    assert result.tag == "bar"
def _geometryFromSymbolizer(sl):
    """Return a <Geometry> element wrapping the symbolizer's converted
    geometry expression, or None when no expression is configured."""
    expression = convertExpression(sl.get("geometry", None))
    if expression is None:
        return None
    geometry = Element("Geometry")
    geometry.append(expression)
    return geometry
def root(self):
    """Build a FIMFASSETCLASS aggregate containing four portion fixtures."""
    asset_class = Element("FIMFASSETCLASS")
    for _ in range(4):
        asset_class.append(FiportionTestCase().root)
    return asset_class
def populate(self, fasta, stem, time_unit='days', chain_length=None, screen_step=None, log_step=None, treelog_step=None, root_height=None):
    """
    Load sequences from FASTA object into BEAST XML template

    :param fasta: mapping of record key -> dict with 'header', 'sequence'
                  and 'days' entries (iterated with iteritems(); the old
                  docstring called it a list -- TODO confirm with callers)
    :param stem: file path and prefix to write *.log and *.tree files
    :param time_unit: used by BEAST for annotation only (e.g., days, years)
    :param chain_length: optional setting for number of steps in MCMC chain
    :param screen_step: optional screen-log interval
    :param log_step: optional file-log interval
    :param treelog_step: optional tree-log interval
    :param root_height: optional (lower, upper) tuple bounding the uniform
                        prior on treeModel.rootHeight
    :return: paths to BEAST log and tree log files
    """
    logfile = stem + '.log'
    treefile = stem + '.trees'

    # reset TAXA and ALIGNMENT blocks
    # (direct _children assignment is an ElementTree-internal shortcut)
    t_taxa = self.template.findall('taxa')[0]
    t_taxa._children = []
    t_aln = self.template.find('alignment')
    t_aln._children = []

    for k, v in fasta.iteritems():
        h = v['header']
        s = v['sequence']
        date_val = float(v['days'])
        date = Node(
            'date', {
                'units': time_unit,
                'direction': 'forwards',
                'value': str(date_val)
            })

        # TAXA
        taxon = Node('taxon', {'id': h})
        taxon.append(date)
        t_taxa.append(taxon)

        # SEQUENCE
        seqtag = Node('sequence', {})
        staxon = Node('taxon', {'idref': h})
        staxon.tail = '\n\t\t\t' + s.upper()  # mimic formatting in BEAST XML
        seqtag.append(staxon)
        t_aln.append(seqtag)

    # revise log settings
    t_mcmc = self.template.find('mcmc')
    if chain_length:
        t_mcmc.set('chainLength', str(int(chain_length)))  # number of MCMC steps

    # set prior distribution for rootheight
    if root_height and type(root_height) is tuple and len(root_height) == 2:
        lower, upper = root_height
        assert lower <= upper, 'Root height prior specification lower must be <= upper.'

        # set the uniform prior: update the existing element when present
        prior_parent = t_mcmc.find('posterior').find('prior')
        priors = prior_parent.getchildren()
        found = False
        for prior in priors:
            parameter = prior.find('parameter')
            if parameter is None or parameter.get('idref') != 'treeModel.rootHeight':
                continue
            found = True
            prior.set('lower', str(lower))
            prior.set('upper', str(upper))

        if not found:
            # FIX: the element used to be built but never attached to the
            # tree, so the prior silently disappeared. Build it in the same
            # shape the detection loop above expects (a <parameter idref=...>
            # child) and append it to the <prior> block.
            prior = Node('uniformPrior', {'lower': str(lower), 'upper': str(upper)})
            prior.append(Node('parameter', {'idref': 'treeModel.rootHeight'}))
            prior_parent.append(prior)

        # rescale starting tree to be compatible with this prior
        t_tree = self.template.find('rescaledTree')
        t_tree.set('height', str(0.1 * (upper - lower) + lower))

    for log in t_mcmc.findall('log'):
        if log.get('id') == 'fileLog':
            log.set('fileName', logfile)
            if log_step:
                log.set('logEvery', str(int(log_step)))
        elif log.get('id') == 'screenLog':
            if screen_step:
                log.set('logEvery', str(int(screen_step)))

    log_tree_element = t_mcmc.find('logTree')
    log_tree_element.set('fileName', treefile)
    if treelog_step:
        log_tree_element.set('logEvery', str(int(treelog_step)))

    return logfile, treefile
def to_svg(self, file=None, canvas_shape=None):
    """Convert the current layer state to an SVG.

    Parameters
    ----------
    file : path-like object, optional
        An object representing a file system path. A path-like object is
        either a str or bytes object representing a path, or an object
        implementing the `os.PathLike` protocol. If passed the svg will
        be written to this file
    canvas_shape : 4-tuple, optional
        View box of SVG canvas to be generated specified as `min-x`,
        `min-y`, `width` and `height`. If not specified, calculated
        from the last two dimensions of the layer.

    Returns
    ----------
    svg : string
        SVG representation of the layer.
    """
    if canvas_shape is not None:
        min_shape = canvas_shape[:2]
        shape = canvas_shape[2:]
    else:
        # Derive the view box from the last two dimension ranges.
        extents = self.dims.range[-2:]
        min_shape = [r[0] for r in extents]
        max_shape = [r[1] for r in extents]
        shape = np.subtract(max_shape, min_shape)

    namespaces = {
        'xmlns': 'http://www.w3.org/2000/svg',
        'xmlns:xlink': 'http://www.w3.org/1999/xlink',
    }
    xml = Element(
        'svg',
        height=f'{shape[0]}',
        width=f'{shape[1]}',
        version='1.1',
        **namespaces,
    )

    # Group node shifting the content so the minimum corner maps to (0, 0).
    group = Element('g', transform=f'translate({-min_shape[1]} {-min_shape[0]})')
    for node in self.to_xml_list():
        group.append(node)
    xml.append(group)

    header = (
        '<?xml version=\"1.0\" standalone=\"no\"?>\n'
        '<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n'
        '\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n'
    )
    svg = header + tostring(xml, encoding='unicode', method='xml')

    if file:
        # Save svg to file
        with open(file, 'w') as f:
            f.write(svg)

    return svg
def processFile(folder, path, filename, width, height, xmin, xmax, ymin, ymax):
    """Add a 'plate' bounding-box <object> to the VOC annotation XML for an
    image, creating the annotation skeleton first when the file is missing.

    :param folder: directory holding (and named in) the annotation
    :param path: image path recorded in the annotation
    :param filename: image file name; the XML name is derived from it
    :param width, height: image dimensions as strings
    :param xmin, xmax, ymin, ymax: bounding box coordinates as strings
    """
    def _text(tag, value):
        # Helper: leaf element with string content.
        node = Element(tag)
        node.text = value
        return node

    def _plate_object():
        # Helper: a VOC <object> entry for the plate bounding box
        # (this block used to be duplicated in both branches below).
        obj = Element('object')
        obj.append(_text('name', 'plate'))
        obj.append(_text('pose', 'Unspecified'))
        obj.append(_text('truncated', '0'))
        obj.append(_text('difficult', '0'))
        bndbox = Element('bndbox')
        bndbox.append(_text('xmin', xmin))
        bndbox.append(_text('ymin', ymin))
        bndbox.append(_text('xmax', xmax))
        bndbox.append(_text('ymax', ymax))
        obj.append(bndbox)
        return obj

    xml_path = DIR_SEP.join([folder, filename[:filename.rindex('.')] + '.xml'])
    if os.path.exists(xml_path):
        # Existing annotation: just append another object to its root.
        _root = Et.parse(xml_path).getroot()
    else:
        # No annotation yet: build the full VOC skeleton.
        _root = Element('annotation')
        _root.set('verified', 'yes')
        _root.append(_text('folder', folder[folder.rindex(DIR_SEP) + 1:]))
        _root.append(_text('filename', filename))
        _root.append(_text('path', path))
        _source = Element('source')
        _source.append(_text('database', 'Unknown'))
        _root.append(_source)
        _size = Element('size')
        _size.append(_text('width', width))
        _size.append(_text('height', height))
        _size.append(_text('depth', '3'))
        _root.append(_size)
        _root.append(_text('segmented', '0'))

    _root.append(_plate_object())

    with open(xml_path, 'w') as w:
        # str() of the serialized bytes yields "b'...'"; the [2:-1] slice
        # strips that wrapper. NOTE(review): Et.tostring(_root).decode()
        # would be the robust form, but the slice is kept to preserve the
        # exact bytes previously written.
        _result = str(Et.tostring(_root))
        w.writelines([_result[2:-1]])
def createGst(username, sliceName, industry, rateLimit, rateLimitHosts, userDataAccess, userDataHosts, exportGST, createNetSlice, switches, onosUrl, onosUsr, onosPwd):
    """Validate the GST form input, write the slice's GST XML file and
    optionally export it and/or deploy the network slice on ONOS.

    Returns "Success! " on success or an " Error: ..." message string.
    """
    netSlices_dir = Path("./resources/network_slices")
    rateLimitHosts = rateLimitHosts.replace(" ", "").split(',')
    userDataHosts = userDataHosts.replace(" ", "").split(',')

    root = Element('GST')
    tree = ET.ElementTree(root)

    # FIX: these checks used to be one if/elif chain, so an empty rateLimit
    # took its branch and skipped the userDataAccess and duplicate-name
    # validations entirely. Each check now runs independently.
    if sliceName == "":
        return " Error: Please fill all obligatory fields "
    if industry == "Select":
        return " Error: Please choose an industry "
    if rateLimit == "":
        rateLimit = 0  # no limit requested
    elif not rateLimit.isdigit() or int(rateLimit) <= 0:
        return " Error: Rate limit must be a positive integer "
    if userDataAccess == "Select":
        return " Error: Please choose User data access option "
    if netSliceExists(username, sliceName):
        return " Error: Network slice name already exists "

    # Slice Name
    sliceNameXml = Element('slice_name')
    root.append(sliceNameXml)
    sliceNameXml.text = str(sliceName)

    # Slice Industry
    industryXml = Element('industry')
    root.append(industryXml)
    industryXml.text = str(industry)

    # Rate limit slice
    rateLimitXml = Element('rate_limit')
    root.append(rateLimitXml)
    rateLimitValueXml = ET.SubElement(rateLimitXml, 'value')
    rateLimitValueXml.text = "None" if rateLimit == "" else str(rateLimit)
    rateLimitHostsXml = ET.SubElement(rateLimitXml, 'hosts')
    for i in range(len(rateLimitHosts)):
        hostIp = ET.SubElement(rateLimitHostsXml, 'host_ip')
        hostIp.text = str(rateLimitHosts[i])

    # Slice User data access
    userDataAccessXml = Element('user_data_access')
    root.append(userDataAccessXml)
    userDataAccessValueXml = ET.SubElement(userDataAccessXml, 'value')
    userDataAccessValueXml.text = str(userDataAccess)
    userDataHostsXml = ET.SubElement(userDataAccessXml, 'hosts')
    for i in range(len(userDataHosts)):
        hostIp = ET.SubElement(userDataHostsXml, 'host_ip')
        hostIp.text = str(userDataHosts[i])

    # Persist the slice definition with an explicit XML declaration.
    slice_file = sliceName + ".xml"
    with open(netSlices_dir / slice_file, 'wb') as f:
        f.write(b'<?xml version="1.0" encoding="UTF-8"?>')
        tree.write(f, xml_declaration=False, encoding='utf-8')

    if exportGST == 1:
        # Optionally write a copy to the user-selected output folder.
        outputPath = functions_miscellaneous.outputFolder()
        if outputPath != '':
            slice_file = "GANSO_slice_" + sliceName + ".xml"
            output_file = Path(outputPath)
            with open(output_file / slice_file, 'wb') as f:
                f.write(b'<?xml version="1.0" encoding="UTF-8"?>')
                tree.write(f, xml_declaration=False, encoding='utf-8')

    if createNetSlice == 1:
        createNetworkSlice(sliceName, switches, onosUrl, onosUsr, onosPwd, False)

    # If switches does not exist, include network slice in netSlices file
    netSlicesFile = open(netSlices_dir / "network_slices.txt", "a+")
    netSlicesFile.write(sliceName + "\n")
    netSlicesFile.close()

    return "Success! "
class Construct:
    """Builds a CWMP (TR-069) SOAP envelope as an ElementTree structure
    and serializes it to XML text."""

    def __init__(self):
        """Initialize an empty document tree and the output buffer."""
        # create an empty tree; the envelope element is attached as its
        # root later, in create_soap_envelope()
        self.soap = ElementTree()
        # this variable used to save created soap envelpoe's xml content
        self.str_xml = ""

    def create_soap_envelope(self, rpc_name, cwmp_version="cwmp-1-0", rpc_args="", cwmp_id=""):
        """Create a SOAP envelope for the given RPC and serialize it to XML.

        rpc_name: RPC name need to create
        cwmp_version: CPE supported cwmp version, default is cwmp-1-0
        rpc_args: RPC arguments, default is ""
        cwmp_id: value used for the cwmp ID header, default is ""

        Returns (CONSTRUCT_SUC, "") on success or (CONSTRUCT_FAIL, error)
        when any step raises.
        """
        log.debug_info("create_soap_envelope")
        try:
            # Standard SOAP 1.1 envelope/encoding and XML Schema namespaces.
            dict_envelope_attrib = {
                'xmlns:SOAP-ENV': 'http://schemas.xmlsoap.org/soap/envelope/',
                'xmlns:SOAP-ENC': 'http://schemas.xmlsoap.org/soap/encoding/',
                'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
                'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance'
            }
            # cwmp namespace depends on the negotiated CWMP version
            dict_envelope_attrib[
                'xmlns:cwmp'] = '' + 'urn:dslforum-org:' + cwmp_version
            # create an element
            self.soap_envelope = Element('SOAP-ENV:Envelope',
                                         dict_envelope_attrib)
            # set root of tree
            self.soap._setroot(self.soap_envelope)
            # create sub elemnts of soap_envelop
            self.soap_header = Element('SOAP-ENV:Header')
            self.soap_body = Element('SOAP-ENV:Body')
            # add soap_header and soap_body to soap_envelope
            self.soap_envelope.append(self.soap_header)
            self.soap_envelope.append(self.soap_body)
            # create sub elements of soap header
            # (helper presumably defined elsewhere in this class -- not
            # visible in this chunk)
            self.create_soap_header(cwmp_id)
            # create sub elements of soap body
            self.create_soap_body(rpc_name, rpc_args)
            # convert structure to xml
            self.str_xml = tostring(self.soap_envelope)
        except Exception, e:
            log.debug_err(e)
            return CONSTRUCT_FAIL, e
        return CONSTRUCT_SUC, ""
def emptyBase(cls):
    """Build a STPCHKRQ aggregate seeded with the shared BANKACCTFROM fixture."""
    stpchkrq = Element("STPCHKRQ")
    stpchkrq.append(bk_stmt.BankacctfromTestCase.etree)
    return stpchkrq