def upload_multipart(self, key,
                     content_type='application/octet-stream',
                     MultipartUpload=MultipartUpload,
                     encryption_key=None):
    """Upload file to S3 by uploading multiple chunks"""
    if isinstance(key, Key):
        key = key.key
    result = yield from self._request(Request(
        "POST", '/' + key, {'uploads': ''}, {
            'HOST': self._host,
            'CONTENT-TYPE': content_type,
        }, payload=b'', encryption_key=encryption_key))
    try:
        if result.status != 200:
            xml = yield from result.read()
            raise errors.AWSException.from_bytes(result.status, xml)
        xml = yield from result.read()
        upload_id = parse_xml(xml).find('s3:UploadId', namespaces=NS).text
        assert upload_id, xml
        return MultipartUpload(self, key, upload_id)
    finally:
        result.close()

def read_next():
    nonlocal final, marker
    result = yield from self._request(Request(
        "GET", "/",
        {'prefix': prefix, 'max-keys': str(max_keys), 'marker': marker},
        {'HOST': self._host}, b'',
    ))
    data = (yield from result.read())
    if result.status != 200:
        raise errors.AWSException.from_bytes(result.status, data)
    x = parse_xml(data)
    result = list(map(Key.from_xml, x.findall('s3:Contents', namespaces=NS)))
    if (x.find('s3:IsTruncated', namespaces=NS).text == 'false'
            or len(result) == 0):
        final = True
    else:
        marker = result[-1].key
    return result

def check_upstream(api_proxy_base, api_key):
    '''
    Check connectivity and consistency of NameCheap-hosted records.

    Throw exceptions if a problem is found, otherwise return nothing.
    '''
    query = dict(
        ApiKey=api_key,
        Command='namecheap.domains.dns.getHosts',
        ClientIp=get_proxy_ipaddr(api_proxy_base)
    )
    query.update(api_defaults)

    got = get(api_proxy_base + '?' + urlencode(query))
    tree = parse_xml(got.content)

    for el in tree.iter('{http://api.namecheap.com/xml.response}Error'):
        raise ValueError('Upstream API error: {}'.format(el.text))

    hosts, expected_hash = [], None

    for el in tree.iter('{http://api.namecheap.com/xml.response}host'):
        if (el.attrib['Type'], el.attrib['Name']) == ('TXT', 'hosts-hash'):
            expected_hash = el.attrib['Address']
        else:
            hosts.append(format_xml_element(el))

    found_hash = hash_host_records(hosts)

    if expected_hash != found_hash:
        raise ValueError('Calculated hash {} but expected {}'.format(
            found_hash, expected_hash))

    print('Remote host checks out with hash "{}"'.format(found_hash),
          file=stderr)

def target_outdated(self):
    outdated = False
    error = False

    if not self.source.exists():
        log.error('File {} does not exist.'.format(self.source))
        raise ValueError('Fatal error.')

    if self.target.exists():
        target_mtime = self.target.lstat().st_mtime
        if self.source.lstat().st_mtime > target_mtime:
            outdated = True
    else:
        target_mtime = -1
        outdated = True

    files = [Path(file.text) for file in
             parse_xml(self.source.open('rb')).findall('./qresource/file')]

    for file in files:
        real_file = self.source.parent / file
        if not real_file.exists():
            log.error('File {} does not exist. (defined in {})'.format(
                file, self.source))
            error = True
            continue  # can't stat a missing file
        if real_file.lstat().st_mtime > target_mtime:
            outdated = True

    if error:
        raise ValueError('Fatal error.')

    return outdated

def load_data(url, format):
    raw = urlopen(url).read()
    if format == "json":
        data = json.loads(raw)
    elif format == "xml":
        data = parse_xml(raw)
    else:
        raise ValueError("Unsupported format: {}".format(format))
    return data

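# Hedged usage sketch for load_data above (the URL is a placeholder, and
# `parse_xml` is assumed to accept the raw response bytes, as it does
# elsewhere in these snippets):
#
#   data = load_data('https://example.com/feed.json', 'json')
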
def try_parse_single_tag_from_xml_document(path: Path, tag_name: str) -> Optional[str]:
    assert tag_name.startswith("{"), "Should start with schema"
    root = parse_xml(str(path)).getroot()
    tags = tuple(_iter_tag_recursive(root, tag_name))
    if is_empty(tags):
        return None
    else:
        assert len(tags) == 1  # Should only be specified once
        tag = tags[0]
        return tag.text

def qtc():
    data = urllib.urlopen(url).read()
    xml = parse_xml(data)
    date = xml.get("date")
    index = {}
    for group in xml.findall("index"):
        this = {}
        timeperiod = int(group.get("timeperiod"))
        for item in group.getchildren():
            name = fixname(item.tag)
            price = float(item.find("price").text)
            volume = float(item.find("volume").text)
            this[name] = (price, volume)
        index[timeperiod] = this
    return (date, index)

def _load_hadoop_config(self):
    if not self._hadoop_configs:
        self._hadoop_configs = dict()
        for file_name in os.listdir(self.local_hadoop_conf_dir):
            if not file_name.lower().endswith(".xml"):
                continue
            xml_doc = parse_xml(
                os.path.join(self.local_hadoop_conf_dir, file_name))
            for property in xml_doc.getiterator("property"):
                name = property.find("name")
                if name is None or name.text is None:
                    continue
                value = property.find("value")
                if value is None or value.text is None:
                    continue
                self._hadoop_configs[name.text] = value.text

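# For reference, each Hadoop *-site.xml read above uses the standard
# configuration layout; every <property> contributes one name/value pair to
# self._hadoop_configs (the values shown are illustrative):
#
#   <configuration>
#     <property>
#       <name>fs.defaultFS</name>
#       <value>hdfs://localhost:9000</value>
#     </property>
#   </configuration>
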
async def _parse_legends_file(self, file_path: str,
                              parse_only: typing.List[str] = None):
    if not paths.exists(file_path):
        raise InternalError(f'{file_path} does not exist')
    self.context.logger.debug(f'Parsing {file_path}')
    tree = parse_xml(file_path)
    root = tree.getroot()
    for elem in root:  # type: Element
        if elem.tag in self._parsers and (parse_only is None
                                          or elem.tag in parse_only):
            self.context.logger.debug(f'Parsing {elem.tag}')
            await self._parsers[elem.tag].parse(elem)
        else:
            self.context.logger.debug(f'Not parsing {elem.tag}')
    self.context.logger.debug(f'Done parsing {file_path}')

def exists(self, prefix=''):
    result = yield from self._request(Request(
        "GET", "/",
        {'prefix': prefix, 'separator': '/', 'max-keys': '1'},
        {'HOST': self._host}, b'',
    ))
    data = (yield from result.read())
    if result.status != 200:
        raise errors.AWSException.from_bytes(result.status, data)
    x = parse_xml(data)
    return any(map(Key.from_xml, x.findall('s3:Contents', namespaces=NS)))

def getWeather(self, place, unit):
    if place == 'here':
        place = self._getLocation()
    WOEID = self._getWOEID(place)
    url = 'http://weather.yahooapis.com/forecastrss?w=' + WOEID + '&u=' + unit
    response = parse_xml(urlopen(url).read())
    title = (response.find('channel/description').text[7:] + ' on ' +
             response.find('channel/item/pubDate').text + ':\n')
    raw = response.find('channel/item/description').text
    start = 'Forecast:</b><BR />'
    end = '<a href'
    weather = raw[raw.find(start) + len(start):
                  raw.find(end) - 1].replace('<br />', '')[:-1]
    return title + weather

def list(self, prefix='', max_keys=1000):
    result = yield from self._request(Request(
        "GET", "/",
        {'prefix': prefix, 'max-keys': str(max_keys)},
        {'HOST': self._host}, b'',
    ))
    data = (yield from result.read())
    if result.status != 200:
        raise errors.AWSException.from_bytes(result.status, data)
    x = parse_xml(data)
    if x.find('s3:IsTruncated', namespaces=NS).text != 'false':
        raise AssertionError(
            "File list is truncated, use bigger max_keys")
    return list(map(Key.from_xml, x.findall('s3:Contents', namespaces=NS)))

def parse_file(file: IO) -> Dict[int, instructions.InstructionBase]:
    """
    Read XML data from the input stream and process it.

    Parameters
    ----------
    file: IO
        The input data stream.

    Returns
    -------
    Dict[int, instructions.InstructionBase]
        A sorted dictionary keyed by the contents of the XML ``order``
        attribute.
    """
    try:
        xml_data = parse_xml(file).getroot()
        return InstructionsParser.parse(xml_data)
    except ParseError:
        exit_app(exitCodes.INVALID_XML_FORMAT, 'Invalid XML format.', True)

def from_bytes(status, body):
    if not body:
        # sometimes Riak CS doesn't have response body :(
        # TODO(tailhook) maybe use status to create specific error?
        raise RuntimeError("HTTP Error {}".format(status))
    try:
        xml = parse_xml(body)
    except ParseError:
        raise RuntimeError(body)
    code_el = xml.find("Code")
    if code_el is None or not code_el.text:
        raise RuntimeError(body)
    class_name = code_el.text
    try:
        cls = globals()[class_name]
    except KeyError:
        raise RuntimeError("Error {} is unknown".format(class_name))
    msg = xml.find("Message")
    return cls(class_name if msg is None else msg.text)

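# The body parsed above is the standard un-namespaced AWS/S3 error document,
# e.g. (message text illustrative):
#
#   <Error>
#     <Code>NoSuchKey</Code>
#     <Message>The specified key does not exist.</Message>
#   </Error>
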
def commit(self):
    if self._done:
        raise RuntimeError("Can't commit twice or after close")
    self._done = True
    data = xml_tostring(self.xml)
    result = yield from self.bucket._request(Request(
        "POST", '/' + self.key,
        {'uploadId': self.upload_id},
        headers={
            'CONTENT-LENGTH': str(len(data)),
            'HOST': self.bucket._host,
            'CONTENT-TYPE': 'application/xml',
        }, payload=data))
    try:
        xml = yield from result.read()
        if result.status != 200:
            raise errors.AWSException.from_bytes(result.status, xml)
        xml = parse_xml(xml)
        return xml.find('s3:ETag', namespaces=NS)
    finally:
        result.close()

def get_weather(self, place, unit):
    """Get Weather information.

    Use the argument 'place' to say which city you want to know about.
    If the place argument is "here", it'll fetch your place from your IP.
    """
    if place == 'here':
        place = self._get_location()
    woeid = self._get_woeid(place)
    url = 'http://weather.yahooapis.com/forecastrss?w=%s&u=%s' % (woeid, unit)
    response = parse_xml(urlopen(url).read())
    title = response.find('channel/description').text[7:] + \
        ' on ' + response.find('channel/item/pubDate').text + ':\n'
    raw = response.find('channel/item/description').text
    start = 'Forecast:</b><BR />'
    end = '<a href'
    weather = raw[raw.find(start) + len(start):
                  raw.find(end) - 1].replace('<br />', '')[:-1]
    return title + weather

def update_version_file(versions_file_name, new_version_str):
    new_version = Version.parse(new_version_str)
    doc = parse_xml(versions_file_name)
    branch, = (branch for branch in doc.findall('branch')
               if int(branch.get('id')) == new_version.major)
    old_stable_version = Version.parse(branch.get('stable'))
    old_unstable_version = Version.parse(branch.get('unstable'))
    assert_upgrade(old_stable_version, new_version)
    if new_version.is_stable:
        branch.attrib['stable'] = str(new_version)
        if old_unstable_version is not None and old_unstable_version < new_version:
            del branch.attrib['unstable']
    else:
        if old_unstable_version is not None:
            assert_upgrade(old_unstable_version, new_version)
        branch.attrib['unstable'] = str(new_version)
    doc.write(open(versions_file_name, "w"))

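# The function above assumes a versions file with one <branch> element per
# major version, carrying 'id', 'stable', and (optionally) 'unstable'
# attributes. A hedged sketch, inferred from the code rather than the real
# file (root element name and version numbers are illustrative):
#
#   <versions>
#     <branch id="2" stable="2.3.1" unstable="2.4.0b1"/>
#     <branch id="3" stable="3.0.2"/>
#   </versions>
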
def push_upstream(api_proxy_base, api_key, host_records):
    '''
    Post replacement host records to NameCheap.

    Throw exceptions if a problem is found, otherwise return nothing.
    '''
    hash = hash_host_records(map(format_csv_row, host_records))
    form = dict(
        ApiKey=api_key,
        Command='namecheap.domains.dns.setHosts',
        ClientIp=get_proxy_ipaddr(api_proxy_base),
        # Hash record is the first record.
        HostName1='hosts-hash',
        RecordType1='TXT',
        Address1=hash,
        MXPref1=0,
        TTL1=300
    )
    form.update(api_defaults)

    for (record, number) in zip(host_records, count(2)):
        form.update({
            'HostName{:d}'.format(number): record['Host'],
            'RecordType{:d}'.format(number): record['Type'],
            'Address{:d}'.format(number): record['Value'],
            'MXPref{:d}'.format(number): record['MXPref'] or '0',
            'TTL{:d}'.format(number): record['TTL']
        })

    posted = post(api_proxy_base, data=form)
    tree = parse_xml(posted.content)

    for el in tree.iter('{http://api.namecheap.com/xml.response}Error'):
        raise ValueError('Upstream API error: {}'.format(el.text))

    if posted.status_code not in range(200, 300):
        raise Exception('Bad response status {}'.format(posted.status_code))

def upload_multipart(self, key, content_type='application/octet-stream',
                     MultipartUpload=MultipartUpload):
    """Upload file to S3 by uploading multiple chunks"""
    if isinstance(key, Key):
        key = key.key
    result = yield from self._request(Request(
        "POST", '/' + key, {'uploads': ''}, {
            'HOST': self._host,
            'CONTENT-TYPE': content_type,
        }, payload=b''))
    try:
        if result.status != 200:
            xml = yield from result.read()
            raise errors.AWSException.from_bytes(result.status, xml)
        xml = yield from result.read()
        upload_id = parse_xml(xml).find('s3:UploadId', namespaces=NS).text
        assert upload_id, xml
        return MultipartUpload(self, key, upload_id)
    finally:
        result.close()

def format_style_sheet(xml_doc, layer_name, element_name, element_attrs,
                       element_text, target_xml):
    # Formats a template XML stylesheet at runtime.
    et = parse_xml(xml_doc)  # parse the XML document
    # Iterate over all available layers and pick those named in layer_name.
    for i in et.getroot().findall('Layer'):
        if not layer_name:
            break  # layer_name is exhausted, stop looking
        if i.attrib.get('name') in layer_name:
            # Remove the layer that was found and is about to be processed.
            layer_name.remove(i.attrib.get('name'))
            target_elem = i.find('Datasource')  # this is our target element
            if target_elem is None:
                # Target layer has no Datasource element; denote failure.
                return False
            # Create a new element as a child of the target `Datasource`.
            subelement = SubElement(target_elem, element_name)
            subelement.text = element_text  # set text of the new element
            subelement.set(*element_attrs)  # set attribute of the new element
    if exists(target_xml):
        unlink(target_xml)
    et.write(target_xml)  # write out the modified stylesheet
    return True

def main(argv):
    try:
        (sFile) = smr.loadArgs(argv, 'calls')
        sFilePrefix = sFile
        aColumns = ['year', 'mon', 'day', 'time', 'dur(m)',
                    'contact', 'phone', 'type', 'timestamp']
        out_fname = 'z_{}.tsv'.format(sFilePrefix)
        tree = parse_xml(sFile)
        calls = tree.getroot()
        pc("""Backup info:
    File: {}
    count={} backup_set={} backup_date={}""".format(
            sFile, calls.get('count'),
            calls.get('backup_set'), calls.get('backup_date')))
        rows = []
        iC = 0
        for m in calls.findall('call'):
            iC = iC + 1
            # pc('type(m) = {}', type(m))
            # pc('m = {}', smr.dump(m, True))
            iType = smr.aget("m", m, 'type', req=True, toint=True)
            if iType == 1:
                sType = "1"
            elif iType == 2:
                sType = "2"
            elif iType == 3:
                sType = "3"
            elif iType == 5:
                sType = "5"
            else:
                raise Exception("Unexpected type {} in xml: {}".format(
                    iType, smr.dump(m)))
            sPhone = smr.fixPhone(smr.aget("m", m, 'number', req=True))
            d = smr.dateFromTimestamp(smr.aget("m", m, 'date', req=True))
            rows.append([
                d.year, d.month, d.day, d.strftime('%H:%M:%S'),
                round(smr.aget("m", m, 'duration', req=True, toint=True) / 60, 1),
                smr.aget("m", m, 'contact_name', req=True),
                sPhone, sType, d.strftime('%y%m%d-%H%M%S')
            ])
        pc("calls found: {}", iC)

        # Sort by date, then time.
        rows = sorted(rows, key=lambda r: (r[0], r[1], r[2], r[3]))

        fTmp = open(out_fname, 'w', newline='')
        bNewFile = True
        wri = csv.writer(fTmp, delimiter='\t')
        wri.writerow(aColumns)
        for r in rows:
            wri.writerow(r)
        fTmp.close()
        pc("Data written to: {}".format(out_fname))
        pc('.: done :.')
        pc('.')
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        sTB = '\n'.join(traceback.format_tb(exc_traceback))
        pc("Fatal exception: {}\n - msg: {}\n stack: {}".format(
            exc_type, exc_value, sTB))

def parse_specdata(source, doc_name, doc_type, doc_version):
    """
    Reads a schema specification from a file (e.g., specdata.xml) or
    file-like object, and returns a tuple containing:

    * a mapping of class names to Element subclasses
    * a Document subclass

    :arg source: the file or file-like object
    :type source: str or file-like object
    :arg doc_name: the name of the schema document class
    :type doc_name: str
    :returns: tuple
    """
    tree = parse_xml(source)
    elements = {}
    globals = []

    def child_elements(parent_level, element_list, upper_recursive=None):
        children = []
        while element_list:
            raw_element = element_list[0]
            raw_attrs = raw_element.attrib
            element_level = int(raw_attrs['level'])
            is_global = False
            if element_level == -1:
                is_global = True
            elif parent_level is not None and not element_level > parent_level:
                break
            element_list = element_list[1:]
            element_name = '%sElement' % raw_attrs.get(
                'cppname', raw_attrs.get('name')).translate(None, '-')
            element_attrs = {
                '__module__': None,
                'id': int(raw_attrs['id'], 0),
                'name': raw_attrs['name'],
                'type': SPECDATA_TYPES[raw_attrs['type']],
                'mandatory': raw_attrs.get('mandatory', False) == '1',
                'multiple': raw_attrs.get('multiple', False) == '1'
            }
            try:
                element_attrs['default'] = {
                    INT: lambda default: int(default),
                    UINT: lambda default: int(default),
                    FLOAT: lambda default: float(default),
                    STRING: lambda default: str(default),
                    UNICODE: lambda default: unicode(default)
                }.get(element_attrs['type'],
                      lambda default: default)(raw_attrs['default'])
            except (KeyError, ValueError):
                element_attrs['default'] = None
            element = type(element_name, (Element,), element_attrs)
            elements[element_name] = element
            recursive = []
            if upper_recursive:
                recursive.extend(upper_recursive)
            if raw_attrs.get('recursive', False) == '1':
                recursive.append(element)
            element_children, element_list = child_elements(
                element_level if not is_global else 0, element_list, recursive)
            element_children += tuple(recursive)
            element.children = element_children
            if is_global:
                globals.append(element)
            else:
                children.append(element)
        return tuple(children), element_list

    children = child_elements(None, tree.getroot().getchildren())[0]
    document_attrs = {
        '__module__': None,
        'type': doc_type,
        'version': doc_version,
        'children': children,
        'globals': tuple(globals)
    }
    document = type(doc_name, (Document,), document_attrs)
    return elements, document

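# Hedged usage sketch: all arguments here are hypothetical; the real schema
# file, document name, type, and version depend on the caller.
#
#   elements, MatroskaDocument = parse_specdata(
#       'specdata.xml', 'MatroskaDocument', 'matroska', 2)
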
def get(baseurl, args):
    url = "%s?%s" % (baseurl, urllib.urlencode(args))
    data = urllib.urlopen(url).read()
    return parse_xml(data)

# In[9]:

def _voc_cls_name_to_idx(name):
    idx = dict(list(zip(VOC_CLASSES, list(range(len(VOC_CLASSES))))))[name]
    return idx

sample_id = '000330'

# Read the image file.
img = cv2.imread(osp.join(DATASET_DIR, 'JPEGImages/{}.jpg'.format(sample_id)))

# Read and parse the corresponding annotation file.
anno = parse_xml(osp.join(DATASET_DIR,
                          'Annotations/{}.xml'.format(sample_id))).getroot()
gt_boxes = []
for obj in anno.findall('object'):
    bndbox = obj.find('bndbox')
    bbox = [
        float(bndbox.find('xmin').text) - 1,
        float(bndbox.find('ymin').text) - 1,
        float(bndbox.find('xmax').text) - 1,
        float(bndbox.find('ymax').text) - 1,
        _voc_cls_name_to_idx(obj.find('name').text)
    ]
    gt_boxes.append(bbox)
gt_boxes = np.array(gt_boxes, dtype=np.float32)

# Resize the image and rescale the GT boxes accordingly.
img_size_min = np.min(img.shape[:2])

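# For reference, the annotation files follow the standard PASCAL VOC layout;
# each <object> carries a class name and a 1-based pixel bounding box (hence
# the "- 1" above to make the coordinates 0-based):
#
#   <annotation>
#     <object>
#       <name>dog</name>
#       <bndbox>
#         <xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
#       </bndbox>
#     </object>
#   </annotation>
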
def main(argv):
    try:
        (sFile) = smr.loadArgs(argv, "sms")
        sFilePrefix = sFile
        # First let's write SMS.
        aColumns = ['type', 'year', 'mon', 'day', 'time', 'contact',
                    'phone', 'dir', 'body', 'timestamp']
        out_fname = 'z_{}.tsv'.format(sFilePrefix)
        tree = parse_xml(sFile)
        smses = tree.getroot()
        rows = []
        iC = 0
        for m in smses.findall('sms'):
            iC = iC + 1
            # pc('type(m) = {}', type(m))
            # pc('m = {}', smr.dump(m, True))
            iType = smr.aget("m", m, 'type', req=True, toint=True)
            if iType == 1:
                sType = "in"
            elif iType == 2:
                sType = "out"
            else:
                raise Exception("Unexpected type {} in xml: {}".format(
                    iType, smr.dump(m)))
            sPhone = smr.fixPhone(smr.aget("m", m, 'address', req=True))
            d = smr.dateFromTimestamp(smr.aget("m", m, 'date', req=True))
            rows.append([
                'sms', d.year, d.month, d.day, d.strftime('%H:%M:%S'),
                smr.aget("m", m, 'contact_name', req=True),
                sPhone, sType,
                smr.aget("m", m, 'body', req=True),
                d.strftime('%y%m%d-%H%M%S')
            ])
        pc("sms records found: {}", iC)

        iC = 0
        for m in smses.findall('mms'):
            iC = iC + 1
            # pc('type(m) = {}', type(m))
            # pc('m = {}', smr.dump(m, True))
            iType = smr.aget("m", m, 'type', req=True, toint=True)
            if iType == -1:
                sType = "?"
            else:
                raise Exception("Unexpected type {} in xml: {}".format(
                    iType, smr.dump(m)))
            sPhone = smr.fixPhone(smr.aget("m", m, 'address', req=True))
            sMsg = ""
            for part in m.findall('part'):
                # for part in m.find('parts').getchildren():
                iSeq = smr.aget("part", part, 'seq', req=True, toint=True)
                if iSeq == 0:
                    sMsg = smr.aget("part", part, 'text', req=True)
            d = smr.dateFromTimestamp(smr.aget("m", m, 'date', req=True))
            rows.append([
                'mms', d.year, d.month, d.day, d.strftime('%H:%M:%S'),
                smr.aget("m", m, 'contact_name', req=True),
                sPhone, sType, sMsg,
                d.strftime('%y%m%d-%H%M%S')
            ])
        pc("mms records found: {}", iC)

        # Sort by date, then time.
        rows = sorted(rows, key=lambda r: (r[1], r[2], r[3], r[4]))

        fTmp = open(out_fname, 'w', newline='')
        bNewFile = True
        wri = csv.writer(fTmp, delimiter='\t')
        wri.writerow(aColumns)
        for r in rows:
            wri.writerow(r)
        fTmp.close()
        pc("starting argv: {}".format(argv))
        pc("Data written to: {}".format(out_fname))
        pc('.: done :.')
        pc('.')
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        sTB = '\n'.join(traceback.format_tb(exc_traceback))
        pc("Fatal exception: {}\n - msg: {}\n stack: {}".format(
            exc_type, exc_value, sTB))

def dlcs_parse_xml(data, split_tags=False):
    """Parse any del.icio.us XML document and return Python data structure.

    Recognizes all XML document formats as returned by the version 1 API and
    translates to a JSON-like data structure (dicts 'n lists).

    Returned instance is always a dictionary. Examples::

        {'posts': [{'url':'...','hash':'...',},],}
        {'tags':['tag1', 'tag2',]}
        {'dates': [{'count':'...','date':'...'},], 'tag':'', 'user':'...'}
        {'result':(True, "done")}
        # etcetera.
    """
    # TODO: split_tags is not implemented
    if DEBUG > 3:
        print("dlcs_parse_xml: parsing from ", data, file=sys.stderr)

    if not hasattr(data, 'read'):
        data = StringIO(data)

    doc = parse_xml(data)
    root = doc.getroot()
    fmt = root.tag

    # Split up into three cases: Data, Result or Update
    if fmt in ('tags', 'posts', 'dates', 'bundles'):
        # Data: expect a list of data elements, 'resources'.
        # Use `fmt` (without last 's') to find data elements, elements
        # don't have contents, attributes contain all the data we need:
        # append to list
        elist = [el.attrib for el in doc.findall(fmt[:-1])]

        # Return list in dict, use tagname of rootnode as keyname.
        data = {fmt: elist}

        # Root element might have attributes too, append dict.
        data.update(root.attrib)

        return data

    elif fmt == 'result':
        # Result: answer to operations
        if 'code' in root.attrib:
            msg = root.attrib['code']
        else:
            msg = root.text

        # XXX: Return {'result':(True, msg)} for /known/ O.K. messages,
        # use (False, msg) otherwise. Move this to DeliciousAPI?
        v = msg in DLCS_OK_MESSAGES
        return {fmt: (v, msg)}

    elif fmt == 'update':
        # Update: "time"
        return {fmt: {
            'time': time.strptime(root.attrib['time'], ISO_8601_DATETIME)}}

    else:
        raise PyDeliciousException("Unknown XML document format '%s'" % fmt)

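# A minimal sketch of the 'result' branch above (assumes 'done' appears in
# DLCS_OK_MESSAGES, per the docstring example):
#
#   dlcs_parse_xml('<result code="done" />')
#   # -> {'result': (True, 'done')}
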
import os
import datetime

from tomlkit import comment
from tomlkit import document
from tomlkit import nl
from tomlkit import table
from tomlkit import integer as toml_int
from tomlkit.toml_file import TOMLFile
from xml.etree.ElementTree import parse as parse_xml

from lib import toml_file_header

xml_file = os.path.join(os.path.dirname(__file__), "..", "srcdata",
                        "stm32_db", "mcu", "families.xml")
xml_tree = parse_xml(xml_file)
xml_root = xml_tree.getroot()

toml_file = os.path.join(os.path.dirname(__file__), "..", "data",
                         "mcu_families", "stm.toml")
toml = TOMLFile(toml_file)

doc = document()
toml_file_header(doc)

for xml_family in xml_root.findall("./Family"):
    family = table()
    family.add("name", xml_family.get("Name"))
    family.add("object_type", "family")
    for xml_subfamily in xml_family.findall("./SubFamily"):