def main():
    """Rebuild index statistics from previously written posting files.

    All paths are Windows-style and relative to the current working
    directory; only the postings directory and the stemming flag are
    consumed by the reconstruction step below.
    """
    cwd = os_getcwd()
    corpus_path = cwd + "\\Data"
    output_path = cwd + "\\postings"
    stemming = True
    queries = cwd + "\\queries.txt"
    num_doc_to_retrieve = 2000
    corpus_size, average_length = reconstruct_from_postings(output_path, stemming)
def cached_file(path: str, root: Union[str, None] = None, max_age: int = 38400) -> Union[bytes, Response]:
    """
    Load a static file, but use the in-memory cache to avoid roundtrips to disk.

    :param path: file name, resolved relative to ``root``.
    :param root: base directory; defaults to the current working directory
        *at call time*.  (Previously the default was evaluated once at import
        time, so a later ``os.chdir`` was silently ignored.)
    :param max_age: seconds for the ``Cache-Control: max-age`` header.
    :return: the response produced by :func:`static_file`.
    """
    if root is None:
        root = os_getcwd()
    return static_file(path, root, max_age)
def process_cvs_command(self) -> str:
    """Execute the parsed CVS command and return an info message on success.

    :raises ConsoleInputError: for unknown commands or bad 'add' arguments.
    :raises CVSException: if control falls through the command switch.
    """
    command = self._cvs_command
    if command not in self._CVS_COMMANDS:
        raise ConsoleInputError('Unknown command')

    if command == 'init':
        self._changes_codec.cvs_init()
        # WouldBeBetter: Change 'os_getcwd' to specific dir
        return "Initialized Edu-cvs repository in " + os_getcwd()

    if command == 'add':
        args = self._cvs_command_args
        if not args:
            raise ConsoleInputError('No input for "add"')
        if len(args) > 1:
            raise ConsoleInputError('add: multiple files')
        cvs_add_result: Tuple[str, str] = self._changes_codec.cvs_add(args[0])
        return '{0}: {1}'.format(*cvs_add_result)

    raise CVSException('[_cvs_command] switch was passed')
def test_init(self):
    """'init' must report the repository path (the current directory)."""
    cvs: ControlVersionSystem = ControlVersionSystem(['init', '123'])
    expected = "Initialized Edu-cvs repository in " + os_getcwd()
    self.assertEqual(expected, cvs.process_cvs_command())
    # Clean up the repository directory that 'init' created on disk.
    shutil_rmtree(cvs._changes_codec._CVS_DIR_PATH)
def cleStart():
    """Purge game-server log/core files listed in clear.list and start.list."""
    from json import load as j_loads

    # File name patterns removed from every 'start.list' directory.
    removable = ("p2p_packet_info.txt", "packet_info.txt", "profile.txt",
                 "stdout", "syslog", "syserr", "usage.txt", "VERSION.txt",
                 "DEV_LOG.log", "mob_count", "*.core")
    base_dir = os_getcwd()

    # Wipe PTS plus log/core directories for each entry in clear.list,
    # returning to the base directory after each one.
    with open("clear.list", "r") as handle:
        entries = j_loads(handle)
    for entry in entries:
        os_chdir(entry["path"])
        fShell("cat /dev/null > PTS")
        fShell("rm -rf log/* cores/*")
        os_chdir(base_dir)

    # Delete the known junk files for each entry in start.list.
    with open("start.list", "r") as handle:
        entries = j_loads(handle)
    for entry in entries:
        os_chdir(entry["path"])
        fShell("echo --- delete inside '%s' ---" % entry["path"])
        fShell("rm -fv %s" % " ".join(removable))
        os_chdir(base_dir)
def __init__(self, tela, design, idioma, interface_idioma, icon):
    """Store UI collaborators and prepare the Upgrade helper.

    Download and backup destinations live under the current working
    directory; the backup folder is stamped with the current date-time.
    """
    self.icon = icon
    self.interface_idioma = interface_idioma
    self.tp_atualizacao = None
    self.idioma = idioma
    self.design = design
    self.tela = tela

    # Timestamp like 'YYYY-MM-DD-HH-MM-SS' names the backup folder.
    stamp = str(datetime.now()).split('.')[0].replace(' ', '-').replace(':', '-')
    dest_download = os_path.join(os_getcwd(), 'AtualizarSafira')
    dest_backup = os_path.join(os_getcwd(), 'backups', stamp)

    # Upgrade handler instance (downloads into dest_download, backs up into dest_backup).
    self.up = Upgrade.Upgrade(dest_download, dest_backup)
def __str__(self):
    """Render the Python prompt segment, or '' when it should be hidden."""
    if not self.py_version_enable:
        return ""
    prefix = f"{Color(self.py_prefix_color)}{self.py_prefix_text}{Color().NONE}"
    # Show the segment only when a project marker file exists in the cwd
    # or a virtualenv is active.
    for marker in self.search_f:
        if exists(join(os_getcwd(), marker)) or "VIRTUAL_ENV" in os_environ:
            return (f"{separator(self.config)}{prefix}"
                    f"{Color(self.py_color)}{self.py_symbol}"
                    f"{self.get_version()}{Color().NONE}")
    return ""
def co_savings(cfg: Dict[str, Any]) -> Iterator[None]:
    """
    Saves vsz, exports images and saves hdf5 log.

    Coroutine must receive (via .send()):
        veusze: Veusz embedded object
        log: dict with parameters: 'out_name' - log's index,
             'out_vsz_full' - vsz file name to save
    Log parameters are collected into a pandas DataFrame and appended to the
    HDF5 store derived from cfg['program']['log'] (suffix replaced by '.h5').
    """
    # The store stays open for the coroutine's whole lifetime; rows are
    # appended per received (veusze, log) pair.
    with pd.HDFStore(Path(cfg['program']['log']).with_suffix('.h5'), mode='a') as storeLog:
        veusze = None
        # When driven from another module, work in the output directory and
        # restore the previous cwd in the finally block below.
        if __name__ != '__main__':
            path_prev = os_getcwd()
            os_chdir(cfg['out']['path'].parent)
        print('Saving to {}'.format(cfg['out']['path'].parent.absolute()))
        try:
            while True:
                veusze, log = yield ()
                if not cfg['out']['b_images_only']:
                    veusze.Save(str(log['out_vsz_full']))
                    # Save vsz modification date
                    # NOTE(review): the trailing comma makes this a 1-tuple,
                    # not a datetime — confirm intended before changing.
                    log['fileChangeTime'] = datetime.fromtimestamp(
                        Path(log['out_vsz_full']).stat().st_mtime),
                    dfLog = pd.DataFrame.from_records(
                        log, exclude=['out_name', 'out_vsz_full'],
                        index=[log['out_name']])
                    storeLog.append(Path(cfg['out']['path']).name, dfLog,
                                    data_columns=True,
                                    expectedrows=cfg['in']['nfiles'],
                                    index=False, min_itemsize={'index': 30})
                # Export images, optionally under an asyncio timeout.
                if cfg['async']['loop']:
                    try:
                        # yield from asyncio.ensure_future(
                        #     asyncio.wait_for(, cfg['async']['export_timeout_s'], loop=cfg['async']['loop'])
                        b = cfg['async']['loop'].run_until_complete(
                            export_images_timed(veusze, cfg, '#' + log['out_name']))
                    except asyncio.TimeoutError:
                        l.warning('can not export in time')
                else:
                    export_images(veusze, cfg['out'], '#' + log['out_name'])
        except GeneratorExit:
            # Normal shutdown path: caller called .close().
            print('Ok>')
        finally:
            if __name__ != '__main__':
                os_chdir(path_prev)
            # Close the embedded Veusz window unless the caller asked to keep it.
            if veusze and cfg['program']['return'] != '<embedded_object>':
                veusze.Close()
                l.info('closing Veusz embedded object')
def static_file(filename: str, path: Union[str, None] = None, max_age: int = 38400) -> Union[bytes, Response]:
    """
    Load static file `filename` from directory `path`.

    :param filename: name of the file, relative to ``path``.
    :param path: base directory; defaults to the current working directory
        at call time (previously captured once at import time, so a later
        ``os.chdir`` was ignored).
    :param max_age: max-age seconds for the ``Cache-Control`` header.
    :raises FileNotFoundError: if the file does not exist.
    """
    if path is None:
        path = os_getcwd()
    file_type, encoding = guess_type(filename)
    full_path = path_join(path, filename)
    # A missing file raises FileNotFoundError from open() directly; the old
    # `except FileNotFoundError as e: raise e` re-raise added nothing.
    with open(full_path, "rb") as file:
        stats = os_stat(full_path)
        last_mod = formatdate(stats.st_mtime, usegmt=True)
        return simple_response(
            file.read(),
            content_type=file_type,
            headers={
                "Cache-Control": f"private, max-age={max_age}",
                "Last-Modified": last_mod,
            },
        )
def run():
    """ main function. """
    # Current working directory, normalized to exactly one trailing slash.
    cwd = os_getcwd().rstrip('/') + '/'
    # Assert a known hardwired location for the runtime of this
    # script... (it's just for myself for now, so it's OK).
    try:
        assert cwd.endswith('/amara/utils/grammar/')
    except AssertionError as ae:
        print(cwd)
        raise ae
    do()
str(path_db), # name of hdf5 pandas store where is log table #min_time, max_time: datetime, optional, allows range table_log rows 'table_log': f'/{device}/logRuns', # str: name of log table - table with intervals: 'pattern_path': path_cruise / device / '~pattern~.vsz' } f_row = lambda r: [ '{Index:%y%m%d_%H%M}-{DateEnd:%H%M}.vsz'.format_map(r), bytes( "time_range = ['{:%Y-%m-%dT%H:%M:%S}', '{:%Y-%m-%dT%H:%M:%S}']". format(r['Index'], r['DateEnd'] + pd.Timedelta(300, "s")), 'utf-8') ] pattern_code = cfg_in['pattern_path'].read_bytes() #encoding='utf-8' from os import getcwd as os_getcwd, chdir as os_chdir path_prev = os_getcwd() argv_prev = sys.argv os_chdir(cfg_in['pattern_path'].parent) for filename, str_expr in h5log_names_gen(cfg_in, f_row): path_vsz = cfg_in['pattern_path'].with_name(filename) path_vsz.write_bytes( re.sub(rb'^([^\n]+)', str_expr, pattern_code, count=1)) veuszPropagate.main([ 'ini/veuszPropagate.ini', '--path', str(cfg_in['pattern_path'].with_name( '??????_????-????.vsz')), #path_db), '--pattern_path', f"{cfg_in['pattern_path']}_", # here used to auto get export dir only. must not be not existed file path
def main():
    """Entrypoint of GHDL's Language Protocol Server.

    Parses CLI arguments, configures logging, builds the LSP connection
    (stdin/stdout by default, optionally traced or replayed from a file)
    and runs the server loop until EOF or a fatal error.
    """
    logger = getLogger(__loggerName)
    parser = _generateCLIParser()
    args = parser.parse_args()

    # --disp-config: print configuration diagnostics and exit without serving.
    if args.disp_config:
        errorout_console.Install_Handler()
        libghdl.disp_config()
        print("python:")
        print("sys.platform: {}, os.name: {}".format(sys.platform, os.name))
        print(sys.version)
        return

    # Setup logging: -v maps to INFO, -vv (or more) to DEBUG, default ERROR.
    if args.verbose >= 2:
        loglevel = DEBUG
    elif args.verbose >= 1:
        loglevel = INFO
    else:
        loglevel = ERROR
    if args.log_file:
        # Rotate old logs before (re)opening the current one for writing.
        __rotate_log_files(args.log_file, 5)
        logstream = open(args.log_file, "w")
    else:
        logstream = sys_stderr
    basicConfig(
        format="%(asctime)-15s [%(levelname)s] %(message)s",
        stream=logstream,
        level=loglevel,
    )

    if args.verbose != 0:
        sys_stderr.write("Args: {}\n".format(sys_argv))
        sys_stderr.write("Current directory: {}\n".format(os_getcwd()))
    logger.info("Args: %s", sys_argv)
    logger.info("Current directory is %s", os_getcwd())

    # Connection: read LSP messages from stdin, or replay from -i/--input.
    instream = sys_stdin.buffer
    if args.input is not None:
        instream = open(args.input, "rb")
    conn = LSPConn(instream, sys_stdout.buffer)

    # Tracing can come from the CLI flag or the GHDL_LS_TRACE env var, but is
    # disabled when replaying from a file (the trace would just duplicate it).
    trace_file = args.trace_file
    if trace_file is None:
        trace_file = os_environ.get("GHDL_LS_TRACE")
    if trace_file is not None:
        if args.input is None:
            __rotate_log_files(trace_file + ".in", 5)
            __rotate_log_files(trace_file + ".out", 5)
            conn = LSPConnTrace(trace_file, conn)
        else:
            logger.info("Traces disabled when -i/--input")

    handler = VhdlLanguageServer()
    try:
        server = LanguageProtocolServer(handler, conn)
        server.run()
    except Exception:
        # Last-resort guard: log the traceback and exit non-zero.
        logger.exception("Uncaught error")
        sys_exit(1)
def homeSourceFilter(communityName, communityLink):
    """
    // 小区房源基本信息存储list
    """
    # Scrape every listing page of one community and dump the per-listing
    # 'address' / 'flood' / 'followInfo' text blocks to a GBK-encoded CSV.
    addresses = []
    floods = []
    followInfoes = []
    """
    // 获取首页
    """
    # Fetch the first page (connect timeout 20s, read timeout 60s).
    res = requests_get(communityLink, timeout=(20, 60))
    soup = BeautifulSoup(res.text, 'lxml')
    """
    // 获取分页信息
    """
    # Pagination info lives in the 'page-data' attribute of the page box.
    if len(soup.select('div[class="page-box house-lst-page-box"]')) == 0:
        print("{0}:未找到分页信息".format(communityName))
        raise Exception("未找到分页信息")
    else:
        page_data = soup.select(
            'div[class="page-box house-lst-page-box"]')[0]['page-data']
        # NOTE(review): eval() on scraped attribute text is unsafe for
        # untrusted pages; ast.literal_eval/json would be safer — confirm
        # the attribute is a plain dict literal before changing.
        page_data = eval(page_data)
        totalPage = page_data["totalPage"]
    """
    // 分页处理
    // 涵盖只有单页的情况
    """
    # Page N (N>1) URL is built by inserting 'pgN' before the last path part.
    for pageIndex in range(1, totalPage + 1):
        if (pageIndex > 1):
            insertIndex = communityLink.rfind('/', 0, -2)
            pageLink = communityLink[:insertIndex + 1] + 'pg' + str(
                pageIndex) + communityLink[insertIndex + 1:]
            res = requests_get(pageLink, timeout=(20, 60))
            soup = BeautifulSoup(res.text, 'lxml')
        """
        // 通过li标签定位房源
        // 每个房源有三个class属性存有所需信息: address, flood, followInfo
        """
        # Each <li class="clear"> is one listing with three info divs.
        houseResourceList = soup.select('li[class="clear"]')
        for houseResource in houseResourceList:
            """
            // address
            """
            address = houseResource.find_all('div', class_="address")
            """
            // flood
            """
            flood = houseResource.find_all('div', class_="flood")
            """
            // followInfo
            """
            followInfo = houseResource.find_all('div', class_="followInfo")
            # Join the stripped text fragments of each div into one cell.
            addresses.append("".join(list(address[0].stripped_strings)))
            floods.append("".join(list(flood[0].stripped_strings)))
            followInfoes.append("".join(list(followInfo[0].stripped_strings)))
    # One CSV per community under .\AllCommunity\ (Windows-style path).
    allHomeSourceInfo = DataFrame({
        "address": addresses,
        "flood": floods,
        "followInfo": followInfoes
    })
    allHomeSourceInfo.to_csv(os_getcwd() + "\\" + "AllCommunity\\" +
                             communityName + ".csv",
                             encoding='gbk')
def main(new_arg=None, veusze=None, **kwargs):
    """
    Initialise configuration and runs or returns routines
    cfg:
        ['program']['log'],
        'out'
        'in'
        'async'
    globals: load_vsz
    l
    :param new_arg:
    :param veusze: used to reuse veusz embedded object (thus to not leak memory)
    :return:
    """
    global l, load_vsz
    cfg = cfg_from_args(my_argparser(), new_arg, **kwargs)
    if not cfg or not cfg['program'].get('return'):
        print('Can not initialise')
        return cfg
    elif cfg['program']['return'] == '<cfg_from_args>':  # to help testing
        return cfg
    l = init_logging(logging, None, cfg['program']['log'],
                     cfg['program']['verbose'])
    cfg['program']['log'] = l.root.handlers[
        0].baseFilename  # sinchronize obtained absolute file name
    print('\n' + this_prog_basename(__file__), 'started', end=' ')
    # NOTE(review): this assigns a *local* variable named __name__; it does
    # not change the module global that other functions test — confirm the
    # intended effect.
    __name__ = '__main__'  # indicate to other functions that they are called from main

    # Output resolution: either explicit 'paths' (images-only mode) or a
    # pattern file from which output names are derived.
    if cfg['out'].get('paths'):
        if not cfg['out']['b_images_only']:
            raise NotImplementedError(
                'Provided out in not "b_images_only" mode!')
        cfg['out']['nfiles'] = len(cfg['out']['paths'])
        cfg['out']['path'] = cfg['out']['paths'][0]
        print(
            end=f"\n- {cfg['out']['nfiles']} output files to export images...")
        pass
    else:
        if cfg['out']['b_images_only']:
            print(
                'in images only mode. Output pattern: ')  # todo Export path: '
        else:
            print('. Output pattern and Data: ')
        try:
            # Using cfg['out'] to store pattern information
            if not Path(cfg['in']['pattern_path']).is_absolute():
                cfg['in']['pattern_path'] = Path(cfg['in']['path']).with_name(
                    str(cfg['in']['pattern_path']))
            cfg['out']['path'] = cfg['in']['pattern_path']
            cfg['out']['paths'], cfg['out']['nfiles'], cfg['out'][
                'path'] = init_file_names(**cfg['out'], b_interact=False)
        except Ex_nothing_done as e:
            # Missing pattern is fatal only when data must also be written.
            if not cfg['out']['b_images_only']:
                l.warning(
                    f'{e.message} - no pattern. Specify it or use "b_images_only" mode!'
                )
                return  # or raise FileNotFoundError?

    # Input resolution: reuse output paths when exporting images only,
    # otherwise discover input files (TypeError means path was None).
    if (cfg['out']['b_images_only'] and cfg['out']['paths']):
        cfg['in']['paths'] = cfg['out']['paths']  # have all we need to export
    else:
        try:
            cfg['in']['paths'], cfg['in']['nfiles'], cfg['in'][
                'path'] = init_file_names(**cfg['in'],
                                          b_interact=cfg['program']['b_interact'])
        except Ex_nothing_done as e:
            print(e.message)
            return  # or raise FileNotFoundError?
        except TypeError:  # expected str, bytes or os.PathLike object, not NoneType
            # cfg['in']['path'] is None. May be it is not need
            cfg['in']['paths'] = [cfg['in']['pattern_path']
                                  ]  # dummy for compatibility
            cfg['in']['nfiles'] = 1

    cfg['out']['export_dir'] = dir_from_cfg(cfg['out']['path'].parent,
                                            cfg['out']['export_dir'])

    if 'restore_config' in cfg['program']['before_next']:
        cfg['in_saved'] = cfg['in'].copy()
    # Next is commented because reloading is Ok: not need to Close()
    # if cfg['out']['b_images_only'] and not 'Close()' in cfg['program']['before_next']:
    #     cfg['program']['before_next'].append(
    #         'Close()')  # usually we need to load new file for export (not only modify previous file)

    # Optional asyncio loop used only for timing out image exports.
    if cfg['program']['export_timeout_s'] and export_images_timed:
        cfg['async'] = {
            'loop': asyncio.get_event_loop(),
            'export_timeout_s': cfg['program']['export_timeout_s']
        }
    else:
        cfg['async'] = {'loop': None}

    load_vsz = load_vsz_closure(cfg['program']['veusz_path'],
                                cfg['program']['load_timeout_s'],
                                cfg['program']['b_execute_vsz'])
    cfg['load_vsz'] = load_vsz
    cfg['co'] = {}

    # Choose the input-name generator: HDF5 log-table ranges, HDF5 tables,
    # or plain vsz files found on disk.
    if cfg['in']['table_log'] and cfg['in']['path'].suffix == '.h5' and not (
            cfg['out']['b_images_only'] and len(cfg['in']['paths']) > 1):
        # load data by ranges from table log rows
        cfg['in']['db_path'] = cfg['in']['path']
        in_fulls = h5log_names_gen(cfg['in'])
    elif cfg['in']['tables']:
        # tables instead files
        in_fulls = ge_names_from_hdf5_paths(cfg)
    else:  # switch to use found vsz as source if need only export images (even with database source)
        in_fulls = ge_names(cfg)

    cor_savings = co_savings(cfg)
    cor_savings.send(None)
    nfiles = 0
    try:  # if True:
        path_prev = os_getcwd()
        os_chdir(cfg['out']['path'].parent)
        if cfg['program']['return'] == '<corutines_in_cfg>':
            # Hand the coroutines back to the caller instead of running them.
            cfg['co']['savings'] = cor_savings
            cfg['co']['gen_veusz_and_logs'] = load_to_veusz(in_fulls, cfg)
            cfg['co']['send_data'] = co_send_data(load_to_veusz, cfg,
                                                  cor_savings)
            return cfg  # return with link to generator function
        elif cfg['in'].get('data_yield_prefix'):
            # Cycle with obtaining Veusz data
            # NOTE(review): 'co_send_data' below is the module-level factory,
            # not an instantiated coroutine; the commented-out line suggests
            # 'cor_send_data' was intended — confirm.
            cfgin_update = None
            while True:  # for vsz_data, log in cor_send_data.send(cfgin_update):
                try:
                    vsz_data, log = co_send_data.send(cfgin_update)
                    nfiles += 1
                except (GeneratorExit, StopIteration, Ex_nothing_done):
                    break
                if 'f_custom_in_cycle' in cfg['program']:
                    cfgin_update = cfg['program']['f_custom_in_cycle'](
                        vsz_data, log)
        else:
            # Cycle without obtaining Veusz data (or implemented by user's cfg['program']['f_custom_in_cycle'])
            for veusze, log in load_to_veusz(in_fulls, cfg, veusze):
                file_name_r = Path(log['out_vsz_full']).relative_to(
                    cfg['out']['path'].parent)
                if cfg['program'].get('f_custom_in_cycle'):
                    cfgin_update = cfg['program']['f_custom_in_cycle'](veusze,
                                                                       log)
                    veusze_commands(veusze, cfgin_update, file_name_r)
                cor_savings.send((veusze, log))
                nfiles += 1
        cor_savings.close()
        if cfg['program']['return'] != '<embedded_object>':
            veusze = None  # to note that it is closed in cor_savings.close()
        print(f'{nfiles} processed. ok>')
        pass
    except Exception as e:
        l.exception('Not good')
        return  # or raise FileNotFoundError?
    finally:
        # Always restore the previous working directory and release the loop.
        if cfg['async']['loop']:
            cfg['async']['loop'].close()
        os_chdir(path_prev)
        if veusze and cfg['program']['return'] == '<end>':
            veusze.Close()
            veusze.WaitForClose()
            veusze = None
        elif cfg['program']['return'] == '<embedded_object>':
            cfg['veusze'] = veusze
    return cfg
def load_vsz(
        vsz: Union[str, PurePath, None] = None,
        veusze: Optional[str] = None,
        prefix: Optional[str] = None,
        suffix_prior: Optional[str] = '_fbot'
) -> Tuple[veusz.Embedded, Optional[Dict[str, Any]]]:
    """
    Load (create) specified data from '*.vsz' files
    :param vsz: full name of vsz or None. If not None and such file not found then create it
    :param veusze: veusz.Embedded object or None - will be created if None else reused
    :param prefix: only data started with this prefix will be loaded
    :param suffix_prior: high priority names suffix, removes other version of data
        if starts same but with no such suffix (see veusz_data())
    :return: (veusze, vsz_data):
        - veusze - veusz.Embedded object
        - vsz_data - loaded data if prefix is not None else None
    """
    # Decide whether the file exists and what title the embedded window gets.
    if vsz is None:
        file_exists = False
        if veusze is None:
            title = 'empty'
            l.debug('new embedded window')
        else:
            l.debug('keep same embedded window')
    else:  # isinstance(vsz, (str, PurePath)):
        vsz = Path(vsz)
        file_exists = vsz.is_file()
        if file_exists:
            l.debug(f'loading found vsz: {vsz}')
            title = f'{vsz} - was found'
        else:
            l.debug(f'creatig vsz: {vsz}')
            title = f'{vsz} - was created'
    if veusze is None:
        # Veusz embedded window construction
        # Save right path in veusz.Embedded (closure)
        if __name__ != '__main__':  # if this haven't done in main()
            path_prev = os_getcwd()  # to recover
            os_chdir(
                vsz.parent
            )  # allows veusze.Load(path) to work if _path_ is relative or relative paths is used in vsz
        veusze = veusz.Embedded(title)  # , hidden=True
        # veusze.EnableToolbar()
        # veusze.Zoom('page')
        if __name__ != '__main__':
            os_chdir(path_prev)  # recover
    if file_exists:
        # b_execute_vsz / load_timeout_s come from the enclosing closure.
        if not b_execute_vsz:
            if load_timeout_s:
                # veusze.Load(str(vsz.name)) with timeout:  # not tried veusze.serv_socket.settimeout(60)
                SingletonTimeOut.run(partial(veusze.Load, str(vsz.name)),
                                     load_timeout_s)
                sleep(1)
            else:
                veusze.Load(vsz.name)
        else:

            def load_by_exec(vsz, veusze):
                """
                Unsafe replacement for veusze.Load(vsz) to add variable argv.
                Runs any python commands before 1st Title command of
                :param vsz:
                :return:
                """
                with vsz.open(encoding='utf-8') as v:
                    # comine pure python lines
                    # Collect the leading pure-python lines (everything up to
                    # the first Veusz command, which starts title-cased).
                    lines = []
                    for line in v:
                        if line[:2].istitle():
                            break
                        lines.append(line)
                    # dangerous for unknown vsz but we allow 1 time at beginning of file: to use for known vsz
                    loc_exclude = locals().copy()
                    del loc_exclude['veusze']
                    loc = {
                        'argv': ['veusz.exe', str(vsz)],
                        'BASENAME': (lambda: vsz.stem)
                    }  # match = re.match
                    exec('\n'.join(lines), {}, loc)
                    loc.update(locals().copy())
                    for k in loc_exclude.keys():
                        del loc[k]
                    basename_result = "'{}'".format(loc['BASENAME']())
                    # eval Veusz commands
                    # First run the Veusz command that terminated the loop above,
                    # then the remainder of the file.
                    eval(f"""veusze.{line}""")
                    for line in v:
                        if 'BASENAME()' in line:
                            line = line.replace(
                                'BASENAME()', basename_result
                            )  # only this helps in Custom Definitions expressions
                        # cmd, params = line.split('(', maxsplit=1)
                        eval(f"""veusze.{line}""", {}, loc)  # , {"__builtins__": {}}
                        # from ast import literal_eval
                        # params_dict = literal_eval(params.rsplit(')', maxsplit=1)[0])
                        # getattr(veusze, cmd)(**params_dict)

            return load_by_exec(vsz, veusze)
    if prefix is None:
        return veusze, None
    return veusze, veusz_data(veusze, prefix, suffix_prior)
def output2file(string2output, filename=None, mode=None):
    """Write *string2output* to *filename* using the given file mode.

    :param string2output: text to write.
    :param filename: target path; defaults to 'tempfile_from_output2file'
        inside the current working directory at call time.  (The old default
        concatenated getcwd() and the name without a separator — producing a
        sibling of the cwd — and was evaluated once at import time.)
    :param mode: required file mode: 'w' to overwrite, 'a' to append.
    :raises Exception: when *mode* is not supplied.
    """
    if mode is None:
        raise Exception("Please give a mode argument when calling this function (use mode='w' to overwrite file or mode='a' to append to file).")
    if filename is None:
        from os.path import join as os_path_join
        filename = os_path_join(os_getcwd(), 'tempfile_from_output2file')
    with open(filename, mode) as fout:
        fout.write(string2output)
def homeSourceDownload():
    """Download per-community home listings, tracking progress in allCommunity.csv.

    The CSV acts as a resumable work list: rows whose 'descriptor' is "Yes"
    are skipped; progress is written back to disk after the loop finishes
    (and inside the handler when the failing community is the last one).
    Per-community exceptions are logged and the loop moves on.
    """
    # Load the community summary file if present, otherwise build it fresh
    # from the district/community scrapers.
    if (path_exists(r"allCommunity.csv")):
        allCommunity = read_csv(r"allCommunity.csv", encoding='gbk')
        print("小区信息汇总文件已存在!")
    else:
        districtNameList, districtLinkList = districtFilter()
        districtCommunityDicList = communityFilter(districtNameList,
                                                   districtLinkList)
        communityNameList = []
        communityLinkList = []
        for districtCommunityDic in districtCommunityDicList:
            for communityName, communityLink in districtCommunityDic.items():
                communityNameList.append(communityName)
                communityLinkList.append(communityLink)
        allCommunity = DataFrame({
            'communityName': communityNameList,
            'communityLink': communityLinkList
        })
        allCommunity["descriptor"] = "No"
        allCommunity.to_csv(r"allCommunity.csv", encoding='gbk')
        print("小区信息汇总文件不存在,创建成功!")

    communityNum = len(allCommunity)
    for communityIndex in range(communityNum):
        try:
            if (allCommunity["descriptor"][communityIndex] == "Yes"):
                print("{0}信息已下载".format(
                    allCommunity["communityName"][communityIndex]))
                continue
            communityName = allCommunity["communityName"][communityIndex]
            communityLink = allCommunity["communityLink"][communityIndex]
            homeSourceListLink = mainpageLink2homeSourceListLink(communityLink)
            # The listing link may be empty; create an empty same-named CSV then.
            if (homeSourceListLink):
                homeSourceFilter(communityName, homeSourceListLink)
            else:
                DataFrame().to_csv(os_getcwd() + "\\" + "AllCommunity\\" +
                                   communityName + ".csv",
                                   encoding='gbk')
            # BUG FIX: the original used chained indexing
            # (allCommunity["descriptor"][idx] = "Yes"), which pandas may
            # apply to a temporary copy so progress was silently lost;
            # .at writes to the frame itself.
            allCommunity.at[communityIndex, "descriptor"] = "Yes"
            print("{0}下载完成".format(communityName))
        except Exception as e:
            # Flush progress if the very last community failed, then log and continue.
            if (communityIndex == communityNum - 1):
                allCommunity.to_csv(r"allCommunity.csv", encoding='gbk')
                print("全部下载完成")
            print("***************发生异常***************")
            print("当前处理小区名字:{0}".format(
                allCommunity["communityName"][communityIndex]))
            logging.exception(e)
    allCommunity.to_csv(r"allCommunity.csv", encoding='gbk')
    print("全部下载完成")
from sys import (
    stderr as sys_stderr,
    stdout as sys_stdout,
    exit as sys_exit,
)

# Read the MongoDB settings from the buildout-generated configuration.
config = ConfigParser.ConfigParser()
with open('buildout.cfg') as conf:
    # NOTE(review): readfp is the legacy (Python-2-era) API; read_file is its
    # modern replacement — confirm the target interpreter before changing.
    config.readfp(conf)
bind_ip = config.get('mongodb','bind_ip')
port = config.get('mongodb','port')
mongodb_data_path = config.get('mongodb','dbpath')
mongodb_log_path = config.get('mongodb','logpath')

path_cwd = os_getcwd()

# need to fix : all options from rod.recipe.mongodb are not handle
# Launch mongod with db/log paths re-rooted under the current directory:
# the first path segment from the config is stripped via split('/', 1)[1].
mongod_proc = subprocess_Popen([
    os_path_join(path_cwd, 'bin/mongod'),
    '--dbpath',
    os_path_join(path_cwd,mongodb_data_path.split('/',1)[1]),
    '--bind_ip',
    bind_ip,
    '--master',
    '--port',
    port,
    '--logpath',
    os_path_join(path_cwd,mongodb_log_path.split('/',1)[1]),
    '--directoryperdb',
])
# Give mongod a moment to start before reporting the PID.
time_sleep(3)
sys_stdout.write('Process ID of MongoD %s' % (mongod_proc.pid))
        request = self.build_xml('ListaProcedimenti', **kw)
        res = self.query_service('ListaProcedimenti', request)
        # When test info is requested, record how many 'Procedimento' entries
        # came back (0 if the container itself is empty/falsy).
        if self.testinfo and 'result' in res:
            res['length'] = len(res['result']['Procedimenti']) and \
                len(res['result']['Procedimenti']['Procedimento'])
        return res

    def AttivaProcedimento(self, NumeroPratica, mittenti=[], allegati=[], **kw):
        """Activate a procedure (Attivazione di un procedimento)."""
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; safe only if never mutated downstream — confirm, or switch
        # to None-defaults.
        request = self.get_ProtocolloIn(
            mittenti = mittenti,
            allegati = allegati,
            NumeroPratica = NumeroPratica,
            **kw)
        return self.query_service('AttivaProcedimento', request)

if __name__ == '__main__':
    import sys
    from os import getcwd as os_getcwd
    from os.path import join as os_join
    # Make the iride package importable from the in-tree source checkout.
    sys.path.append(os_join(os_getcwd(), 'src/gisweb.irideworkflow/gisweb/irideworkflow/iride/'))
    doc = prepare_xml_richiesta(tit_desindr='VIA ARNIER', tit_codfisc='NNNMRC65R12H294J', tit_dtnas=datetime(1965, 4, 2), int_annopr=1984)
    print doc2xml(doc, pprint=True)