def main():
    """Append DOIs from the article feed given on the command line to the matching records."""
    bibupload = ChunkedBibUpload(mode='a', user=SCRIPT_NAME, notimechange=True)
    bibindex = ChunkedBibIndex(indexes='reportnumber', user=SCRIPT_NAME)

    # Open the feed URL given as the first argument and parse the XML.
    source = sys.argv[1]
    tree = ET.parse(urllib.urlopen(source))
    root = tree.getroot()

    for item in root.iter('article'):
        doi = item.get('doi')
        arxiv = item.get('preprint_id')
        # add_doi() is expected to return the record IDs matching this preprint.
        recID = add_doi(arxiv)
        if recID:
            recID = recID[0]
            record_xml = append_doi(recID, doi)
            if record_xml:
                messages.append("Running bibupload and bibindex for record " + str(recID))
                messages.append("Uploading the following XML: " + repr(record_xml))
                bibupload.add(record_xml)
                bibindex.add(recID)

    if WRITE_ERRORS:
        er = enable_log('errors.txt')
        for line in errors:
            er(line)
        er("CLOSE")
    if WRITE_LOG:
        msg = enable_log('log.txt')
        for line in messages:
            msg(line)
        msg("CLOSE")

    # Finalize the chunked tasks explicitly so any pending chunk is submitted.
    bibupload.__del__()
    bibindex.__del__()
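# A minimal sketch of the append_doi() helper assumed above: it returns a small
# MARCXML fragment (record ID plus the new DOI field) suitable for bibupload's
# append mode, or None when the DOI is already stored.  Storing the DOI in MARC
# field 0247_ with $2 DOI / $a <doi> is an assumption about the local convention,
# as is the duplicate check via get_fieldvalues(); adjust both to the installation.
from invenio.bibrecord import record_add_field, record_xml_output
from invenio.search_engine import get_fieldvalues

def append_doi(recID, doi):
    """Return MARCXML that appends `doi` to record `recID`, or None if already present."""
    if doi in get_fieldvalues(recID, '0247_a'):
        return None  # DOI already stored, nothing to upload
    rec = {}
    record_add_field(rec, '001', controlfield_value=str(recID))
    record_add_field(rec, '024', ind1='7', subfields=[('a', doi), ('2', 'DOI')])
    return record_xml_output(rec)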
def main():
    """Re-upload the records given on the command line in correct mode ('c')."""
    bibupload = ChunkedBibUpload(mode='c', user=SCRIPT_NAME, notimechange=True)
    for recid in sys.argv[1:]:
        # fix_encoding() returns the corrected MARCXML for a record.
        bibupload.add(fix_encoding(int(recid)))
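# A minimal sketch of the fix_encoding() helper assumed above, covering the common
# case of double-encoded UTF-8 (UTF-8 bytes that were read back as Latin-1 and
# encoded again).  Fetching the stored MARCXML via print_record(recid, 'xm') and
# the recovery strategy itself are assumptions; the real helper may repair other
# kinds of damage.
from invenio.search_engine import print_record

def fix_encoding(recid):
    """Return MARCXML for `recid` with double-encoded UTF-8 repaired where possible."""
    xml = print_record(recid, 'xm')  # MARCXML as a UTF-8 byte string (Python 2 str)
    try:
        # Undo the extra round trip: interpret the bytes as UTF-8, then map the
        # resulting code points back to single bytes via Latin-1.
        fixed = xml.decode('utf-8').encode('latin-1')
        fixed.decode('utf-8')  # raises if the record was not double-encoded
        return fixed
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Record looks fine (or cannot be repaired this way); upload it unchanged.
        return xml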