def main():
    """Entry point: parse CLI options and generate the export_ACL XML."""
    global logger, const, cerebrum_db, xmlwriter

    logger = Factory.get_logger("cronjob")
    logger.info("generating a new XML for export_ACL")

    cerebrum_db = Factory.get("Database")()
    const = Factory.get("Constants")(cerebrum_db)

    # BUGFIX: long option names passed to getopt must NOT include the
    # leading dashes.  The original ["--out-file="] registered an option
    # literally named "--out-file", so a real "--out-file" argument raised
    # GetoptError ("option --out-file not recognized").
    opts, rest = getopt.getopt(sys.argv[1:], "f:", ["out-file="])
    filename = None
    for option, value in opts:
        if option in ("-f", "--out-file"):
            filename = value

    _cache_id_types()
    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,  # Human-readable output
                                      data_mode=True,
                                      input_encoding="latin1")
    generate_report()
    stream.close()
def write_roles(stream, items):
    """Write a <roles> document grouping member unames by group name.

    :param stream: writable stream the XML document is emitted to
    :param items: iterable of mappings with 'gname' and 'uname' keys
    """
    # Group member names by group name.
    xml_data = {}
    for data in items:
        if data['gname'] in xml_data:
            xml_data[data['gname']].append(data['uname'])
        else:
            xml_data[data['gname']] = [data['uname']]

    writer = xmlprinter.xmlprinter(stream,
                                   indent_level=2,
                                   data_mode=True,
                                   input_encoding="iso-8859-1")
    # TODO: Do we want to change the encoding here?
    writer.startDocument(encoding="iso-8859-1")
    writer.startElement("roles")
    # BUGFIX/py3: sorted() replaces the keys()/list.sort() combo, which
    # fails on Python 3 where dict.keys() returns a view, not a list.
    for name in sorted(xml_data):
        # Groups with 'admin' in the name are flagged as admin roles.
        admin = 'yes' if 'admin' in name else 'no'
        writer.startElement("role", {"name": name, "admin": admin})
        for member in xml_data[name]:
            writer.dataElement("member", member)
        writer.endElement("role")
    writer.endElement("roles")
    writer.endDocument()
def write_fnrupdate_info(outfile):
    """Write an XML file describing all national id number (fnr) changes."""
    logger.info("Writing fnrupdate info to '%s'" % outfile)

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    stream = AtomicFileWriter(outfile, 'w')
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level=2,  # Human-readable output
                                   data_mode=True,
                                   input_encoding="latin1")
    writer.startDocument(encoding="iso8859-1")
    writer.startElement("data", {"source_system": str(const.system_fs)})
    # Make the format resemble the corresponding FS output as close as
    # possible.
    for row in fs.person.list_fnr_endringer():
        writer.emptyElement("external_id", {
            "type": str(const.externalid_fodselsnr),
            "new": "%06d%05d" % (row["fodselsdato_naverende"],
                                 row["personnr_naverende"]),
            "old": "%06d%05d" % (row["fodselsdato_tidligere"],
                                 row["personnr_tidligere"]),
            "date": str(row["dato_foretatt"]),
        })
    writer.endElement("data")
    writer.endDocument()
    stream.close()
def write_fnrupdate_info(self, fnr_update_file):
    """Write an XML file describing all national id number (fnr) changes."""
    logger.info("Writing fnrupdate info to '%s'", fnr_update_file)

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    # The recoder takes care of encoding the unicode text we hand it.
    stream = AtomicStreamRecoder(fnr_update_file, mode='w',
                                 encoding=XML_ENCODING)
    writer = xmlprinter.xmlprinter(stream, indent_level=2, data_mode=True)
    writer.startDocument(encoding=XML_ENCODING)
    writer.startElement("data",
                        {"source_system": six.text_type(const.system_fs)})
    # Make the format resemble the corresponding FS output as close as
    # possible.
    for row in self.fs.person.list_fnr_endringer():
        writer.emptyElement("external_id", {
            "type": six.text_type(const.externalid_fodselsnr),
            "new": "%06d%05d" % (row["fodselsdato_naverende"],
                                 row["personnr_naverende"]),
            "old": "%06d%05d" % (row["fodselsdato_tidligere"],
                                 row["personnr_tidligere"]),
            "date": six.text_type(row["dato_foretatt"]),
        })
    writer.endElement("data")
    writer.endDocument()
    stream.close()
def main():
    """Main driver for the file generation."""
    global xmlwriter, db, const, logger

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)
    logger = Factory.get_logger("cronjob")

    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:", ["out-file="])
    except getopt.GetoptError:
        usage(1)

    filename = None
    for option, value in opts:
        if option in ('-o', '--out-file'):
            filename = value
    if not filename:
        usage(1)

    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,  # human-friendly output
                                      data_mode=True,
                                      input_encoding="UTF-8")
    # Collect everything first, then emit the document in one go.
    persons = fetch_person_data()
    courses = fetch_course_data()
    generate_document(persons, courses)
    stream.close()
def __init__(self, fname):
    # The recoder transparently encodes to the target XML encoding while
    # writing, so the printer itself needs no encoding arguments.
    stream = AtomicStreamRecoder(fname, mode='w', encoding=XML_ENCODING)
    self.gen = xmlprinter.xmlprinter(stream, indent_level=2, data_mode=1)
def write_fnrupdate_info(outfile):
    """Write an XML file describing all national id number (fnr) changes."""
    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    stream = AtomicFileWriter(outfile, 'w')
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level=2,  # Human-readable output
                                   data_mode=True,
                                   input_encoding="latin1")
    writer.startDocument(encoding="iso8859-1")
    writer.startElement("data", {"source_system": str(const.system_fs)})
    # Make the format resemble the corresponding FS output as close as
    # possible.
    for row in fs.person.list_fnr_endringer():
        writer.emptyElement("external_id", {
            "type": str(const.externalid_fodselsnr),
            "new": "%06d%05d" % (row["fodselsdato_naverende"],
                                 row["personnr_naverende"]),
            "old": "%06d%05d" % (row["fodselsdato_tidligere"],
                                 row["personnr_tidligere"]),
            "date": str(row["dato_foretatt"]),
        })
    writer.endElement("data")
    writer.endDocument()
    stream.close()
def main():
    """Entry point: parse CLI options and generate the export_ACL XML."""
    global logger, const, cerebrum_db, xmlwriter

    logger = Factory.get_logger("cronjob")
    logger.info("generating a new XML for export_ACL")

    cerebrum_db = Factory.get("Database")()
    const = Factory.get("Constants")(cerebrum_db)

    # BUGFIX: getopt long option names must be given without the leading
    # dashes; ["--out-file="] made a real "--out-file" argument raise
    # GetoptError instead of being recognized.
    opts, rest = getopt.getopt(sys.argv[1:], "f:", ["out-file="])
    filename = None
    for option, value in opts:
        if option in ("-f", "--out-file"):
            filename = value

    _cache_id_types()
    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,  # Human-readable output
                                      data_mode=True,
                                      input_encoding="latin1")
    generate_report()
    stream.close()
def _build_xml(self):
    """Generate the xml files (one "pay" export and one "track" export)."""

    def write_user(xml, item):
        # Emit one <User> element with the four common fields; shared by
        # both exports (was duplicated inline in the original).
        xml.startElement('User')
        xml.dataElement('UserLogon', item['UserLogon'])
        xml.dataElement('CostCode', item['CostCode'])
        xml.dataElement('FullName', item['FullName'])
        xml.dataElement('Email', item['Email'])
        xml.endElement('User')

    with AtomicFileWriter(self.userfile_pay, 'wb') as fh_pay, \
            AtomicFileWriter(self.userfile_track, 'wb') as fh_trk:
        # Lazy %-args instead of eager string formatting in log calls.
        logger.info("Start building pay export, writing to %s",
                    self.userfile_pay)
        xml_pay = xmlprinter(fh_pay, indent_level=2, data_mode=True,
                             input_encoding='utf-8')
        xml_pay.startDocument(encoding='utf-8')
        xml_pay.startElement('UserList')

        logger.info("Start building track export, writing to %s",
                    self.userfile_track)
        xml_trk = xmlprinter(fh_trk, indent_level=2, data_mode=True,
                             input_encoding='utf-8')
        xml_trk.startDocument(encoding='utf-8')
        xml_trk.startElement('UserList')

        # Route each user to the right export based on its Mode.
        for item in self.export_users:
            if item['Mode'] == "Pay":
                write_user(xml_pay, item)
            elif item['Mode'] == "Track":
                write_user(xml_trk, item)
            else:
                logger.error("MODE invalid: %s", item['Mode'])

        xml_pay.endElement('UserList')
        xml_pay.endDocument()
        xml_trk.endElement('UserList')
        xml_trk.endDocument()
        logger.info("Writing done")
def __init__(self, settings):
    self.sett = settings
    # Derive the output name from the configured input name:
    # "<name>.xml" -> "<name>-parsed.xml".
    out_name = re.sub(".xml$", "-parsed.xml",
                      self.sett.variables['filename'])
    self.fd = open(out_name, "w")
    self.xp = xmlprinter.xmlprinter(self.fd, indent_level=2)
    # Open the document and its root element immediately; callers emit
    # children through self.xp.
    self.xp.startDocument()
    self.xp.startElement('document')
    self.xp.newline()
def output_xml(sink, tag, root_ou, perspective, source_system, spread):
    """Write the complete fridaImport document to sink."""
    writer = xmlprinter.xmlprinter(sink, indent_level=2, data_mode=True)

    # Rebind the module-level shortcut so the other output_* helpers emit
    # elements through this particular writer.
    global output_element

    def output_element(*rest, **kw):
        return output_element_helper(writer, *rest, **kw)

    # Incredibly enough, latin-1 is a requirement.
    writer.startDocument(encoding=XML_ENCODING)
    writer.startElement("fridaImport")
    output_headers(writer, tag, root_ou)
    output_OUs(writer, perspective, spread)
    output_people(writer, perspective, source_system, spread)
    writer.endElement("fridaImport")
    writer.endDocument()
def output_xml(sink, tag, root_ou, perspective, source_system, spread):
    """Write the complete fridaImport document to sink."""
    writer = xmlprinter.xmlprinter(sink,
                                   indent_level=2,
                                   data_mode=True,
                                   input_encoding="iso8859-1")
    # Rebind the module-level shortcut so the other output_* helpers emit
    # elements through this particular writer.
    global output_element

    def output_element(*rest, **kw):
        return output_element_helper(writer, *rest, **kw)

    # Incredibly enough, latin-1 is a requirement.
    writer.startDocument(encoding="iso8859-1")
    writer.startElement("fridaImport")
    output_headers(writer, tag, root_ou)
    output_OUs(writer, perspective, spread)
    output_people(writer, perspective, source_system, spread)
    writer.endElement("fridaImport")
    writer.endDocument()
def print_ou(self, final_ou, out_file):
    """Dump the collected OU data to an XML file.

    :param final_ou: mapping of OU identifier -> dict of <sted> attributes
    :param out_file: path of the XML file to (atomically) write
    """
    logger.info("Writing OU file %s", out_file)
    encoding = 'iso-8859-1'
    with SimilarSizeStreamRecoder(out_file, "w", encoding=encoding) as stream:
        writer = xmlprinter.xmlprinter(stream, indent_level=2, data_mode=True)
        writer.startDocument(encoding=encoding)
        writer.startElement("data")
        # One empty <sted> element per OU, all data carried as attributes.
        # Only the values are needed; the original iterated items() and
        # discarded the key.
        for ou_data in final_ou.values():
            writer.emptyElement("sted", ou_data)
        writer.endElement("data")
        writer.endDocument()
def get_fnr_update_info(filename):
    """
    Fetch updates in Norwegian sosial security number (fødselsnummer)
    from LT and generate a suitable xml dump containing the changes.
    """
    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    output_stream = AtomicFileWriter(filename, "w")
    writer = xmlprinter.xmlprinter(output_stream,
                                   indent_level=2,  # Output is for humans too
                                   data_mode=True,
                                   input_encoding='latin1')
    writer.startDocument(encoding="iso8859-1")
    writer.startElement("data", {"source_system": str(const.system_lt)})
    for row in LT.GetFnrEndringer():
        # Make the format resemble the corresponding FS output as close as
        # possible.
        new_fnr = "%02d%02d%02d%05d" % (row["fodtdag_ble_til"],
                                        row["fodtmnd_ble_til"],
                                        row["fodtar_ble_til"],
                                        row["personnr_ble_til"])
        old_fnr = "%02d%02d%02d%05d" % (row["fodtdag_kom_fra"],
                                        row["fodtmnd_kom_fra"],
                                        row["fodtar_kom_fra"],
                                        row["personnr_kom_fra"])
        writer.emptyElement("external_id",
                            {"type": str(const.externalid_fodselsnr),
                             "new": new_fnr,
                             "old": old_fnr,
                             "date": str(row["dato_endret"])})
    writer.endElement("data")
    writer.endDocument()
    output_stream.close()
def output_xml(output_file, sysname, personfile, oufile):
    """Output the data from sysname source."""
    with AtomicFileWriter(output_file, "wb") as output_stream:
        writer = xmlprinter.xmlprinter(output_stream,
                                       indent_level=2,
                                       data_mode=True,
                                       input_encoding="utf-8",
                                       encoding_errors="ignore")
        # Hardcoded headers
        writer.startDocument(encoding="iso8859-1")
        writer.startElement("fridaImport")

        writer.startElement("beskrivelse")
        output_element(writer, "UIO", "kilde")
        # ISO8601 style -- the *only* right way :)
        output_element(writer, time.strftime("%Y-%m-%d %H:%M:%S"), "dato")
        output_element(writer, "UiO-FRIDA", "mottager")
        writer.endElement("beskrivelse")

        # Static institution description.
        writer.startElement("institusjon")
        for value, tag in (
                (cereconf.DEFAULT_INSTITUSJONSNR, "institusjonsnr"),
                ("Universitetet i Oslo", "navnBokmal"),
                ("University of Oslo", "navnEngelsk"),
                ("UiO", "akronym"),
                ("1110", "NSDKode")):
            output_element(writer, value, tag)
        writer.endElement("institusjon")

        # Dump all OUs first; the people need the resulting OU cache.
        ou_cache = output_ous(writer, sysname, oufile)
        output_people(writer, sysname, personfile, ou_cache)

        writer.endElement("fridaImport")
        writer.endDocument()
else: id_list.append(value) elif option in ("-f", "--file"): output_filename = value # Option "--all" overrides specific id-lists for option, value in options: if option in ("-a", "--all"): logger.info( "Option '--all' specified; all id-types will be included") id_list = selectors.keys() if not id_list: logger.warn("No IDs specified for export. No XML file generated") return 2 stream = AtomicFileWriter(output_filename, "w") writer = xmlprinter.xmlprinter( stream, indent_level=2, # Human-readable output data_mode=True, input_encoding="latin1") generate_export(writer, id_list) stream.close() if __name__ == "__main__": sys.exit(main())
elif value in id_list: logger.warn("Duplicate ID value %s (duplicate ignored)", value) else: id_list.append(value) elif option in ("-f", "--file"): output_filename = value # Option "--all" overrides specific id-lists for option, value in options: if option in ("-a", "--all"): logger.info("Option '--all' specified; all id-types will be included") id_list = selectors.keys() if not id_list: logger.warn("No IDs specified for export. No XML file generated") return 2 stream = AtomicFileWriter(output_filename, "w") writer = xmlprinter.xmlprinter(stream, indent_level = 2, # Human-readable output data_mode = True, input_encoding = "latin1") generate_export(writer, id_list) stream.close() if __name__ == "__main__": sys.exit(main())
def create(self, persons, affiliations, permisjoner):
    """
    Build a xml that import_lt should process:

    <person tittel_personlig=""
            fornavn=""
            etternavn=""
            fnr=""
            national_id_type=""
            national_id=""
            country=""
            fakultetnr_for_lonnsslip=""
            instituttnr_for_lonnsslip=""
            gruppenr_for_lonnsslip=""
            #adresselinje1_privatadresse=""
            #poststednr_privatadresse=""
            #poststednavn_privatadresse=""
            #uname="">
        <bilag stedkode=""/>
    </person>

    :param persons: dict mapping fnr -> dict of <person> XML attributes
    :param affiliations: dict mapping fnr -> dict keyed by stedkode
    :param permisjoner: not referenced by this method (kept for the
        caller's signature)
    """
    # NOTE(review): max_pct_change presumably bounds how much the new file
    # may differ in size from the previous one -- see SimilarSizeWriter.
    stream = SimilarSizeWriter(self.out_file, "wb")
    stream.max_pct_change = 50
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level=2,
                                   data_mode=True)
    writer.startDocument(encoding="iso8859-1")
    writer.startElement("data")
    for fnr, person_data in persons.iteritems():
        # NOTE(review): assumes every fnr in persons also exists in
        # affiliations; affs.keys() would fail on None -- confirm callers.
        affs = affiliations.get(fnr)
        aff_keys = affs.keys()
        person_data['fnr'] = fnr
        temp_tils = list()
        for sted in aff_keys:
            aff = affs.get(sted)
            # use . instead of , as decimal char.
            st_andel = aff.get('stillingsandel', '').replace(',', '.')
            if st_andel == '':
                # "Employment percentage for fnr ... is empty"
                logger.error("ST.andel for fnr %s er tom", fnr)
            # sted is sliced as a 6-char stedkode: two chars each for
            # faculty, institute and group.
            tils_dict = {
                'hovedkategori': aff['hovedkategori'],
                'stillingskode': aff['stillingskode'],
                'tittel': aff['tittel'],
                'stillingsandel': st_andel,
                'fakultetnr_utgift': sted[0:2],
                'instituttnr_utgift': sted[2:4],
                'gruppenr_utgift': sted[4:6],
                'dato_fra': aff['dato_fra'],
                'dato_til': aff['dato_til'],
                'dbh_kat': aff['dbh_kat'],
                'hovedarbeidsforhold': aff['hovedarbeidsforhold'],
                'tjenesteforhold': aff['tjenesteforhold'],
            }
            temp_tils.append(tils_dict)
        # One <person> element with a <tils> child per affiliation.
        writer.startElement("person", person_data)
        for tils in temp_tils:
            writer.emptyElement("tils", tils)
        writer.endElement("person")
    writer.endElement("data")
    writer.endDocument()
    stream.close()
def __init__(self, output_stream):
    # Latin-1 input; data_mode gives human-readable (indented) output.
    self.gen = xmlprinter.xmlprinter(output_stream,
                                     data_mode=1,
                                     indent_level=2,
                                     input_encoding='ISO-8859-1')
def main():
    """Parse command line options and generate the ABC XML export."""
    # Shared with generate_report()/_cache_id_types(), hence module globals.
    global cerebrum_db, constants, fs_db, xmlwriter, logger, with_email, \
        with_cell, extra_contact_fields

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-l', '--logger-name',
                        dest='logger',
                        help='Logger instance to use (default: cronjob)',
                        default='cronjob')
    args = parser.parse_args()

    if args.extra_contact_fields is not None:
        # Parse the comma separated "xml_name:contact_type:source_system"
        # entries into a list of dicts.
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [
            field_entry.strip()
            for field_entry in extra_fields_unparsed
        ]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger = Factory.get_logger(args.logger)
    logger.info("generating ABC export")

    with_email = args.with_email
    with_cell = args.with_cell

    _cache_id_types()
    fs_db = make_fs()
    stream = AtomicFileWriter(args.filename)
    xmlwriter = xmlprinter.xmlprinter(
        stream,
        indent_level=2,  # human-friendly output
        data_mode=True,
        input_encoding="latin1")
    generate_report(args.institution)
    stream.close()
def build_xml(fh, persons):
    """ Write person data to xml file.

    :param fh: An open filelike bytestream
    :param persons: A dict with data to write, from
        :py:func:`generate_export_data`
    """
    xml = xmlprinter(fh, indent_level=2, data_mode=True,
                     input_encoding='utf-8')
    xml.startDocument(encoding='utf-8')
    xml.startElement('data')
    xml.startElement('properties')
    xml.dataElement('exportdate', datetime.datetime.now().isoformat(b' '))
    xml.endElement('properties')
    for person_id in sorted(persons):
        attrs = persons[person_id]['attrs']
        xml_attr = {'given': attrs['given'],
                    'sn': attrs['sn'],
                    'birth': attrs['birth']}
        # get person employee number
        employee_number = attrs['employee_number']
        if employee_number:
            logger.debug("collected employee_number=%r", employee_number)
            xml_attr['employee_number'] = employee_number
        # get home address (if several entries, the last one wins)
        # CLEANUP: dropped the no-op self-assignments from the original
        # ("home_address = home_address", "home_city = home_city").
        home_addressinfo = attrs['home_address']
        if home_addressinfo:
            for c in home_addressinfo:
                home_address = c['address_text']
                home_postalnumber = six.text_type(c['postal_number'])
                home_city = c['city']
                if home_address is not None:
                    xml_attr['home_address'] = home_address
                if home_postalnumber is not None:
                    xml_attr['home_postal_code'] = home_postalnumber
                if home_city is not None:
                    xml_attr['home_city'] = home_city
        # get campus (if several entries, the last one wins)
        campusinfo = attrs['campus']
        if campusinfo:
            for c in campusinfo:
                xml_attr['campus'] = c['address_text']
        if attrs['worktitle']:
            xml_attr['worktitle'] = attrs['worktitle']
        xml.startElement('person', xml_attr)
        # Password is intentionally masked in the export.
        xml.emptyElement('account', {'username': attrs['uname'],
                                     'userpassword': '******',
                                     'email': attrs['email']})
        affs = persons[person_id]['affs']
        if affs:
            xml.startElement('affiliations')
            for aff in affs:
                # Missing values are exported as empty strings, in a fixed
                # attribute order.
                aff_attrs = OrderedDict(
                    (k, (aff.get(k) or '')) for k in (
                        'affiliation',
                        'status',
                        'stedkode',
                        'prosent',
                        'hovedarbeidsforhold',
                        'stillingskode',
                        'dbh_kategori',
                        'stillingstittel',
                        'last_date',
                    ))
                xml.emptyElement('aff', aff_attrs)
            xml.endElement('affiliations')
        contactinfo = attrs['contacts']
        if contactinfo:
            xml.startElement('contactinfo')
            for c in contactinfo:
                c_attrs = OrderedDict(
                    (k, six.text_type(c[k] or ''))
                    for k in ('source', 'pref', 'type', 'value'))
                xml.emptyElement('contact', c_attrs)
            xml.endElement('contactinfo')
        xml.endElement('person')
    xml.endElement('data')
    xml.endDocument()
def write_xml(agrgroup_dict, xmlfile):
    """ write results to file

    produce this:

    <xml encoding="utf-8">
    <data>
      <properties>
        <tstamp>2013-05-12 15:44</tstamp>
      </properties>
      <groups>
        <group>
          <name>AD navn</name>
          <samaccountname>sam_name_of_group</samaccountname>
          <description>some descriptive text</description>
          <!-- this name will show in Addressbook -->
          <displayname>displayname of group</displayname>
          <member>usera,userb,userc,userd</member>
          <mail>[email protected]</mail>
          <!-- startswith emnekode/progkode (searchable in adrbook) -->
          <mailnickname>[email protected]</mailnickname>
          <!-- undenh or studierprogram -->
          <extensionAttribute1>type</extensionAttribute1>
          <!-- emnekode or studieprogramkode -->
          <extensionAttribute2>emnekode</extensionAttribute2>
          <!-- year -->
          <extensionAttribute3>2014</extensionAttribute3>
          <!-- semester -->
          <extensionAttribute4>høst</extensionAttribute4>
          <!-- only for emner -->
          <extensionAttribute5>versjonskode</extensionAttribute5>
          <!-- only for emner -->
          <extensionAttribute6>terminkode</extensionAttribute6>
          <!-- only student at present -->
          <extensionAttribute7>rolle</extensionAttribute7>
          <!-- full name of emne or studieprogram -->
          <extensionAttribute8>emne or stprognavn</extensionAttribute8>
          <!-- short versjon of nr 8 -->
          <extensionAttribute9>emne or studieprog forkortelse</extensionAttribute9>
          <!-- ansvarlig enhets fulle navn -->
          <extensionAttribute10>Institutt for samfunnsmedisin</extensionAttribute10>
          <!-- ansvarlig enhets forkortelse -->
          <extensionAttribute11>ISM</extensionAttribute11>
          <!-- ansvarlig enhets plassering i org -->
          <extensionAttribute12>UiT.Helsefak.ISM</extensionAttribute12>
          <extensionAttribute13></extensionAttribute13>
          <extensionAttribute14></extensionAttribute14>
          <extensionAttribute15></extensionAttribute15>
        </group>
        <group>
        ...
        </group>
      </groups>
    </data>
    </xml>
    """
    logger.info("Writing results to '%s'", xmlfile)

    def dump_group(grpname, gdata):
        # One <group> element; child elements emitted in sorted key order.
        # (Was duplicated inline for both dicts in the original.)
        xml.startElement('group')
        logger.debug("Writing %s", grpname)
        keys = gdata.keys()
        keys.sort()
        for k in keys:
            xml.dataElement(k, gdata[k])
        xml.endElement('group')

    fh = open(xmlfile, 'w')
    xml = xmlprinter(fh, indent_level=2, data_mode=True,
                     input_encoding='ISO-8859-1')
    xml.startDocument(encoding='utf-8')
    xml.startElement('data')
    xml.startElement('properties')
    xml.dataElement('tstamp', str(mx.DateTime.now()))
    xml.endElement('properties')
    xml.startElement('groups')
    # NOTE(review): ``group_dict`` is a module-level structure, assumed to
    # be populated before this call -- confirm against the caller.
    for grpname, gdata in group_dict.iteritems():
        dump_group(grpname, gdata)
    for grpname, gdata in agrgroup_dict.iteritems():
        dump_group(grpname, gdata)
    xml.endElement('groups')
    xml.endElement('data')
    xml.endDocument()
    # BUGFIX: the original never closed the output file; close it so all
    # data is flushed to disk before we log completion.
    fh.close()
    logger.info("Writing results to '%s' done", xmlfile)
def __init__(self, fname):
    # ``open`` instead of the ``file`` builtin: identical behavior on
    # Python 2, and ``file`` no longer exists on Python 3.
    self.gen = xmlprinter.xmlprinter(open(fname, 'w'),
                                     indent_level=2,
                                     data_mode=1,
                                     input_encoding='ISO-8859-1')
def main():
    """Parse command line options and generate the ABC XML export."""
    # Shared with generate_report()/_cache_id_types(), hence module globals.
    global cerebrum_db, constants, fs_db, xmlwriter, logger, with_email, \
        with_cell, extra_contact_fields

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-l', '--logger-name',
                        dest='logger',
                        help='Logger instance to use (default: cronjob)',
                        default='cronjob')
    args = parser.parse_args()

    if args.extra_contact_fields is not None:
        # Parse the comma separated "xml_name:contact_type:source_system"
        # entries into a list of dicts.
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [field_entry.strip()
                                 for field_entry in extra_fields_unparsed]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger = Factory.get_logger(args.logger)
    logger.info("generating ABC export")

    with_email = args.with_email
    with_cell = args.with_cell

    _cache_id_types()
    fs_db = make_fs()
    stream = AtomicFileWriter(args.filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,  # human-friendly output
                                      data_mode=True,
                                      input_encoding="latin1")
    generate_report(args.institution)
    stream.close()
def __init__(self, fname):
    # Keep a handle to the atomic writer so it can be closed/validated
    # once the document is complete.
    self.__file = AtomicFileWriter(fname, 'wb')
    self.gen = xmlprinter.xmlprinter(self.__file,
                                     data_mode=1,
                                     indent_level=2,
                                     input_encoding='UTF-8')
def main(inargs=None):
    """Parse command line options and generate the ABC XML export.

    :param inargs: argument list for argparse (defaults to sys.argv[1:])
    """
    # Shared with generate_report()/_cache_id_types(), hence module globals.
    global cerebrum_db, constants, fs_db, xmlwriter
    global with_email, with_cell, extra_contact_fields
    # Sorry, but the alternative is to rewrite this whole thing.
    global transliterate

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        type=Cerebrum.utils.argutils.UnicodeType(),
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-o', '--encoding',
                        dest='encoding',
                        default='iso8859-1',
                        help='Override the default encoding (iso8859-1)')
    parser.add_argument('-r', '--errors',
                        dest='encoding_errors',
                        default='strict',
                        help=('Override default encoding error handler '
                              '(strict). Common handlers: strict, ignore, '
                              'replace. See Python Codec Base Classes for all '
                              'supported handlers.'))
    Cerebrum.logutils.options.install_subparser(parser)

    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)

    logger.info('Start of script %s', parser.prog)
    logger.debug("args: %r", args)

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)
    # Transliteration helper for text that cannot be represented in the
    # chosen output encoding.
    transliterate = for_encoding(args.encoding)

    if args.extra_contact_fields is not None:
        # Parse the comma separated "xml_name:contact_type:source_system"
        # entries into a list of dicts.
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [
            field_entry.strip()
            for field_entry in extra_fields_unparsed
        ]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger.info("generating ABC export")
    with_email = args.with_email
    with_cell = args.with_cell
    encoding_errors = args.encoding_errors

    _cache_id_types()
    fs_db = make_fs()
    # The recoder handles the byte encoding; the printer only applies the
    # chosen error handler.
    with AtomicStreamRecoder(args.filename, mode='w',
                             encoding=args.encoding) as stream:
        xmlwriter = xmlprinter.xmlprinter(
            stream,
            indent_level=2,  # human-friendly output
            data_mode=True,
            encoding_errors=encoding_errors)
        generate_report(args.institution, args.encoding)
        logger.info('Report written to %s', stream.name)
    logger.info('Done with script %s', parser.prog)
def __init__(self, fname):
    # ``open`` instead of the ``file`` builtin: identical behavior on
    # Python 2, and ``file`` no longer exists on Python 3.
    self.gen = xmlprinter.xmlprinter(
        open(fname, 'w'),
        indent_level=2,
        data_mode=1,
        input_encoding='ISO-8859-1')
def __init__(self, output_stream):
    # Latin-1 input; data_mode gives human-readable (indented) output.
    self.gen = xmlprinter.xmlprinter(output_stream,
                                     input_encoding='ISO-8859-1',
                                     indent_level=2,
                                     data_mode=1)
def __init__(self, fname):
    # Keep a handle to the atomic writer so it can be closed/validated
    # once the document is complete.
    self.__file = AtomicFileWriter(fname, 'wb')
    self.gen = xmlprinter.xmlprinter(self.__file,
                                     input_encoding='UTF-8',
                                     indent_level=2,
                                     data_mode=1)
def main(inargs=None):
    """Parse command line options and generate the ABC XML export.

    :param inargs: argument list for argparse (defaults to sys.argv[1:])
    """
    # Shared with generate_report()/_cache_id_types(), hence module globals.
    global cerebrum_db, constants, fs_db, xmlwriter
    global with_email, with_cell, extra_contact_fields

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        type=Cerebrum.utils.argutils.UnicodeType(),
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-o', '--encoding',
                        dest='encoding',
                        default='iso8859-1',
                        help='Override the default encoding (iso8859-1)')
    Cerebrum.logutils.options.install_subparser(parser)

    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)

    logger.info('Start of script %s', parser.prog)
    logger.debug("args: %r", args)

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    if args.extra_contact_fields is not None:
        # Parse the comma separated "xml_name:contact_type:source_system"
        # entries into a list of dicts.
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [field_entry.strip()
                                 for field_entry in extra_fields_unparsed]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger.info("generating ABC export")
    with_email = args.with_email
    with_cell = args.with_cell

    _cache_id_types()
    fs_db = make_fs()
    # The recoder handles the byte encoding transparently.
    with AtomicStreamRecoder(args.filename, mode='w',
                             encoding=args.encoding) as stream:
        xmlwriter = xmlprinter.xmlprinter(stream,
                                          indent_level=2,  # human-friendly output
                                          data_mode=True)
        generate_report(args.institution, args.encoding)
        logger.info('Report written to %s', stream.name)
    logger.info('Done with script %s', parser.prog)