def __init__(self, subjects, year, version=None, typecode=None, timecode=None):
    self.db = Factory.get('Database')()
    self.db.cl_init(change_program='proc-digeks')
    self.co = Factory.get('Constants')(self.db)
    self.fs = make_fs()

    # TODO: Describe the structure here
    self.exams = set()
    self.candidates = set()

    # FIXME: We shouldn't need to specify subject/semester/...
    if not isinstance(subjects, (list, set, tuple)):
        raise Exception('Subjects must be a (list,set,tuple)')

    self.subjects = subjects
    self.year = year
    self.typecode = typecode  # vurdkombkode
    self.timecode = timecode  # vurdtidkode
    self.version = version    # versjonskode

    # Start processing
    #
    self.process_exams()

    all_candidates = set([c.username for c in self.candidates])
    logger.debug('Caching candidate data for %d unique candidates...'
                 % len(all_candidates))
    self.cache = CandidateCache(self.db, all_candidates)

def main(inargs=None):
    parser = argparse.ArgumentParser(description="Convert OU files")
    parser.add_argument(
        '--ou-source',
        dest='sources',
        action='append',
        help='Read OUs from source file %(metavar)s',
        metavar='<file>',
    )
    parser.add_argument(
        '--webservice',
        dest='webservice_url',
        help='Read OUs from webservice url',
        metavar='<url>',
    )
    parser.add_argument(
        '--out-file',
        dest='output',
        required=True,
        help='Write output to %(metavar)s XML file',
        metavar='<file>',
    )
    Cerebrum.logutils.options.install_subparser(parser)
    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)

    if not (args.sources or args.webservice_url):
        parser.error("--ou-source and/or --webservice has to be set")

    logger.info('Start %r', parser.prog)
    logger.debug("args: %r", args)

    ou_files = list(_parse_ou_files(args.sources or ()))
    logger.debug("sources: %r", ou_files)
    output = args.output
    logger.debug('output: %r', output)
    webservice_url = args.webservice_url
    logger.debug("webservice_url: %r", webservice_url)

    fs = make_fs()
    my_ou = OuGenerator(fs, ou_files)

    logger.info('fetching ous from fs...')
    fs_ou = my_ou.get_fs_ou()
    logger.info('found %d ous in fs', len(fs_ou))

    logger.info('parsing ous from webservice and file(s)...')
    auth_ou = my_ou.get_authoritative_ou(webservice_url)
    logger.info('found %d ous in webservice and file(s)', len(auth_ou))

    logger.info('merging ou data...')
    final_ou = my_ou.generate_ou(fs_ou, auth_ou)
    logger.info('ended up with %d ous', len(final_ou))

    my_ou.print_ou(final_ou, output)
    logger.info('Output written to %r', output)
    logger.info('Done %r', parser.prog)

def get_targets(db, regkort_check=True):
    fs_db = make_fs()
    co = Factory.get('Constants')(db)
    pe = Factory.get('Person')(db)

    t = collections.defaultdict(list)
    for r in pe.list_affiliations(fetchall=False):
        t[r['person_id']].append(r['status'])

    for k in t:
        if [co.affiliation_status_student_opptak] == t[k]:
            if regkort_check and regkort_ok(db, fs_db, k):
                continue
            yield k

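# Hypothetical usage sketch (not from the original sources): get_targets()
# above is a generator yielding person_ids whose only affiliation status is
# student/opptak, optionally skipping persons whose registration card is OK.
# Assumes Factory, logger and get_targets as defined in the snippets here.
def example_list_targets():
    db = Factory.get('Database')()
    for person_id in get_targets(db, regkort_check=True):
        logger.info("person %d is a target", person_id)
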
def gather_student_information(self):
    logger.debug("gather_student_information start")
    fs = make_fs()
    students = fs.student.list_aktiv()
    person = Factory.get('Person')(db)
    for student in students:
        # person.clear()
        fnr = "%06d%05d" % (int(student['fodselsdato']),
                            int(student['personnr']))
        student_data = self.gather_person_information(fnr=fnr)
        if student_data is not None:
            self.students[fnr] = student_data
    logger.debug("gather_student_information done")

def __init__(self):
    LMSImport.__init__(self)
    self.fs_db = make_fs()
    self.UndervEnhet = {}
    self.not_exported_to_lms = {}
    self.enhet_names = {}
    self.emne_versjon = {}
    self.emne_termnr = {}
    self.emner = {}
    self.classes = {}
    """Contains a mapping of class-IDs to a list of the entity IDs of the
    primary accounts of the students belonging to that class. See
    'get_classes()'."""
    logger.debug("FSImport initialized")

def person_student_info(self, operator, person_id):
    person = self._get_person(*self._map_person_id(person_id))
    self.ba.can_get_student_info(operator.get_entity_id(), person)
    fnr = person.get_external_id(id_type=self.const.externalid_fodselsnr,
                                 source_system=self.const.system_fs)
    if not fnr:
        raise CerebrumError("No matching fnr from FS")
    fodselsdato, pnum = fodselsnr.del_fnr(fnr[0]['external_id'])
    ret = []
    try:
        fs_db = make_fs()
    except database.DatabaseError, e:
        self.logger.warn("Can't connect to FS (%s)" % e)
        raise CerebrumError("Can't connect to FS, try later")

def person_student_info(self, operator, person_id):
    person = self._get_person(*self._map_person_id(person_id))
    self.ba.can_get_student_info(operator.get_entity_id(), person)
    fnr = person.get_external_id(id_type=self.const.externalid_fodselsnr,
                                 source_system=self.const.system_fs)
    if not fnr:
        raise CerebrumError("No matching fnr from FS")
    fodselsdato, pnum = fodselsnr.del_fnr(fnr[0]['external_id'])
    har_opptak = {}
    ret = []
    try:
        fs_db = make_fs()
    except Database.DatabaseError, e:
        self.logger.warn("Can't connect to FS (%s)" % e)
        raise CerebrumError("Can't connect to FS, try later")

def main():
    global cerebrum_db, constants, fs_db, xmlwriter, logger, with_email, \
        with_cell, extra_contact_fields

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-l', '--logger-name',
                        dest='logger',
                        help='Logger instance to use (default: cronjob)',
                        default='cronjob')
    args = parser.parse_args()

    if args.extra_contact_fields is not None:
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [field_entry.strip()
                                 for field_entry in extra_fields_unparsed]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger = Factory.get_logger(args.logger)
    logger.info("generating ABC export")
    with_email = args.with_email
    with_cell = args.with_cell
    _cache_id_types()
    fs_db = make_fs()
    stream = AtomicFileWriter(args.filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,  # human-friendly output
                                      data_mode=True,
                                      input_encoding="latin1")
    generate_report(args.institution)
    stream.close()

def main():
    try:
        opts, junk = getopt.getopt(sys.argv[1:],
                                   'p:f:da:o:',
                                   ('person-affiliation=',
                                    'fagperson-affiliation=',
                                    'dryrun',
                                    'authoritative-system=',
                                    'ou-perspective=',
                                    'with-cache-email',))
    except getopt.GetoptError:
        print "Wrong option", sys.exc_info()
        return

    def append_affiliation(value, where):
        if len(value.split("/")) == 1:
            aff, status = (
                constants.human2constant(value, constants.PersonAffiliation),
                None)
        elif len(value.split("/")) == 2:
            aff, status = value.split("/")
            aff, status = (
                constants.human2constant(aff, constants.PersonAffiliation),
                constants.human2constant(status, constants.PersonAffStatus))
            assert not (aff is None or status is None), "Missing aff/status"
        else:
            logger.error("Wrong syntax for affiliation %s", value)
            return
        where.append((aff, status))
    # end append_affiliation

    person_affiliations = list()
    fagperson_affiliations = list()
    dryrun = False
    authoritative_system = ou_perspective = None
    email_cache = False

    for option, value in opts:
        if option in ('-p', '--person-affiliation',):
            append_affiliation(value, person_affiliations)
        elif option in ('-f', '--fagperson-affiliation',):
            append_affiliation(value, fagperson_affiliations)
        elif option in ('-d', '--dryrun',):
            dryrun = True
        elif option in ('-a', '--authoritative-system',):
            authoritative_system = constants.human2constant(
                value, constants.AuthoritativeSystem)
        elif option in ('-o', '--ou-perspective',):
            ou_perspective = constants.human2constant(
                value, constants.OUPerspective)
        elif option in ('--with-cache-email',):
            email_cache = True

    assert authoritative_system is not None
    assert ou_perspective is not None

    if not person_affiliations:
        logger.error("No person affiliations are specified. "
                     "This is most likely not what you want")
        return

    fs = make_fs()
    if dryrun:
        fs.db.commit = fs.db.rollback

    # This is a performance improvement hack. It can be removed, if memory is
    # at a premium. The trade-off is 5x difference in execution speed.
    _populate_caches(person_affiliations + fagperson_affiliations,
                     authoritative_system, email_cache)

    make_fs_updates(person_affiliations, fagperson_affiliations, fs,
                    authoritative_system, ou_perspective)
    logger.debug("Pushed all changes to FS")

def main(): logger.info("Starting import from FS") try: opts, args = getopt.getopt(sys.argv[1:], "ptsroefbknd", ["datadir=", "person-file=", "topics-file=", "studprog-file=", "regkort-file=", 'emne-file=', "ou-file=", 'fnr-update-file=', 'betalt-papir-file=', 'role-file=', 'netpubl-file=', 'edu-file=', "misc-func=", "misc-file=", "misc-tag=", "pre-course", "pre-course-file="]) except getopt.GetoptError: usage() sys.exit(2) datadir = cereconf.FS_DATA_DIR person_file = 'persons.xml' topics_file = 'topics.xml' studprog_file = 'studieprogrammer.xml' regkort_file = 'regkort.xml' emne_file = 'emner.xml' ou_file = 'ou.xml' role_file = 'roles.xml' fnrupdate_file = 'fnr_update.xml' betalt_papir_file = 'betalt_papir.xml' netpubl_file = 'nettpublisering.xml' edu_file = 'edu_info.xml' pre_course_file = 'pre_course.xml' for o, val in opts: if o in ('--datadir',): datadir = val elif o in ('--person-file',): person_file = val elif o in ('--topics-file',): topics_file = val elif o in ('--emne-file',): emne_file = val elif o in ('--studprog-file',): studprog_file = val elif o in ('--regkort-file',): regkort_file = val elif o in ('--ou-file',): ou_file = val elif o in ('--fnr-update-file',): fnrupdate_file = val elif o in ('--betalt-papir-file',): betalt_papir_file = val elif o in('--role-file',): role_file = val elif o in('--netpubl-file',): netpubl_file = val elif o in ('--edu-file',): edu_file = val elif o in ('--pre-course-file',): pre_course_file = val global fs fs = make_fs() for o, val in opts: try: if o in ('-p',): write_person_info(set_filepath(datadir, person_file)) elif o in ('-t',): write_topic_info(set_filepath(datadir, topics_file)) elif o in ('-b',): write_betalt_papir_info(set_filepath(datadir, betalt_papir_file)) elif o in ('-s',): write_studprog_info(set_filepath(datadir, studprog_file)) elif o in ('-f',): write_fnrupdate_info(set_filepath(datadir, fnrupdate_file)) elif o in ('-e',): write_emne_info(set_filepath(datadir, emne_file)) elif o in ('-r',): write_regkort_info(set_filepath(datadir, regkort_file)) elif o in ('-o',): write_ou_info(set_filepath(datadir, ou_file)) elif o in ('-k',): write_personrole_info(set_filepath(datadir, role_file)) elif o in ('-n',): write_netpubl_info(set_filepath(datadir, netpubl_file)) elif o in ('-d',): write_edu_info(set_filepath(datadir, edu_file)) elif o in ('--pre-course',): write_forkurs_info(set_filepath(datadir, pre_course_file)) # We want misc-* to be able to produce multiple file in one script-run elif o in ('--misc-func',): misc_func = val elif o in ('--misc-tag',): misc_tag = val elif o in ('--misc-file',): write_misc_info(set_filepath(datadir, val), misc_tag, misc_func) except FileChangeTooBigError as msg: logger.error("Manual intervention required: %s", msg) logger.info("Import from FS done")
def assert_connected(user="******", service="FSHIOF.uio.no"):
    global fs
    if fs is None:
        fs = make_fs()

def assert_connected():
    global fs
    if fs is None:
        fs = make_fs()

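# The snippets in this collection share a common pattern: open an FS
# connection with make_fs() (optionally with explicit credentials, as in
# make_fs(user=..., database=...)), query through its sub-APIs such as
# fs.student and fs.info, and commit or roll back fs.db afterwards.  The
# function below is a hypothetical sketch of that pattern, not code from the
# original sources; it assumes make_fs and logger as used above and reuses
# the "fs.db.commit = fs.db.rollback" dryrun trick seen in one of the
# examples.
def example_fs_job(dryrun=True):
    fs = make_fs()
    if dryrun:
        fs.db.commit = fs.db.rollback   # turn commits into no-ops
    students = fs.student.list_aktiv()  # read-only query used in several snippets
    logger.debug("found %d active students", len(students))
    fs.db.commit()                      # rolls back instead when dryrun=True
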
def person_student_info(self, operator, person_id):
    person = self._get_person(*self._map_person_id(person_id))
    self.ba.can_get_student_info(operator.get_entity_id(), person)
    fnr = person.get_external_id(id_type=self.const.externalid_fodselsnr,
                                 source_system=self.const.system_fs)
    if not fnr:
        raise CerebrumError("No matching fnr from FS")
    fodselsdato, pnum = fodselsnr.del_fnr(fnr[0]['external_id'])
    ret = []
    try:
        fs_db = make_fs()
    except database.DatabaseError as e:
        self.logger.warn("Can't connect to FS (%s)" % e)
        raise CerebrumError("Can't connect to FS, try later")

    har_opptak = set()
    for row in fs_db.student.get_studierett(fodselsdato, pnum):
        har_opptak.add(row['studieprogramkode'])
        ret.append({
            'studprogkode': row['studieprogramkode'],
            'studierettstatkode': row['studierettstatkode'],
            'studentstatkode': row['studentstatkode'],
            'studieretningkode': row['studieretningkode'],
            'dato_tildelt': self._ticks_to_date(
                row['dato_studierett_tildelt']),
            'dato_gyldig_til': self._ticks_to_date(
                row['dato_studierett_gyldig_til']),
            'privatist': row['status_privatist'],
        })

    for row in fs_db.student.get_eksamensmeldinger(fodselsdato, pnum):
        programmer = []
        for row2 in fs_db.info.get_emne_i_studieprogram(row['emnekode']):
            if row2['studieprogramkode'] in har_opptak:
                programmer.append(row2['studieprogramkode'])
        ret.append({
            'ekskode': row['emnekode'],
            'programmer': ",".join(programmer),
            'dato': self._ticks_to_date(row['dato_opprettet']),
        })

    for row in fs_db.student.get_utdanningsplan(fodselsdato, pnum):
        ret.append({
            'studieprogramkode': row['studieprogramkode'],
            'terminkode_bekreft': row['terminkode_bekreft'],
            'arstall_bekreft': row['arstall_bekreft'],
            'dato_bekreftet': self._ticks_to_date(row['dato_bekreftet']),
        })

    for row in fs_db.student.get_semreg(fodselsdato, pnum):
        ret.append({
            'regformkode': row['regformkode'],
            'betformkode': row['betformkode'],
            'dato_endring': self._ticks_to_date(row['dato_endring']),
            'dato_regform_endret': self._ticks_to_date(
                row['dato_regform_endret']),
        })

    return ret

def main(): """Argparser and script run.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( '-p', '--person-affiliations', dest='person_affs', action='append', required=True, help='List of person affiliations to use. On the form <affiliation> ' 'or <affiliation>/<status>. ' 'affiliation_ansatt/affiliation_status_ansatt_vit') parser.add_argument('-f', '--fagperson-affiliation', dest='fagperson_affs', action='append', required=True, help='TODO Fagperson aff') parser.add_argument('-a', '--authoritative-system', dest='authoritative_system', required=True, help='TODO Authoritative system') parser.add_argument('-o', '--ou-perspective', dest='ou_perspective', required=True, help='TODO The OU perspective') parser.add_argument( '-e', '--fagperson-fields', dest='fagperson_fields', action='append', choices=['work_title', 'phone', 'fax', 'mobile'], help='Fagperson data fields to be exported. Default is all') parser.add_argument( '-n', '--no-extra-fields', action='store_true', dest='no_extra_fields', help='Do not export any of the "extra" fagperson fields (work_title, ' 'phone, fax, mobile)') parser.add_argument('-m', '--with-cache-email', action='store_true', dest='email_cache', help='Cache e-mail addresses') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Write data to FS') db = Factory.get("Database")() co = Factory.get("Constants")(db) fs = make_fs() Cerebrum.logutils.options.install_subparser(parser) args = parser.parse_args() Cerebrum.logutils.autoconf('cronjob', args) logger.info('START {0}'.format(parser.prog)) def parse_affiliation_string(affiliation): """Splits string into aff and status.""" if affiliation is None: return None if len(affiliation.split("/")) == 1: aff, status = (co.human2constant(affiliation, co.PersonAffiliation), None) elif len(affiliation.split("/")) == 2: aff, status = affiliation.split("/") aff, status = (co.human2constant(aff, co.PersonAffiliation), co.human2constant(status, co.PersonAffStatus)) if aff is None or status is None: return None else: logger.error("Wrong syntax for affiliation: %r", affiliation) return None return aff, status person_affs = [parse_affiliation_string(x) for x in args.person_affs] fagperson_affs = [parse_affiliation_string(x) for x in args.fagperson_affs] ou_perspective = get_constant(db, parser, co.OUPerspective, args.ou_perspective) authoritative_system = get_constant(db, parser, co.AuthoritativeSystem, args.authoritative_system) if ou_perspective is None: logger.error('No valid OU perspective given') return None if authoritative_system is None: logger.error('No valid authoritative system given') return None if args.commit: logger.info('Changes will be committed') else: logger.info('Dryrun mode, no changes will be committed') valid_fagperson_fields = ['work_title', 'phone', 'fax', 'mobile'] if args.no_extra_fields: fagperson_fields = {x: False for x in valid_fagperson_fields} elif args.fagperson_fields: fagperson_fields = {x: False for x in valid_fagperson_fields} for field in args.fagperson_fields: if field in fagperson_fields: fagperson_fields[field] = True else: fagperson_fields = None syncer = HR2FSSyncer(person_affs, fagperson_affs, authoritative_system, ou_perspective, db, fs, co, fagperson_export_fields=fagperson_fields, use_cache=True, email_cache=args.email_cache, commit=args.commit) syncer.sync_to_fs() if args.commit: logger.info('Committing FS db') fs.db.commit() else: logger.info('Rolling back changes in the FS db') 
fs.db.rollback() logger.info('Done syncing to FS')
def main(inargs=None):
    global cerebrum_db, constants, fs_db, xmlwriter
    global with_email, with_cell, extra_contact_fields

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        type=Cerebrum.utils.argutils.UnicodeType(),
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-o', '--encoding',
                        dest='encoding',
                        default='iso8859-1',
                        help='Override the default encoding (iso8859-1)')
    Cerebrum.logutils.options.install_subparser(parser)
    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)
    logger.info('Start of script %s', parser.prog)
    logger.debug("args: %r", args)

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    if args.extra_contact_fields is not None:
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [field_entry.strip()
                                 for field_entry in extra_fields_unparsed]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger.info("generating ABC export")
    with_email = args.with_email
    with_cell = args.with_cell
    _cache_id_types()
    fs_db = make_fs()

    with AtomicStreamRecoder(args.filename, mode='w',
                             encoding=args.encoding) as stream:
        xmlwriter = xmlprinter.xmlprinter(
            stream,
            indent_level=2,  # human-friendly output
            data_mode=True)
        generate_report(args.institution, args.encoding)
        logger.info('Report written to %s', stream.name)

    logger.info('Done with script %s', parser.prog)

def main():
    Cerebrum.logutils.autoconf('cronjob')
    logger.info("Starting import from FS")
    try:
        opts, args = getopt.getopt(sys.argv[1:], "psrefonuUE",
                                   ["datadir=", "personinfo-file=",
                                    "studprog-file=", "roleinfo-file=",
                                    "undenh-file=", "emneinfo-file=",
                                    "fnr-update-file=", "netpubl-file=",
                                    "ou-file=", "misc-func=", "misc-file=",
                                    "misc-tag=", "evukursinfo-file=",
                                    "student-undenh-file=", "db-user=",
                                    "db-service=", "institution="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    db_user = None
    db_service = None
    institution_number = cereconf.DEFAULT_INSTITUSJONSNR
    for o, val in opts:
        if o in ('--db-user',):
            db_user = val
        elif o in ('--db-service',):
            db_service = val
        elif o in ('--institution',):
            institution_number = val

    fs = make_fs(user=db_user, database=db_service)
    filepaths = FilePaths(opts)
    fsimporter = ImportFromFsNmh(fs)

    misc_tag = None
    misc_func = None
    for o, val in opts:
        try:
            if o in ('-p',):
                fsimporter.write_person_info(filepaths.person_file)
            elif o in ('-s',):
                fsimporter.write_studprog_info(filepaths.studprog_file)
            elif o in ('-r',):
                fsimporter.write_role_info(filepaths.role_file)
            elif o in ('-e',):
                fsimporter.write_emne_info(filepaths.emne_info_file)
            elif o in ('-f',):
                fsimporter.write_fnrupdate_info(filepaths.fnr_update_file)
            elif o in ('-o',):
                fsimporter.write_ou_info(institution_number,
                                         filepaths.ou_file)
            elif o in ('-n',):
                fsimporter.write_netpubl_info(filepaths.netpubl_file)
            elif o in ('-u',):
                fsimporter.write_undenh_metainfo(filepaths.undervenh_file)
            elif o in ('-E',):
                fsimporter.write_evukurs_info(filepaths.evu_kursinfo_file)
            elif o in ('-U',):
                fsimporter.write_undenh_student(filepaths.undenh_student_file)
            # We want misc-* to be able to produce multiple files in one
            # script-run
            elif o in ('--misc-func',):
                misc_func = val
            elif o in ('--misc-tag',):
                misc_tag = val
            elif o in ('--misc-file',):
                misc_file = set_filepath(filepaths.datadir, val)
                fsimporter.write_misc_info(misc_file, misc_tag, misc_func)
        except FileChangeTooBigError as msg:
            logger.error("Manual intervention required: %s", msg)

    logger.info("Done with import from FS")

def main():
    Cerebrum.logutils.autoconf('cronjob')
    logger.info("Starting import from FS")
    try:
        opts, args = getopt.getopt(sys.argv[1:], "psrefonuUE",
                                   ["datadir=", "personinfo-file=",
                                    "studprog-file=", "roleinfo-file=",
                                    "emneinfo-file=", "fnr-update-file=",
                                    "netpubl-file=", "ou-file=",
                                    "misc-func=", "misc-file=", "misc-tag=",
                                    "undenh-file=", "evukursinfo-file=",
                                    "student-undenh-file=", "db-user=",
                                    "db-service=", "institution="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    db_user = None
    db_service = None
    institution_number = cereconf.DEFAULT_INSTITUSJONSNR
    for o, val in opts:
        if o in ('--db-user',):
            db_user = val
        elif o in ('--db-service',):
            db_service = val
        elif o in ('--institution',):
            institution_number = val

    fs = make_fs(user=db_user, database=db_service)
    file_paths = FilePaths(opts)
    fsimporter = ImportFromFsUia(fs)

    misc_tag = None
    misc_func = None
    for o, val in opts:
        try:
            if o in ('-p',):
                fsimporter.write_person_info(file_paths.person_file)
            elif o in ('-s',):
                fsimporter.write_studprog_info(file_paths.studprog_file)
            elif o in ('-r',):
                fsimporter.write_role_info(file_paths.role_file)
            elif o in ('-e',):
                fsimporter.write_emne_info(file_paths.emne_info_file)
            elif o in ('-f',):
                fsimporter.write_fnrupdate_info(file_paths.fnr_update_file)
            elif o in ('-o',):
                fsimporter.write_ou_info(institution_number,
                                         file_paths.ou_file)
            elif o in ('-n',):
                fsimporter.write_netpubl_info(file_paths.netpubl_file)
            elif o in ('-u',):
                fsimporter.write_undenh_metainfo(file_paths.undervenh_file)
            elif o in ('-E',):
                fsimporter.write_evukurs_info(file_paths.evu_kursinfo_file)
            elif o in ('-U',):
                fsimporter.write_undenh_student(
                    file_paths.undenh_student_file)
            # We want misc-* to be able to produce multiple files in one
            # script-run
            elif o in ('--misc-func',):
                misc_func = val
            elif o in ('--misc-tag',):
                misc_tag = val
            elif o in ('--misc-file',):
                misc_file = set_filepath(file_paths.datadir, val)
                fsimporter.write_misc_info(misc_file, misc_tag, misc_func)
        except FileChangeTooBigError as msg:
            logger.error("Manual intervention required: %s", msg)

    logger.info("Done with import from FS")

def main(inargs=None):
    global cerebrum_db, constants, fs_db, xmlwriter
    global with_email, with_cell, extra_contact_fields
    # Sorry, but the alternative is to rewrite this whole thing.
    global transliterate

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f', '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i', '--institution',
                        dest='institution',
                        type=Cerebrum.utils.argutils.UnicodeType(),
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e', '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c', '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x', '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-o', '--encoding',
                        dest='encoding',
                        default='iso8859-1',
                        help='Override the default encoding (iso8859-1)')
    parser.add_argument('-r', '--errors',
                        dest='encoding_errors',
                        default='strict',
                        help=('Override default encoding error handler '
                              '(strict). Common handlers: strict, ignore, '
                              'replace. See Python Codec Base Classes for all '
                              'supported handlers.'))
    Cerebrum.logutils.options.install_subparser(parser)
    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)
    logger.info('Start of script %s', parser.prog)
    logger.debug("args: %r", args)

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)
    transliterate = for_encoding(args.encoding)

    if args.extra_contact_fields is not None:
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [field_entry.strip()
                                 for field_entry in extra_fields_unparsed]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger.info("generating ABC export")
    with_email = args.with_email
    with_cell = args.with_cell
    encoding_errors = args.encoding_errors
    _cache_id_types()
    fs_db = make_fs()

    with AtomicStreamRecoder(args.filename, mode='w',
                             encoding=args.encoding) as stream:
        xmlwriter = xmlprinter.xmlprinter(
            stream,
            indent_level=2,  # human-friendly output
            data_mode=True,
            encoding_errors=encoding_errors)
        generate_report(args.institution, args.encoding)
        logger.info('Report written to %s', stream.name)

    logger.info('Done with script %s', parser.prog)

"hf:", ["help", "file="]) except getopt.GetoptError, error: usage(message=error.msg) return 1 output_stream = options["output"] for opt, val in opts: if opt in ('-h', '--help',): usage() return 0 if opt in ('-f', '--file',): options["output"] = val fs_db = make_fs() student_rows = fs_db.student.list_aktiv() if options["output"] != sys.stdout: output_stream = open(options["output"], "w") for student_row in student_rows: name = "%s %s" % (student_row["fornavn"], student_row["etternavn"]) no_ssn = "%06d%05d" % (student_row["fodselsdato"], student_row["personnr"]) card_ID = "01221%06d0" % student_row["studentnr_tildelt"] uname = "%06d" % student_row["studentnr_tildelt"] output_stream.write("%s\n" % ";".join((name, no_ssn, card_ID, uname))) if output_stream != sys.stdout: output_stream.close()
def main(): """Argparser and script run.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument( '-p', '--person-affiliations', dest='person_affs', action='append', required=True, help='List of person affiliations to use. On the form <affiliation> ' 'or <affiliation>/<status>. ' 'affiliation_ansatt/affiliation_status_ansatt_vit' ) parser.add_argument( '-f', '--fagperson-affiliation', dest='fagperson_affs', action='append', required=True, help='TODO Fagperson aff' ) parser.add_argument( '-a', '--authoritative-system', dest='authoritative_system', required=True, help='TODO Authoritative system' ) parser.add_argument( '-o', '--ou-perspective', dest='ou_perspective', required=True, help='TODO The OU perspective' ) parser.add_argument( '-e', '--fagperson-fields', dest='fagperson_fields', action='append', choices=['work_title', 'phone', 'fax', 'mobile'], help='Fagperson data fields to be exported. Default is all' ) parser.add_argument( '-n', '--no-extra-fields', action='store_true', dest='no_extra_fields', help='Do not export any of the "extra" fagperson fields (work_title, ' 'phone, fax, mobile)' ) parser.add_argument( '-m', '--with-cache-email', action='store_true', dest='email_cache', help='Cache e-mail addresses' ) parser.add_argument( '-c', '--commit', action='store_true', dest='commit', help='Write data to FS' ) db = Factory.get("Database")() co = Factory.get("Constants")(db) fs = make_fs() Cerebrum.logutils.options.install_subparser(parser) args = parser.parse_args() Cerebrum.logutils.autoconf('cronjob', args) logger.info('START {0}'.format(parser.prog)) def parse_affiliation_string(affiliation): """Splits string into aff and status.""" if affiliation is None: return None if len(affiliation.split("/")) == 1: aff, status = ( co.human2constant(affiliation, co.PersonAffiliation), None) elif len(affiliation.split("/")) == 2: aff, status = affiliation.split("/") aff, status = (co.human2constant(aff, co.PersonAffiliation), co.human2constant(status, co.PersonAffStatus)) if aff is None or status is None: return None else: logger.error("Wrong syntax for affiliation %s", affiliation) return None return aff, status person_affs = [parse_affiliation_string(x) for x in args.person_affs] fagperson_affs = [parse_affiliation_string(x) for x in args.fagperson_affs] ou_perspective = get_constant(db, parser, co.OUPerspective, args.ou_perspective) authoritative_system = get_constant(db, parser, co.AuthoritativeSystem, args.authoritative_system) if ou_perspective is None: logger.error('No valid OU perspective given') return None if authoritative_system is None: logger.error('No valid authoritative system given') return None if args.commit: logger.info('Changes will be committed') else: logger.info('Dryrun mode, no changes will be committed') valid_fagperson_fields = ['work_title', 'phone', 'fax', 'mobile'] if args.no_extra_fields: fagperson_fields = {x: False for x in valid_fagperson_fields} elif args.fagperson_fields: fagperson_fields = {x: False for x in valid_fagperson_fields} for field in args.fagperson_fields: if field in fagperson_fields: fagperson_fields[field] = True else: fagperson_fields = None syncer = HR2FSSyncer(person_affs, fagperson_affs, authoritative_system, ou_perspective, db, fs, co, fagperson_export_fields=fagperson_fields, use_cache=True, email_cache=args.email_cache, commit=args.commit) syncer.sync_to_fs() if args.commit: logger.info('Committing FS db') fs.db.commit() else: logger.info('Rolling back changes in the FS 
db') fs.db.rollback() logger.info('Done syncing to FS')