def write_kull_info(self, kull_info_file):
    """Write a file with information about all students' kull
    (cohort) participation registered in FS.  Specifically, we list
    all participation in:

    - kullklasse (cohort classes)
    - kulldeltaker (cohort membership)
    - kull (cohorts)
    """
    logger.info("Writing kull info to '%s'", kull_info_file)
    f = SimilarSizeWriter(kull_info_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    for xml_tag, generator in (
            ("kullklasse",
             self.fs.undervisning.list_studenter_alle_kullklasser),
            ("kulldeltaker", self.fs.undervisning.list_studenter_alle_kull),
            ("kull", self.fs.info.list_kull)):
        for row in generator():
            keys = row.keys()
            f.write(xml.xmlify_dbrow(row, keys, xml_tag) + "\n")
    f.write("</data>\n")
    f.close()
def main():
    filename = pj(cereconf.LDAP['dump_dir'], 'webaccounts.ldif')
    spread = base = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hf:b:s:',
                                   ['help', 'filename=', 'spread=', 'base='])
    except getopt.GetoptError:
        usage(1)
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-f', '--filename'):
            filename = val
        elif opt in ('-s', '--spread'):
            # Map constant names (as strings) to the constants themselves.
            str2const = dict()
            for c in dir(co):
                tmp = getattr(co, c)
                str2const[str(tmp)] = tmp
            spread = str2const[val]
        elif opt in ('-b', '--base'):
            base = val
    if not (spread and base):
        print spread, base
        usage(1)
    f = SimilarSizeWriter(filename, 'w')
    f.max_pct_change = 90
    dump_accounts(f, spread, base)
    f.close()
def write_edu_info(self, edu_info_file):
    """Write a file with information about all students' 'activities'
    registered in FS.  Specifically, we list all participation in:

    - undenh
    - undakt
    - kullklasser
    - kull
    """
    logger.info("Writing edu info for all students")
    f = SimilarSizeWriter(edu_info_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    for xml_tag, generator in (
            ("undenh", self.fs.undervisning.list_studenter_alle_undenh),
            ("undakt", self.fs.undervisning.list_studenter_alle_undakt),
            ("kullklasse",
             self.fs.undervisning.list_studenter_alle_kullklasser),
            ("kull", self.fs.undervisning.list_studenter_alle_kull)):
        logger.debug("Processing %s entries", xml_tag)
        for row in generator():
            keys = row.keys()
            f.write(xml.xmlify_dbrow(row, keys, xml_tag) + "\n")
    f.write("</data>\n")
    f.close()
def main():
    global db, co, ac, p, ou, et, logger

    logger = Factory.get_logger("cronjob")
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    ac = Factory.get('Account')(db)
    p = Factory.get('Person')(db)
    ou = Factory.get('OU')(db)
    et = Email.EmailTarget(db)

    txt_path = "/cerebrum/var/cache/txt"
    options, rest = getopt.getopt(sys.argv[1:], "t:", ["txt-path="])
    for option, value in options:
        if option in ("-t", "--txt-path"):
            txt_path = value

    # Load dicts with misc info.
    get_account_info()
    get_person_contact()

    # Dump OFK info
    f = SimilarSizeWriter("%s/ofk.txt" % txt_path, "w")
    f.max_pct_change = 10
    users = process_txt_file(f)
    f.close()
def ldif_outfile(tree, filename=None, default=None, explicit_default=False,
                 max_change=None, module=cereconf):
    """(Open and) return LDIF outfile for <tree>.

    Use <filename> if specified,
    otherwise module.LDAP_<tree>['file'] unless <explicit_default>,
    otherwise return <default> (an open filehandle) if that is not None.
    (explicit_default should be set if <default> was opened from a
    <filename> argument and not from module.LDAP*['file'].)

    When opening a file, use SimilarSizeWriter where close() fails if
    the resulting file has changed more than <max_change>, or
    module.LDAP_<tree>['max_change'], or module.LDAP['max_change'].
    If max_change is unset or >= 100, just open the file normally.
    """
    if not (filename or explicit_default):
        filename = getattr(module, 'LDAP_' + tree).get('file')
        if filename:
            filename = os.path.join(module.LDAP['dump_dir'], filename)
    if filename:
        if max_change is None:
            max_change = ldapconf(tree, 'max_change',
                                  default=ldapconf(None, 'max_change',
                                                   default=100,
                                                   module=module),
                                  module=module)
        if max_change < 100:
            f = SimilarSizeWriter(filename, 'w')
            f.max_pct_change = max_change
        else:
            f = AtomicFileWriter(filename, 'w')
        return f
    if default:
        return default
    raise _Errors.CerebrumError(
        'Outfile not specified and LDAP_{0}["file"] not set'.format(tree))
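# A minimal usage sketch for ldif_outfile above, assuming a tree named
# 'ORG' configured via cereconf.LDAP_ORG; generate_org_ldif() is a
# hypothetical placeholder for whatever produces the LDIF payload.  The
# returned writer behaves like an ordinary file object, and close() is
# where the SimilarSizeWriter size check (and hence any failure) happens.
def example_write_org_ldif():
    f = ldif_outfile('ORG')       # resolves cereconf.LDAP_ORG['file']
    f.write(generate_org_ldif())  # hypothetical LDIF producer
    f.close()                     # may raise if the file size changed too much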
def write_undenh_student(self, undenh_student_file):
    """Write an overview of persons signed up for teaching units
    (undervisningsenheter).  Includes data for all teaching units in
    the current and the next semester."""
    logger.info("Writing undenh_student info to '%s'", undenh_student_file)
    f = SimilarSizeWriter(undenh_student_file, mode='w',
                          encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    for semester in ('current', 'next'):
        cols, undenh = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        for u in undenh:
            u_attr = {}
            for k in ('institusjonsnr', 'emnekode', 'versjonskode',
                      'terminnr', 'terminkode', 'arstall'):
                u_attr[k] = u[k]
            student_cols, student = self._ext_cols(
                self.fs.undervisning.list_studenter_underv_enhet(**u_attr))
            for s in student:
                s_attr = u_attr.copy()
                for k in ('fodselsdato', 'personnr'):
                    s_attr[k] = s[k]
                f.write(xml.xmlify_dbrow({}, (), 'student',
                                         extra_attr=s_attr) + "\n")
    f.write("</data>\n")
    f.close()
def write_netpubl_info(outfile):
    """Write a file with information about web publishing status
    (nettpublisering)."""
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 15
    f.write(xml.xml_hdr + "<data>\n")
    cols, nettpubl = _ext_cols(fs.person.list_status_nettpubl())
    for n in nettpubl:
        f.write(xml.xmlify_dbrow(n, xml.conv_colnames(cols),
                                 'nettpubl') + "\n")
    f.write("</data>\n")
    f.close()
def generate_filegroup(self, filename):
    logger.debug("generate_group: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    groups = self._exported_groups.keys()
    groups.sort()
    for group_id in groups:
        group_name = self._exported_groups[group_id]
        tmp = posix_group.illegal_name(group_name)
        if tmp or len(group_name) > 8:
            logger.warn("Bad groupname %s %s" % (group_name, tmp))
            continue
        try:
            group_members, user_members = self._expand_group(group_id)
        except Errors.NotFoundError:
            logger.warn("Group %s has no GID", group_id)
            continue
        tmp_users = self._filter_illegal_usernames(user_members, group_name)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, tmp_users))
        f.write(self._wrap_line(group_name, ",".join(tmp_users),
                                ':*:%i:' % self._group.posix_gid))
    # e_o_f is assumed to be a module-level flag controlling the
    # trailing end-of-file marker.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def main():
    opts, rest = getopt.getopt(sys.argv[1:], "o:", ("output=",))
    filename = None
    for option, value in opts:
        if option in ("-o", "--output"):
            filename = value
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 50
    output_file(sort_list(generate_list()), f)
    f.close()
def main():
    opts, rest = getopt.getopt(sys.argv[1:], "o:", ("output=",))
    filename = None
    for option, value in opts:
        if option in ("-o", "--output"):
            filename = value
    f = SimilarSizeWriter(filename, "w", encoding='UTF-8')
    f.max_pct_change = 50
    output_file(sorted(generate_list()), f)
    f.close()
def write_role_info(self, role_file):
    """Write a file with information about all roles defined in
    FS.PERSONROLLE."""
    logger.info("Writing role info to '%s'", role_file)
    f = SimilarSizeWriter(role_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, role = self._ext_cols(
        self.fs.undervisning.list_alle_personroller())
    for r in role:
        f.write(
            xml.xmlify_dbrow(r, xml.conv_colnames(cols), 'rolle') + "\n")
    f.write("</data>\n")
    f.close()
def write_emne_info(self, emne_info_file):
    """Write a file with information about all defined subjects (emner)."""
    logger.info("Writing emne info to '%s'", emne_info_file)
    f = SimilarSizeWriter(emne_info_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, dta = self._ext_cols(self.fs.info.list_emner())
    for t in dta:
        f.write(
            xml.xmlify_dbrow(t, xml.conv_colnames(cols), 'emne') + "\n")
    f.write("</data>\n")
    f.close()
def generate_filegroup(self, filename):
    logger.debug("generate_group: %s" % filename)
    f = SimilarSizeWriter(filename, "w", encoding='UTF-8')
    f.max_pct_change = 5
    groups = self._exported_groups.keys()
    groups.sort()
    for group_id in groups:
        group_name = self._exported_groups[group_id]
        tmp = posix_group.illegal_name(group_name)
        if tmp or len(group_name) > 8:
            logger.warn("Bad groupname %s %s" % (group_name, tmp))
            continue
        try:
            group_members, user_members = self._expand_group(group_id)
        except Errors.NotFoundError:
            logger.warn("Group %s has no GID", group_id)
            continue
        tmp_users = self._filter_illegal_usernames(user_members, group_name)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, tmp_users))
        f.write(self._wrap_line(group_name, ",".join(tmp_users),
                                ':*:%i:' % self._group.posix_gid))
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def write_netpubl_info(self, netpubl_file):
    """Write a file with information about web publishing status
    (nettpublisering)."""
    logger.info("Writing nettpubl info to '%s'", netpubl_file)
    f = SimilarSizeWriter(netpubl_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, nettpubl = self._ext_cols(self.fs.person.list_status_nettpubl())
    for n in nettpubl:
        f.write(xml.xmlify_dbrow(n, xml.conv_colnames(cols),
                                 'nettpubl') + "\n")
    f.write("</data>\n")
    f.close()
def write_evukurs_info(self, evu_kursinfo_file):
    """Write data about all EVU courses (we need this, among other
    things, to build the EVU part of CF)."""
    logger.info("Writing evukurs info to '%s'", evu_kursinfo_file)
    f = SimilarSizeWriter(evu_kursinfo_file, mode='w',
                          encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, evukurs = self._ext_cols(self.fs.evu.list_kurs())
    for ek in evukurs:
        f.write(
            xml.xmlify_dbrow(ek, xml.conv_colnames(cols), "evukurs") + "\n")
    f.write("</data>\n")
    f.close()
def write_misc_info(self, misc_file, tag, func_name):
    """Write a file with data from the given function in access_FS."""
    logger.info("Writing misc info to '%s'", misc_file)
    f = SimilarSizeWriter(misc_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    # Resolve a dotted attribute path (e.g. 'undervisning.list_x')
    # relative to self.fs.
    func = reduce(lambda obj, attr: getattr(obj, attr),
                  func_name.split('.'), self.fs)
    cols, dta = self._ext_cols(func())
    for t in dta:
        self.fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), tag) + "\n")
    f.write("</data>\n")
    f.close()
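# Illustrative call of write_misc_info above.  The dotted func_name is
# resolved attribute by attribute relative to self.fs, so a
# (hypothetical) value like 'undervisning.list_fagpersoner' would invoke
# self.fs.undervisning.list_fagpersoner(); 'exporter' and the file/tag
# names below are placeholders, not part of the function itself.
#
# exporter.write_misc_info('fagpersoner.xml', 'fagperson',
#                          'undervisning.list_fagpersoner')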
def write_edu_info(outfile):
    """Write a file with the educational information for all students.

    For each student, we list all affiliations to undenh, undakt, evu,
    kursakt and kull.

    The main problem in this method is that we must build an enormous
    dict with all the educational information.  That dict uses a lot
    of memory.

    Warning: we perform no consistency check that educational elements
    mentioned in outfile will actually exist in the other files
    generated by this script.  I.e. it is entirely possible that a
    student S is registered at undakt U1 while U1 is not mentioned in
    undervisningsaktiveter.xml.

    fs.undervisning.list_studenter_alle_kull()    <- kull participation
    fs.undervisning.list_studenter_alle_undenh()  <- undenh participation
    fs.undervisning.list_studenter_alle_undakt()  <- undakt participation
    fs.evu.list_studenter_alle_kursakt()          <- kursakt participation
    fs.evu.list()                                 <- evu participation
    """
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 15
    f.write(xml.xml_hdr + "<data>\n")
    for triple in (
            ("kull", None, fs.undervisning.list_studenter_alle_kull),
            ("undenh", None, fs.undervisning.list_studenter_alle_undenh),
            ("undakt", None, fs.undervisning.list_studenter_alle_undakt),
            ("evu", ("fodselsdato", "personnr", "etterutdkurskode",
                     "kurstidsangivelsekode"),
             fs.evu.list),
            ("kursakt", None, fs.evu.list_studenter_alle_kursakt)):
        kind, fields, selector = triple
        logger.debug("Processing %s entries", kind)
        for row in selector():
            if fields is None:
                tmp_row = row
                keys = row.keys()
            else:
                tmp_row = dict((fld, row[fld]) for fld in fields)
                keys = fields
            f.write(xml.xmlify_dbrow(tmp_row, keys, kind) + '\n')
    f.write("</data>\n")
    f.close()
def write_netgroup(self, filename, e_o_f=False, include_persons=False):
    logger.debug("generate_netgroup: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    netgroups = self.generate_netgroup(include_persons=include_persons)
    for group_name, members in netgroups:
        f.write(self._wrap_line(group_name, members, ' ', is_ng=True))
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def write_undenh_metainfo(self, undervenh_file):
    """Write metadata about teaching units (undervisningsenheter) for
    the current and the next semester."""
    logger.info("Writing undenh_meta info to '%s'", undervenh_file)
    f = SimilarSizeWriter(undervenh_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<undervenhet>\n")
    for semester in ('current', 'next'):
        cols, undenh = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        for u in undenh:
            f.write(
                xml.xmlify_dbrow(u, xml.conv_colnames(cols),
                                 'undenhet') + "\n")
    f.write("</undervenhet>\n")
    f.close()
def write_to_file(ips_by_mac, file):
    """Writes all relevant data to selected output file.

    @type ips_by_mac: dict
    @param ips_by_mac:
        A dictionary where the keys are MAC-addresses and the values
        are lists of IP-addresses associated with each MAC-address.

    @type file: string
    @param file: Path/name of the file where the data should be written to

    """
    all_macs = ips_by_mac.keys()
    all_macs.sort()
    logger.info("Writing to export-file: '%s'" % file)
    output_stream = SimilarSizeWriter(file, "w")
    output_stream.max_pct_change = 10
    for mac in all_macs:
        output_stream.write("%-18s %s\n" % (mac, ",".join(ips_by_mac[mac])))
    logger.info("Done writing to export-file")
    output_stream.close()
def list_quotas(fname, hostname, diskname, spread):
    f = SimilarSizeWriter(fname, "w")
    f.max_pct_change = 10
    disk = Factory.get("Disk")(db)
    if diskname:
        disk.find_by_path(diskname)
        list_disk_quotas(f, disk.entity_id, spread)
    elif hostname:
        host = Factory.get("Host")(db)
        host.find_by_name(hostname)
        for row in disk.list(host_id=host.entity_id, spread=spread):
            list_disk_quotas(f, row['disk_id'], spread)
    else:
        for row in disk.list_traits(co.trait_disk_quota):
            list_disk_quotas(f, row['entity_id'], spread)
    f.close()
def write_netgroup(self, filename, e_o_f=False):
    logger.debug("generate_netgroup: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    netgroups = self.generate_netgroup()
    for group_name, members in netgroups:
        f.write(self._wrap_line(group_name, members, ' ', is_ng=True))
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def open(self, which):
    fname = getattr(self.opts, which)
    if fname:
        if which == 'ldif':
            f = LDIFWriter('POSIX', fname, module=posixconf)
            if self.opts.user_spread:
                f.write_container()
        else:
            f = SimilarSizeWriter(fname, "w")
            f.max_pct_change = 10
        return f
def generate_netgroup(self, filename):
    logger.debug("generate_netgroup: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    for group_id in self._exported_groups.keys():
        group_name = self._exported_groups[group_id]
        group_members, user_members = self._expand_group(group_id)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, user_members))
        f.write(self._wrap_line(group_name,
                                self._format_members(group_members,
                                                     user_members,
                                                     group_name),
                                ' ', is_ng=True))
    # e_o_f is assumed to be a module-level flag controlling the
    # trailing end-of-file marker.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def write_filegroup(self, filename, e_o_f=False):
    """Write the filegroups to the given filename.

    If e_o_f is True, "E_O_F" is written after the file groups.
    """
    logger.debug("write_filegroup: %s" % filename)
    with closing(SimilarSizeWriter(filename, "w")) as f:
        f.max_pct_change = 5
        for group_name, gid, users in self.generate_filegroup():
            f.write(self._wrap_line(group_name, ",".join(users),
                                    ':*:%i:' % gid))
        if e_o_f:
            f.write('E_O_F\n')
def main():
    try:
        import argparse
    except ImportError:
        from Cerebrum.extlib import argparse

    filenames = ('atoms', 'roles', 'hostpolicies', 'relationships')
    parser = argparse.ArgumentParser(description="Produce host policy files")
    for filename in filenames:
        parser.add_argument('--%s' % filename, dest=filename, default=None,
                            metavar='FILE',
                            help='Write %s to FILE' % filename)
    opts = parser.parse_args()

    action = False
    streams = []
    for filename, process in ((opts.atoms, process_atoms),
                              (opts.roles, process_roles),
                              (opts.hostpolicies, process_hostpolicies),
                              (opts.relationships, process_relationships)):
        if filename:
            stream = SimilarSizeWriter(filename)
            stream.max_pct_change = 90
            process(stream)
            streams.append(stream)
            action = True
    # Don't close streams (commit) until all files have been generated
    for stream in streams:
        stream.close()
    if not action:
        parser.error('No dump specified, got nothing to do')
def main():
    try:
        import argparse
    except ImportError:
        from Cerebrum.extlib import argparse

    filenames = ('atoms', 'roles', 'hostpolicies', 'relationships')
    parser = argparse.ArgumentParser(description="Produce host policy files")
    for filename in filenames:
        parser.add_argument('--%s' % filename, dest=filename, default=None,
                            metavar='FILE',
                            help='Write %s to FILE' % filename)
    opts = parser.parse_args()

    action = False
    streams = []
    for filename, process in ((opts.atoms, process_atoms),
                              (opts.roles, process_roles),
                              (opts.hostpolicies, process_hostpolicies),
                              (opts.relationships, process_relationships)):
        if filename:
            stream = SimilarSizeWriter(filename, 'w', encoding='latin-1')
            stream.max_pct_change = 90
            process(stream)
            streams.append(stream)
            action = True
    # Don't close streams (commit) until all files have been generated
    for stream in streams:
        stream.close()
    if not action:
        parser.error('No dump specified, got nothing to do')
def write_to_file(ips_by_mac, fname):
    """Writes all relevant data to selected output file.

    @type ips_by_mac: dict
    @param ips_by_mac:
        A dictionary where the keys are MAC-addresses and the values
        are lists of IP-addresses associated with each MAC-address.

    @type fname: string
    @param fname: Path/name of the file where the data should be written to

    """
    logger.info("Writing to export-file: '%s'", fname)
    with SimilarSizeWriter(fname, "w", encoding='ASCII') as output_stream:
        output_stream.max_pct_change = 10
        for mac, ips in sorted(ips_by_mac.items()):
            output_stream.write("%-18s %s\n" % (mac, ",".join(ips)))
    logger.info("Done writing to export-file")
def main(inargs=None):
    parser = argparse.ArgumentParser(description=__doc__)
    # TODO: change to the actual filename...
    parser.add_argument('-t', '--txt-path',
                        dest='export_dir',
                        type=writable_dir_type,
                        default=DEFAULT_EXPORT_DIR,
                        metavar='DIR',
                        help='Write export data to %(metavar)s')
    parser.add_argument('-e', '--encoding',
                        dest='codec',
                        default=DEFAULT_ENCODING,
                        type=Cerebrum.utils.argutils.codec_type,
                        help="output file encoding, defaults to %(default)s")
    Cerebrum.logutils.options.install_subparser(parser)

    args = parser.parse_args(inargs)
    Cerebrum.logutils.autoconf('cronjob', args)

    logger.info('Start of script %s', parser.prog)
    logger.debug("args: %r", args)

    filename = os.path.join(args.export_dir, DEFAULT_FILENAME)
    db = Factory.get('Database')()
    persons = PersonLookup(db)

    # Dump OFK info
    with SimilarSizeWriter(filename, mode="w",
                           encoding=args.codec.name) as f:
        f.max_pct_change = 10
        write_csv_export(f, persons)

    logger.info('Report written to %s', filename)
    logger.info('Done with script %s', parser.prog)
def write_person_info(self, person_file):
    """Write a file with information about all persons registered in
    FS whom we may also want to include in Cerebrum.  A person may
    occur several times in the file."""

    # TBD: Should we cache all the data, so that we instead can create
    # a file where all information about a person is gathered under a
    # single <person> tag?

    logger.info("Writing person info to '%s'", person_file)
    f = SimilarSizeWriter(person_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")

    # Active students
    cols, students = self._ext_cols(self.fs.student.list_aktiv())
    for s in students:
        f.write(
            xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'aktiv') + "\n")

    # Exam registrations
    cols, students = self._ext_cols(
        self.fs.student.list_eksamensmeldinger())
    for s in students:
        f.write(xml.xmlify_dbrow(
            s, xml.conv_colnames(cols), 'eksamen') + "\n")

    # Active academic staff (fagpersoner) at Hiøf
    cols, fagperson = self._ext_cols(
        self.fs.undervisning.list_fagperson_semester())
    for p in fagperson:
        f.write(
            xml.xmlify_dbrow(p, xml.conv_colnames(cols),
                             'fagperson') + "\n")

    f.write("</data>\n")
    f.close()
    @type limit_lines: Float"""
    try:
        inFile = open(inFilePath, 'r')            # Can we read from input?
        assert (os.access(outFilePath, os.W_OK))  # Is output writable?
    except IOError as err:
        logger.error(err)
        raise err
    logger.info("Reading from: %s (%s bytes)",
                inFilePath, os.path.getsize(inFilePath))
    logger.info("Comparing to: %s (%s bytes)",
                outFilePath, os.path.getsize(outFilePath))
    if limit_percentage:
        ssw = SimilarSizeWriter(outFilePath, mode='w')
        ssw.max_pct_change = limit_percentage
    else:
        ssw = SimilarLineCountWriter(outFilePath, mode='w')
        ssw.max_line_change = limit_lines
    # read from input, write to temporary output file
    for line in inFile:
        ssw.write(line)
    try:
        # close() checks that changes are within limits
        ssw.close()
    except FileChangeTooBigError as err:
        logger.error("Changes are too big, leaving behind temporary file %s",
                     ssw._tmpname)
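# A minimal sketch of the failure mode the snippet above guards against,
# assuming a previous run left '/tmp/export.txt' at some size; the path
# and the 50 percent limit are illustrative only.  The writer produces a
# temporary file and only replaces the target on a successful close(),
# so a failed close() leaves the previous export intact.
def example_guarded_write():
    ssw = SimilarSizeWriter('/tmp/export.txt', mode='w')  # illustrative path
    ssw.max_pct_change = 50
    ssw.write("much less data than last time\n")
    try:
        ssw.close()  # compares the new size against the existing file
    except FileChangeTooBigError:
        logger.error("refusing to replace export; size changed too much")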
def write_ou_info(outfile):
    """Write a file with information about all OUs."""
    logger.info("Writing OU info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    cols, ouer = _ext_cols(
        fs.info.list_ou(cereconf.DEFAULT_INSTITUSJONSNR))  # TODO
    for o in ouer:
        sted = {}
        for fs_col, xml_attr in (
                ('faknr', 'fakultetnr'),
                ('instituttnr', 'instituttnr'),
                ('gruppenr', 'gruppenr'),
                ('stedakronym', 'akronym'),
                ('stedakronym', 'forkstednavn'),
                ('stednavn_bokmal', 'stednavn'),
                ('faknr_org_under', 'fakultetnr_for_org_sted'),
                ('instituttnr_org_under', 'instituttnr_for_org_sted'),
                ('gruppenr_org_under', 'gruppenr_for_org_sted'),
                ('adrlin1', 'adresselinje1_intern_adr'),
                ('adrlin2', 'adresselinje2_intern_adr'),
                ('postnr', 'poststednr_intern_adr'),
                ('adrlin1_besok', 'adresselinje1_besok_adr'),
                ('adrlin2_besok', 'adresselinje2_besok_adr'),
                ('postnr_besok', 'poststednr_besok_adr')):
            if o[fs_col] is not None:
                sted[xml_attr] = xml.escape_xml_attr(o[fs_col])
        komm = []
        for fs_col, typekode in (
                ('telefonnr', 'EKSTRA TLF'),
                ('faxnr', 'FAX')):
            if o[fs_col]:  # Skip NULLs and empty strings
                komm.append(
                    {'kommtypekode': xml.escape_xml_attr(typekode),
                     'kommnrverdi': xml.escape_xml_attr(o[fs_col])})
        # TODO: The columns 'url' and 'bibsysbeststedkode' are fetched
        # from FS, but are not included in the output here.
        f.write('<sted ' +
                ' '.join(["%s=%s" % item for item in sted.items()]) +
                '>\n')
        for k in komm:
            f.write('<komm ' +
                    ' '.join(["%s=%s" % item for item in k.items()]) +
                    ' />\n')
        f.write('</sted>\n')
    f.write("</data>\n")
    f.close()
def write_regkort_info(outfile):
    """Write a file with information about semester registrations for
    the current semester."""
    logger.info("Writing regkort info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    cols, regkort = _ext_cols(fs.student.list_semreg())
    for r in regkort:
        f.write(xml.xmlify_dbrow(r, xml.conv_colnames(cols),
                                 'regkort') + "\n")
    f.write("</data>\n")
    f.close()
def write_person_info(outfile):
    """Write a file with information about all persons registered in
    FS whom we may also want to include in Cerebrum.  A person may
    occur several times in the file."""

    # TBD: Should we cache all the data, so that we instead can create
    # a file where all information about a person is gathered under a
    # single <person> tag?

    logger.info("Writing person info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")

    # Academic staff (fagpersoner)
    cols, fagpersoner = _ext_cols(fs.undervisning.list_fagperson_semester())
    for p in fagpersoner:
        f.write(xml.xmlify_dbrow(p, xml.conv_colnames(cols),
                                 'fagperson') + "\n")

    # Students with admission, privatists (= admission to the study
    # programme 'privatist') and alumni
    cols, students = _ext_cols(fs.student.list())
    for s in students:
        # The Oracle driver thinks the result of a union of ints is float
        fix_float(s)
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'opptak') + "\n")

    # Privatists: privatist admission to a study programme, or
    # subject (emne) privatists
    cols, students = _ext_cols(fs.student.list_privatist())
    for s in students:
        fix_float(s)
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'privatist_studieprogram') + "\n")
    cols, students = _ext_cols(fs.student.list_privatist_emne())
    for s in students:
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'privatist_emne') + "\n")

    # Active students
    cols, students = _ext_cols(fs.student.list_aktiv())
    for s in students:
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'aktiv') + "\n")

    # Active subject (emne) students
    cols, students = _ext_cols(fs.student.list_aktiv_emnestud())
    for s in students:
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'emnestud') + "\n")

    # Semester registrations
    cols, students = _ext_cols(fs.student.list_semreg())
    for s in students:
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'regkort') + "\n")

    # Exam registrations
    cols, students = _ext_cols(fs.student.list_eksamensmeldinger())
    for s in students:
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                                 'eksamen') + "\n")

    # Doctoral students with admission
    cols, drstudents = _ext_cols(fs.student.list_drgrad())
    for d in drstudents:
        f.write(xml.xmlify_dbrow(d, xml.conv_colnames(cols),
                                 'drgrad') + "\n")

    # EVU students
    # Some EVU students will already be covered by the query above
    cols, evustud = _ext_cols(fs.evu.list())
    for e in evustud:
        f.write(xml.xmlify_dbrow(e, xml.conv_colnames(cols), 'evu') + "\n")

    # Students on leave (also covered by GetStudinfOpptak)
    cols, permstud = _ext_cols(fs.student.list_permisjon())
    for p in permstud:
        f.write(xml.xmlify_dbrow(p, xml.conv_colnames(cols),
                                 'permisjon') + "\n")

    ##
    ## STA has decided that persons with an offer of admission shall
    ## not have access to any IT services for the time being, so at
    ## this point we stop fetching information about them.  As it is
    ## uncertain whether this will change again in the near future, we
    ## leave the code in place for now.
    ##
    ## # Persons who have received an offer of admission
    ## cols, tilbudstud = _ext_cols(fs.student.list_tilbud())
    ## for t in tilbudstud:
    ##     f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols),
    ##                              'tilbud') + "\n")

    f.write("</data>\n")
    f.close()
def main(argv):
    global logger

    logger = Factory.get_logger("cronjob")

    root_ou = None
    output_file = None
    perspective = None
    source_system = None
    tag = None
    spread = None  # export all OUs

    args, junk = getopt.getopt(argv[1:], "o:r:p:s:t:",
                               ("output-file=", "root-ou=", "perspective=",
                                "source-system=", "tag=", "spread=",))
    for option, value in args:
        if option in ("-o", "--output-file"):
            output_file = value
        elif option in ("-r", "--root-ou"):
            root_ou = value
        elif option in ("-p", "--perspective"):
            perspective = value
        elif option in ("-s", "--source-system"):
            source_system = value
        elif option in ("-t", "--tag"):
            tag = value
        elif option in ("--spread",):
            spread = value

    if output_file is None:
        logger.error("No output file name specified.")
        sys.exit(1)
    if root_ou is None:
        logger.error("No root OU is specified.")
        sys.exit(1)
    if tag is None:
        logger.error("No tag is specified. Can't deduce value for <kilde>")
        sys.exit(1)

    const = Factory.get("Constants")()
    if (not perspective or
            not const.human2constant(perspective, const.OUPerspective)):
        logger.error("Bogus perspective '%s'. Available options are: %s",
                     perspective,
                     ", ".join(str(x) for x in
                               const.fetch_constants(const.OUPerspective)))
        sys.exit(1)
    perspective = const.human2constant(perspective, const.OUPerspective)

    if (not source_system or
            not const.human2constant(source_system,
                                     const.AuthoritativeSystem)):
        logger.error("Bogus source '%s'. Available options are: %s",
                     source_system,
                     ", ".join(str(x) for x in
                               const.fetch_constants(
                                   const.AuthoritativeSystem)))
        sys.exit(1)
    source_system = const.human2constant(source_system,
                                         const.AuthoritativeSystem)
    spread = const.human2constant(spread, const.Spread)

    root_ou_obj = find_root_ou(root_ou)
    if spread and not root_ou_obj.has_spread(spread):
        logger.error('Root OU %s does not have %s spread. To export all OUs '
                     'run the script without --spread option.'
                     % (root_ou, spread))
        if len(root_ou_obj.list_all_with_spread(spread)) == 0:
            logger.error('No OU has %s spread. To be exported to Cristin an'
                         ' OU must have this spread.' % spread)
        sys.exit(1)

    sink = SimilarSizeWriter(output_file)
    sink.max_pct_change = 15
    output_xml(sink, tag, root_ou_obj, perspective, source_system, spread)
    sink.close()
def write_betalt_papir_info(outfile):
    """Write a file with information about everyone who is either
    exempt from paying the copy fee or has paid it."""
    logger.info("Writing betaltpapir info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    cols, dta = _ext_cols(
        fs.betaling.list_kopiavgift_data(kun_fritak=False, semreg=True))
    for t in dta:
        fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols),
                                 'betalt') + "\n")
    f.write("</data>\n")
    f.close()
def write_misc_info(outfile, tag, func_name):
    """Write a file with data from the given function in access_FS."""
    logger.info("Writing misc info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    if tag == 'aktivitet':
        f.max_pct_change = 20
    elif tag == 'enhet':
        f.max_pct_change = 15
    else:
        f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    # It's still not foolproof, but hopefully much more sane than simply
    # eval'ing.  (Renamed the accumulator from 'next' to 'func' to avoid
    # shadowing the builtin.)
    components = func_name.split(".")
    func = fs
    for c in components:
        func = getattr(func, c)
    cols, dta = _ext_cols(func())
    for t in dta:
        fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), tag) + "\n")
    f.write("</data>\n")
    f.close()
def write_personrole_info(outfile):
    """Write a file with information about all roles defined in
    FS.PERSONROLLE."""
    logger.info("Writing personrolle info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 20
    f.write(xml.xml_hdr + "<data>\n")
    cols, dta = _ext_cols(fs.undervisning.list_alle_personroller())
    for t in dta:
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols),
                                 'rolle') + "\n")
    f.write("</data>\n")
    f.close()
def write_emne_info(outfile):
    """Write a file with information about all defined subjects (emner)."""
    logger.info("Writing emne info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 15
    f.write(xml.xml_hdr + "<data>\n")
    cols, dta = _ext_cols(fs.info.list_emner())
    for t in dta:
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), 'emne') + "\n")
    f.write("</data>\n")
    f.close()
def write_netpubl_info(outfile):
    """Write a file with information about web publishing status
    (nettpublisering)."""
    logger.info("Writing nettpubl info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    cols, nettpubl = _ext_cols(fs.person.list_status_nettpubl())
    for n in nettpubl:
        f.write(xml.xmlify_dbrow(n, xml.conv_colnames(cols),
                                 'nettpubl') + "\n")
    f.write("</data>\n")
    f.close()
def write_person_info(self, person_file):
    """Write a file with information about all persons registered in
    FS whom we may also want to include in Cerebrum.  A person may
    occur several times in the file."""

    # TBD: Should we cache all the data, so that we instead can create
    # a file where all information about a person is gathered under a
    # single <person> tag?

    logger.info("Writing person info to '%s'", person_file)
    f = SimilarSizeWriter(person_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")

    # Active students
    cols, students = self._ext_cols(self.fs.student.list_aktiv())
    for s in students:
        self.fix_float(s)
        f.write(
            xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'aktiv') + "\n")

    # Exam registrations
    cols, students = self._ext_cols(
        self.fs.student.list_eksamensmeldinger())
    for s in students:
        f.write(
            xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'eksamen') + "\n")

    # EVU students
    # Some EVU students will already be covered by the query above
    cols, students = self._ext_cols(self.fs.evu.list())
    for e in students:
        f.write(xml.xmlify_dbrow(e, xml.conv_colnames(cols), 'evu') + "\n")

    # Privatists
    cols, students = self._ext_cols(self.fs.student.list_privatist())
    for s in students:
        f.write(
            xml.xmlify_dbrow(s, xml.conv_colnames(cols),
                             'privatist_studieprogram') + "\n")

    f.write("</data>\n")
    f.close()
def write_forkurs_info(outfile):
    from mx.DateTime import now
    logger.info("Writing pre-course file to '{}'".format(outfile))
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    cols, course_attendants = _ext_cols(fs.forkurs.list())
    f.write(xml.xml_hdr + "<data>\n")
    for a in course_attendants:
        f.write('<regkort fodselsdato="{}" personnr="{}" dato_endring="{}" '
                'dato_opprettet="{}"/>\n'.format(a['fodselsdato'],
                                                 a['personnr'],
                                                 str(now()),
                                                 str(now())))
        f.write('<emnestud fodselsdato="{}" personnr="{}" etternavn="{}" '
                'fornavn="{}" adrlin2_semadr="" postnr_semadr="" '
                'adrlin3_semadr="" adrlin2_hjemsted="" postnr_hjemsted="" '
                'adrlin3_hjemsted="" sprakkode_malform="NYNORSK" kjonn="X" '
                'studentnr_tildelt="{}" emnekode="FORGLU" versjonskode="1" '
                'terminkode="VÅR" arstall="2016" telefonlandnr_mobil="{}" '
                'telefonnr_mobil="{}"/>\n'.format(a['fodselsdato'],
                                                  a['personnr'],
                                                  a['etternavn'],
                                                  a['fornavn'],
                                                  a['studentnr_tildelt'],
                                                  a['telefonlandnr'],
                                                  a['telefonnr']))
    f.write("</data>\n")
    f.close()
def write_passwd(self, filename, shadow_file, e_o_f=False):
    logger.debug("write_passwd: filename=%r, shadow_file=%r, spread=%r",
                 filename, shadow_file, self.spread)
    f = SimilarSizeWriter(filename, "w", encoding='latin-1')
    f.max_pct_change = 10
    if shadow_file:
        s = SimilarSizeWriter(shadow_file, "w")
        s.max_pct_change = 10
    user_rows = sorted(self.generate_passwd(), key=operator.itemgetter(0))
    for row in user_rows:
        uname = row[0]
        if self.auth_method is None and row[1] != '*locked':
            # substitute pwdcrypt with an 'x' if auth_method is None
            passwd = 'x'
        else:
            passwd = row[1]
        rest = row[2:]
        if shadow_file:
            s.write("%s:%s:::\n" % (uname, passwd))
            if not passwd[0] == '*':
                passwd = "!!"
        line = join([uname, passwd] + rest)
        f.write(line + "\n")
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
    if shadow_file:
        s.close()
def main():
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'd:f:o:t:a:',
            ['delim=', 'file=', 'out=', 'tag=', 'append='])
    except getopt.GetoptError:
        usage(2)

    big_xml = {}
    for opt, val in opts:
        if opt in ('-t', '--tag'):
            tag = val
        elif opt in ('-d', '--delim'):
            delim = val.split(":")
        elif opt in ('-f', '--file'):
            CollectParser(val, big_xml, delim)
        elif opt in ('-a', '--append'):
            CollectParser(val, big_xml, delim, True)
        elif opt in ('-o', '--out'):
            f = SimilarSizeWriter(val, "w")
            f.max_pct_change = 10
            xml = XMLHelper()
            f.write(xml.xml_hdr + "<data>\n")
            for bx_key in big_xml.keys():
                bx_delim = bx_key.split("¦")
                f.write("<%s %s>\n" % (tag, " ".join(
                    ["%s=%s" % (delim[n], xml.escape_xml_attr(bx_delim[n]))
                     for n in range(len(delim))])))
                for tmp_tag in big_xml[bx_key]:
                    tmp = tmp_tag['TagName']
                    del tmp_tag['TagName']
                    f.write(" <%s %s/>\n" % (tmp, " ".join(
                        ["%s=%s" % (tk, xml.escape_xml_attr(tmp_tag[tk]))
                         for tk in tmp_tag.keys()])))
                f.write("</%s>\n" % tag)
            f.write("</data>\n")
            f.close()
        if opt in ('-r', '--role-file',):
            options["rolefile"] = val
        if opt in ('--no-import',):
            options["import"] = False
        if opt in ('--no-export',):
            options["export"] = False

    if options["import"]:
        import_data()
    if options["export"]:
        # Wrap the output in a SimilarSizeWriter unless writing to stdout.
        output_stream = options["output"]
        if output_stream != sys.stdout:
            output_stream = SimilarSizeWriter(options["output"], "w")
            output_stream.max_pct_change = 10
        export_data(output_stream)
        if output_stream != sys.stdout:
            output_stream.close()
    return 0


if __name__ == "__main__":
    logger.info("Starting program '%s'" % progname)
    return_value = main()
    logger.info("Program '%s' finished" % progname)
    sys.exit(return_value)
def write_topic_info(outfile):
    """Write a file with information about all XXX."""
    # TODO: This file will change with the new setup :-(
    logger.info("Writing topic info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    cols, topics = _ext_cols(fs.student.list_eksamensmeldinger())
    for t in topics:
        # The Oracle driver thinks the result of a union of ints is float
        fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), 'topic') + "\n")
    f.write("</data>\n")
    f.close()
options["host"] = val if opt in ('--dryrun',): options["dryrun"] = True if opt in ('-r', '--role-file',): options["rolefile"] = val if opt in ('--no-import',): options["import"] = False if opt in ('--no-export',): options["export"] = False if options["import"]: import_data() if options["export"]: if options["output"] != sys.stdout: output_stream = SimilarSizeWriter(options["output"], "w") output_stream.max_pct_change = 10 export_data(output_stream) if output_stream != sys.stdout: output_stream.close() return 0 if __name__ == "__main__": logger.info("Starting program '%s'" % progname) return_value = main() logger.info("Program '%s' finished" % progname) sys.exit(return_value)
def main():
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'd:f:o:t:a:',
            ['delim=', 'file=', 'out=', 'tag=', 'append='])
    except getopt.GetoptError:
        usage(2)

    big_xml = {}
    for opt, val in opts:
        if opt in ('-t', '--tag'):
            tag = val
        elif opt in ('-d', '--delim'):
            delim = val.split(":")
        elif opt in ('-f', '--file'):
            CollectParser(val, big_xml, delim)
        elif opt in ('-a', '--append'):
            CollectParser(val, big_xml, delim, True)
        elif opt in ('-o', '--out'):
            f = SimilarSizeWriter(val, "w")
            f.max_pct_change = 50
            xml = XMLHelper()
            f.write(xml.xml_hdr + "<data>\n")
            for bx_key in big_xml.keys():
                bx_delim = bx_key.split("¦")
                f.write("<%s %s>\n" % (tag, " ".join(
                    ["%s=%s" % (delim[n], xml.escape_xml_attr(bx_delim[n]))
                     for n in range(len(delim))])))
                for tmp_tag in big_xml[bx_key]:
                    tmp = tmp_tag['TagName']
                    del tmp_tag['TagName']
                    f.write(" <%s %s/>\n" % (tmp, " ".join(
                        ["%s=%s" % (tk, xml.escape_xml_attr(tmp_tag[tk]))
                         for tk in tmp_tag.keys()])))
                f.write("</%s>\n" % tag)
            f.write("</data>\n")
            f.close()