def main(): filename = pj(cereconf.LDAP['dump_dir'], 'webaccounts.ldif') spread = base = None try: opts, args = getopt.getopt(sys.argv[1:], 'hf:b:s:', ['help', 'filename=', 'spread=', 'base=']) except getopt.GetoptError: usage(1) for opt, val in opts: if opt in ( '-h', '--help', ): usage() elif opt in ('-f', '--filename'): filename = val elif opt in ('-s', '--spread'): str2const = dict() for c in dir(co): tmp = getattr(co, c) str2const[str(tmp)] = tmp spread = str2const[val] elif opt in ('-b', '--base'): base = val if not (spread and base): print spread, base usage(1) print "foo" f = SimilarSizeWriter(filename, 'w') f.max_pct_change = 90 dump_accounts(f, spread, base) f.close()
def generate_filegroup(self, filename):
    """Write the exported groups as a filegroup map to ``filename``.

    Groups with illegal names or names longer than 8 characters are
    skipped, as are groups without a GID.
    """
    logger.debug("generate_group: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    groups = self._exported_groups.keys()
    groups.sort()
    for group_id in groups:
        group_name = self._exported_groups[group_id]
        tmp = posix_group.illegal_name(group_name)
        if tmp or len(group_name) > 8:
            logger.warn("Bad groupname %s %s" % (group_name, tmp))
            continue
        try:
            group_members, user_members = self._expand_group(group_id)
        except Errors.NotFoundError:
            logger.warn("Group %s has no GID", group_id)
            continue
        # Drop member usernames that are not legal in this map.
        tmp_users = self._filter_illegal_usernames(user_members, group_name)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, tmp_users))
        f.write(self._wrap_line(group_name, ",".join(tmp_users),
                                ':*:%i:' % self._group.posix_gid))
    # NOTE(review): ``e_o_f`` is not defined in this function or its
    # signature -- presumably a module-level global; verify.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def write_to_file(ips_by_mac, file):
    """Write each MAC address and its IP addresses to the output file.

    @type ips_by_mac: dict
    @param ips_by_mac: Maps MAC-addresses to lists of IP-addresses
        associated with each MAC-address.

    @type file: string
    @param file: Path/name of the file where the data should be
        written to.
    """
    logger.info("Writing to export-file: '%s'" % file)
    out = SimilarSizeWriter(file, "w")
    out.max_pct_change = 10
    for mac_address in sorted(ips_by_mac.keys()):
        ip_list = ",".join(ips_by_mac[mac_address])
        out.write("%-18s %s\n" % (mac_address, ip_list))
    logger.info("Done writing to export-file")
    out.close()
def write_passwd(self, filename, shadow_file, e_o_f=False):
    """Write a passwd map to ``filename``.

    :param filename: passwd map file to write
    :param shadow_file: if set, write hashes to this shadow file and
        mask them in the passwd map
    :param e_o_f: if true, append an 'E_O_F' marker line
    """
    logger.debug("write_passwd: filename=%r, shadow_file=%r, spread=%r",
                 filename, shadow_file, self.spread)
    f = SimilarSizeWriter(filename, "w", encoding='latin-1')
    f.max_pct_change = 10
    if shadow_file:
        s = SimilarSizeWriter(shadow_file, "w")
        s.max_pct_change = 10
    # Sort rows by username (first element of each row).
    user_rows = sorted(self.generate_passwd(), key=operator.itemgetter(0))
    for row in user_rows:
        uname = row[0]
        if self.auth_method is None and row[1] != '*locked':
            # substitute pwdcrypt with an 'x' if auth_method is None
            passwd = 'x'
        else:
            passwd = row[1]
        rest = row[2:]
        if shadow_file:
            s.write("%s:%s:::\n" % (uname, passwd))
            # Real hash went to the shadow file; mask it here unless the
            # entry is a '*'-style locked marker.
            if not passwd[0] == '*':
                passwd = "!!"
        # NOTE(review): bare ``join`` -- presumably a module-level
        # helper (cf. ``self.join`` in a sibling class); verify.
        line = join([uname, passwd] + rest)
        f.write(line + "\n")
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
    if shadow_file:
        s.close()
def generate_filegroup(self, filename):
    """Write the exported groups as a UTF-8 filegroup map to ``filename``.

    Groups with illegal names or names longer than 8 characters are
    skipped, as are groups without a GID.
    """
    logger.debug("generate_group: %s" % filename)
    f = SimilarSizeWriter(filename, "w", encoding='UTF-8')
    f.max_pct_change = 5
    groups = self._exported_groups.keys()
    groups.sort()
    for group_id in groups:
        group_name = self._exported_groups[group_id]
        tmp = posix_group.illegal_name(group_name)
        if tmp or len(group_name) > 8:
            logger.warn("Bad groupname %s %s" % (group_name, tmp))
            continue
        try:
            group_members, user_members = self._expand_group(group_id)
        except Errors.NotFoundError:
            logger.warn("Group %s has no GID", group_id)
            continue
        # Drop member usernames that are not legal in this map.
        tmp_users = self._filter_illegal_usernames(user_members, group_name)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, tmp_users))
        f.write(self._wrap_line(group_name, ",".join(tmp_users),
                                ':*:%i:' % self._group.posix_gid))
    # NOTE(review): ``e_o_f`` is not defined in this function or its
    # signature -- presumably a module-level global; verify.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def write_passwd(self, filename, shadow_file, e_o_f=False):
    """Write a passwd map to ``filename``.

    :param filename: passwd map file to write
    :param shadow_file: if set, write hashes to this shadow file and
        mask them in the passwd map
    :param e_o_f: if true, append an 'E_O_F' marker line
    """
    logger.debug("write_passwd: " + str((filename, shadow_file, self.spread)))
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 10
    if shadow_file:
        s = SimilarSizeWriter(shadow_file, "w")
        s.max_pct_change = 10
    user_lines = self.generate_passwd()
    for l in user_lines:
        uname = l[0]
        if self.auth_method == 'NOCRYPT' and l[1] != '*locked':
            # substitute pwdcrypt with an 'x' if auth_method given to gen_nismaps
            # is NOCRYPT. Jazz, 2010-05-31
            passwd = 'x'
        else:
            passwd = l[1]
        rest = l[2:]
        if shadow_file:
            s.write("%s:%s:::\n" % (uname, passwd))
            # Real hash went to the shadow file; mask it here unless the
            # entry is a '*'-style locked marker.
            if not passwd[0] == '*':
                passwd = "!!"
        line = self.join([uname, passwd] + rest)
        f.write(line + "\n")
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
    if shadow_file:
        s.close()
def main():
    """Bootstrap Cerebrum objects and dump OFK user info to a text file."""
    global db, co, ac, p, ou, et, logger
    logger = Factory.get_logger("cronjob")
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    ac = Factory.get('Account')(db)
    p = Factory.get('Person')(db)
    ou = Factory.get('OU')(db)
    et = Email.EmailTarget(db)
    # Default output directory; override with -t/--txt-path.
    txt_path = "/cerebrum/var/cache/txt"
    opts, _ = getopt.getopt(sys.argv[1:], "t:", ["txt-path="])
    for opt, val in opts:
        if opt in ("-t", "--txt-path"):
            txt_path = val
    # Load dicts with misc info.
    get_account_info()
    get_person_contact()
    # Dump OFK info
    stream = SimilarSizeWriter("%s/ofk.txt" % txt_path, "w")
    stream.max_pct_change = 10
    users = process_txt_file(stream)
    stream.close()
def write_kull_info(self, kull_info_file):
    """Write an XML file describing every student's 'kull' participation
    registered in FS: kullklasse and kull memberships.
    """
    logger.info("Writing kull info to '%s'", kull_info_file)
    stream = SimilarSizeWriter(kull_info_file, mode='w',
                               encoding=XML_ENCODING)
    stream.max_pct_change = 50
    stream.write(xml.xml_hdr + "<data>\n")
    sources = (
        ("kullklasse",
         self.fs.undervisning.list_studenter_alle_kullklasser),
        ("kulldeltaker", self.fs.undervisning.list_studenter_alle_kull),
        ("kull", self.fs.info.list_kull),
    )
    for xml_tag, generator in sources:
        for row in generator():
            stream.write(xml.xmlify_dbrow(row, row.keys(), xml_tag) + "\n")
    stream.write("</data>\n")
    stream.close()
def write_undenh_student(self, undenh_student_file):
    """Write an overview of persons signed up for teaching units.

    Covers all teaching units in the current and the next semester.
    """
    logger.info("Writing undenh_student info to '%s'", undenh_student_file)
    out = SimilarSizeWriter(undenh_student_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # Attributes identifying one teaching unit.
    unit_keys = ('institusjonsnr', 'emnekode', 'versjonskode',
                 'terminnr', 'terminkode', 'arstall')
    for semester in ('current', 'next'):
        cols, undenh = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        for unit in undenh:
            unit_attrs = {}
            for key in unit_keys:
                unit_attrs[key] = unit[key]
            student_cols, students = self._ext_cols(
                self.fs.undervisning.list_studenter_underv_enhet(
                    **unit_attrs))
            for stud in students:
                attrs = unit_attrs.copy()
                attrs['fodselsdato'] = stud['fodselsdato']
                attrs['personnr'] = stud['personnr']
                out.write(xml.xmlify_dbrow({}, (), 'student',
                                           extra_attr=attrs) + "\n")
    out.write("</data>\n")
    out.close()
def write_edu_info(self, edu_info_file):
    """Write an XML file listing every student's educational activities
    registered in FS: undenh, undakt, kullklasse and kull memberships.
    """
    logger.info("Writing edu info for all students")
    out = SimilarSizeWriter(edu_info_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    sources = (
        ("undenh", self.fs.undervisning.list_studenter_alle_undenh),
        ("undakt", self.fs.undervisning.list_studenter_alle_undakt),
        ("kullklasse",
         self.fs.undervisning.list_studenter_alle_kullklasser),
        ("kull", self.fs.undervisning.list_studenter_alle_kull),
    )
    for xml_tag, generator in sources:
        logger.debug("Processing %s entries", xml_tag)
        for row in generator():
            out.write(xml.xmlify_dbrow(row, row.keys(), xml_tag) + "\n")
    out.write("</data>\n")
    out.close()
def main():
    """Bootstrap Cerebrum objects and dump OFK user info to a text file."""
    global db, co, ac, p, ou, et, logger
    logger = Factory.get_logger("cronjob")
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    ac = Factory.get('Account')(db)
    p = Factory.get('Person')(db)
    ou = Factory.get('OU')(db)
    et = Email.EmailTarget(db)
    # Default output directory; override with -t/--txt-path.
    txt_path = "/cerebrum/var/cache/txt"
    opts, _ = getopt.getopt(sys.argv[1:], "t:", ["txt-path="])
    for opt, val in opts:
        if opt in ("-t", "--txt-path"):
            txt_path = val
    # Load dicts with misc info.
    get_account_info()
    get_person_contact()
    # Dump OFK info
    stream = SimilarSizeWriter("%s/ofk.txt" % txt_path, "w")
    stream.max_pct_change = 10
    users = process_txt_file(stream)
    stream.close()
def write_forkurs_info(self, pre_course_file):
    """Write pre-course (forkurs) attendants as regkort/emnestud XML."""
    from mx.DateTime import now
    logger.info("Writing pre-course file to '%s'", pre_course_file)
    writer = SimilarSizeWriter(pre_course_file, mode='w',
                               encoding=XML_ENCODING)
    writer.max_pct_change = 50
    cols, course_attendants = self._ext_cols(self.fs.forkurs.list())
    writer.write(xml.xml_hdr + "<data>\n")
    for attendant in course_attendants:
        writer.write(
            '<regkort fodselsdato="{}" personnr="{}" dato_endring="{}" '
            'dato_opprettet="{}"/>\n'.format(attendant['fodselsdato'],
                                            attendant['personnr'],
                                            str(now()), str(now())))
        writer.write(
            '<emnestud fodselsdato="{}" personnr="{}" etternavn="{}" '
            'fornavn="{}" adrlin2_semadr="" postnr_semadr="" '
            'adrlin3_semadr="" adrlin2_hjemsted="" postnr_hjemsted="" '
            'adrlin3_hjemsted="" sprakkode_malform="NYNORSK" '
            'kjonn="X" studentnr_tildelt="{}" emnekode="FORGLU" '
            'versjonskode="1" terminkode="VÅR" arstall="2016" '
            'telefonlandnr_mobil="{}" telefonnr_mobil="{}"/>\n'.format(
                attendant['fodselsdato'], attendant['personnr'],
                attendant['etternavn'], attendant['fornavn'],
                attendant['studentnr_tildelt'],
                attendant['telefonlandnr'], attendant['telefonnr']))
    writer.write("</data>\n")
    writer.close()
def main(): filename = pj(cereconf.LDAP['dump_dir'], 'webaccounts.ldif') spread = base = None try: opts, args = getopt.getopt(sys.argv[1:], 'hf:b:s:', [ 'help', 'filename=', 'spread=', 'base=']) except getopt.GetoptError: usage(1) for opt, val in opts: if opt in ('-h', '--help',): usage() elif opt in ('-f', '--filename'): filename = val elif opt in ('-s', '--spread'): str2const = dict() for c in dir(co): tmp = getattr(co, c) str2const[str(tmp)] = tmp spread = str2const[val] elif opt in ('-b', '--base'): base = val if not (spread and base): print spread, base usage(1) print "foo" f = SimilarSizeWriter(filename, 'w') f.max_pct_change = 90 dump_accounts(f, spread, base) f.close()
def write_undenh_student(self, undenh_student_file):
    """Write an overview of persons signed up for teaching units.

    Covers all teaching units in the current and the next semester.
    """
    logger.info("Writing undenh_student info to '%s'", undenh_student_file)
    out = SimilarSizeWriter(undenh_student_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # Attributes identifying one teaching unit.
    unit_keys = ('institusjonsnr', 'emnekode', 'versjonskode',
                 'terminnr', 'terminkode', 'arstall')
    for semester in ('current', 'next'):
        cols, undenh = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        for unit in undenh:
            unit_attrs = {}
            for key in unit_keys:
                unit_attrs[key] = unit[key]
            student_cols, students = self._ext_cols(
                self.fs.undervisning.list_studenter_underv_enhet(
                    **unit_attrs))
            for stud in students:
                attrs = unit_attrs.copy()
                attrs['fodselsdato'] = stud['fodselsdato']
                attrs['personnr'] = stud['personnr']
                out.write(xml.xmlify_dbrow({}, (), 'student',
                                           extra_attr=attrs) + "\n")
    out.write("</data>\n")
    out.close()
def write_netpubl_info(outfile):
    """Write an XML file with net-publication status entries."""
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 15
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = _ext_cols(fs.person.list_status_nettpubl())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'nettpubl') + "\n")
    out.write("</data>\n")
    out.close()
def write_ou_info(self, institution_number, ou_file):
    """Write an XML file with information about all OUs.

    :param institution_number: institution whose OUs are listed
    :param ou_file: path of the XML file to write
    """
    logger.info("Writing OU info to '%s'", ou_file)
    f = SimilarSizeWriter(ou_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, ouer = self._ext_cols(self.fs.info.list_ou(institution_number))
    for o in ouer:
        sted = {}
        # Map FS column names onto the XML attribute names used
        # downstream; None columns are simply omitted.
        for fs_col, xml_attr in (
                ('faknr', 'fakultetnr'),
                ('instituttnr', 'instituttnr'),
                ('gruppenr', 'gruppenr'),
                ('stedakronym', 'akronym'),
                ('stedakronym', 'forkstednavn'),
                ('stednavn_bokmal', 'stednavn'),
                ('stedkode_konv', 'stedkode_konv'),
                ('faknr_org_under', 'fakultetnr_for_org_sted'),
                ('instituttnr_org_under', 'instituttnr_for_org_sted'),
                ('gruppenr_org_under', 'gruppenr_for_org_sted'),
                ('adrlin1', 'adresselinje1_intern_adr'),
                ('adrlin2', 'adresselinje2_intern_adr'),
                ('postnr', 'poststednr_intern_adr'),
                ('adrlin1_besok', 'adresselinje1_besok_adr'),
                ('adrlin2_besok', 'adresselinje2_besok_adr'),
                ('postnr_besok', 'poststednr_besok_adr')):
            if o[fs_col] is not None:
                sted[xml_attr] = xml.escape_xml_attr(o[fs_col])
        komm = []
        # Collect contact entries (phone/fax/email/url) per OU.
        for fs_col, typekode in (('telefonnr', 'EKSTRA TLF'),
                                 ('faxnr', 'FAX'),
                                 ('emailadresse', 'EMAIL'),
                                 ('url', 'URL')):
            if o[fs_col]:  # Skip NULLs and empty strings
                komm.append({
                    'kommtypekode': xml.escape_xml_attr(typekode),
                    'kommnrverdi': xml.escape_xml_attr(o[fs_col])
                })
        # TODO: The 'url' and 'bibsysbeststedkode' columns are fetched
        # from FS but are not included in the output here.
        f.write('<sted ' +
                ' '.join(["%s=%s" % item for item in sted.items()]) +
                '>\n')
        for k in komm:
            f.write('<komm ' +
                    ' '.join(["%s=%s" % item for item in k.items()]) +
                    ' />\n')
        f.write('</sted>\n')
    f.write("</data>\n")
    f.close()
def write_personrole_info(outfile):
    """Write an XML file with all roles defined in FS.PERSONROLLE."""
    logger.info("Writing personrolle info to '%s'" % outfile)
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 20
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = _ext_cols(fs.undervisning.list_alle_personroller())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'rolle') + "\n")
    out.write("</data>\n")
    out.close()
def main():
    """Parse the -o/--output option and write the generated list there."""
    opts, _ = getopt.getopt(sys.argv[1:], "o:", ("output=",))
    filename = None
    for opt, val in opts:
        if opt in ("-o", "--output"):
            filename = val
    out = SimilarSizeWriter(filename, "w")
    out.max_pct_change = 50
    output_file(sort_list(generate_list()), out)
    out.close()
def main():
    """Parse the -o/--output option and write the generated list there."""
    opts, _ = getopt.getopt(sys.argv[1:], "o:", ("output=",))
    filename = None
    for opt, val in opts:
        if opt in ("-o", "--output"):
            filename = val
    out = SimilarSizeWriter(filename, "w", encoding='UTF-8')
    out.max_pct_change = 50
    output_file(sorted(generate_list()), out)
    out.close()
def write_emne_info(outfile):
    """Write an XML file with all defined courses (emner)."""
    logger.info("Writing emne info to '%s'" % outfile)
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 15
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = _ext_cols(fs.info.list_emner())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'emne') + "\n")
    out.write("</data>\n")
    out.close()
def main():
    """Merge one or more XML files into a single <data> document.

    Options are processed in the order given: -t/--tag and -d/--delim
    must precede the -f/-a input files they apply to, and -o/--out
    comes last to trigger the actual write.
    """
    # legacy log configuration
    Cerebrum.logutils.autoconf('cronjob', None)
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'd:f:o:t:a:',
            ['delim=', 'file=', 'out=', 'tag=', 'append='])
    except getopt.GetoptError:
        usage(2)
    logger.info('Start')
    big_xml = {}
    for opt, val in opts:
        if opt in ('-t', '--tag'):
            tag = val
            logger.debug('set tag=%r', tag)
        elif opt in ('-d', '--delim'):
            delim = val.split(":")
            logger.debug('set delim=%r', delim)
        elif opt in ('-f', '--file'):
            CollectParser(val, big_xml, delim)
            logger.debug('collecting data from %r with delim=%r', val, delim)
        elif opt in ('-a', '--append'):
            # True -> append to existing entries instead of replacing.
            CollectParser(val, big_xml, delim, True)
            logger.debug('appending data from %r with delim=%r', val, delim)
        elif opt in ('-o', '--out'):
            logger.debug('writing data to %r', val)
            f = SimilarSizeWriter(val, "w")
            f.max_pct_change = 50
            xml = XMLHelper()
            f.write(xml.xml_hdr + "<data>\n")
            for bx_key in big_xml.keys():
                # Keys were joined with the broken-bar character; split
                # back into one component per delim field.
                bx_delim = bx_key.split("¦")
                f.write("<%s %s>\n" % (
                    tag,
                    " ".join(["%s=%s" % (delim[n],
                                         xml.escape_xml_attr(bx_delim[n]))
                              for n in range(len(delim))])))
                for tmp_tag in big_xml[bx_key]:
                    tmp = tmp_tag['TagName']
                    del(tmp_tag['TagName'])
                    f.write(" <%s %s/>\n" % (
                        tmp,
                        " ".join(
                            ["%s=%s" % (tk,
                                        xml.escape_xml_attr(tmp_tag[tk]))
                             for tk in tmp_tag.keys()])))
                f.write("</%s>\n" % tag)
            f.write("</data>\n")
            f.close()
            logger.info('Wrote merged xml to %r', val)
    logger.info('Done')
def write_netgroup(self, filename, e_o_f=False, include_persons=False):
    """Write the netgroup map to ``filename``.

    :param e_o_f: append an 'E_O_F' marker line when true
    :param include_persons: forwarded to generate_netgroup()
    """
    logger.debug("generate_netgroup: %s" % filename)
    out = SimilarSizeWriter(filename, "w")
    out.max_pct_change = 5
    entries = self.generate_netgroup(include_persons=include_persons)
    for name, members in entries:
        out.write(self._wrap_line(name, members, ' ', is_ng=True))
    if e_o_f:
        out.write('E_O_F\n')
    out.close()
def write_emne_info(self, emne_info_file):
    """Write an XML file with all defined courses (emner)."""
    logger.info("Writing emne info to '%s'", emne_info_file)
    out = SimilarSizeWriter(emne_info_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(self.fs.info.list_emner())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'emne') + "\n")
    out.write("</data>\n")
    out.close()
def write_netgroup(self, filename, e_o_f=False):
    """Write the netgroup map to ``filename``.

    :param e_o_f: append an 'E_O_F' marker line when true
    """
    logger.debug("generate_netgroup: %s" % filename)
    out = SimilarSizeWriter(filename, "w")
    out.max_pct_change = 5
    for name, members in self.generate_netgroup():
        out.write(self._wrap_line(name, members, ' ', is_ng=True))
    if e_o_f:
        out.write('E_O_F\n')
    out.close()
def write_regkort_info(outfile):
    """Write an XML file of semester registrations for the current
    semester."""
    logger.info("Writing regkort info to '%s'" % outfile)
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 10
    out.write(xml.xml_hdr + "<data>\n")
    cols, regkort = _ext_cols(fs.student.list_semreg())
    names = xml.conv_colnames(cols)
    for entry in regkort:
        out.write(xml.xmlify_dbrow(entry, names, 'regkort') + "\n")
    out.write("</data>\n")
    out.close()
def write_ou_info(self, institution_number, ou_file):
    """Write an XML file with information about all OUs.

    :param institution_number: institution whose OUs are listed
    :param ou_file: path of the XML file to write
    """
    logger.info("Writing OU info to '%s'", ou_file)
    f = SimilarSizeWriter(ou_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    cols, ouer = self._ext_cols(
        self.fs.info.list_ou(institution_number))
    for o in ouer:
        sted = {}
        # Map FS column names onto the XML attribute names used
        # downstream; None columns are simply omitted.
        for fs_col, xml_attr in (
                ('faknr', 'fakultetnr'),
                ('instituttnr', 'instituttnr'),
                ('gruppenr', 'gruppenr'),
                ('stedakronym', 'akronym'),
                ('stedakronym', 'forkstednavn'),
                ('stednavn_bokmal', 'stednavn'),
                ('stedkode_konv', 'stedkode_konv'),
                ('faknr_org_under', 'fakultetnr_for_org_sted'),
                ('instituttnr_org_under', 'instituttnr_for_org_sted'),
                ('gruppenr_org_under', 'gruppenr_for_org_sted'),
                ('adrlin1', 'adresselinje1_intern_adr'),
                ('adrlin2', 'adresselinje2_intern_adr'),
                ('postnr', 'poststednr_intern_adr'),
                ('adrlin1_besok', 'adresselinje1_besok_adr'),
                ('adrlin2_besok', 'adresselinje2_besok_adr'),
                ('postnr_besok', 'poststednr_besok_adr')):
            if o[fs_col] is not None:
                sted[xml_attr] = xml.escape_xml_attr(o[fs_col])
        komm = []
        # Collect contact entries (phone/fax/email/url) per OU.
        for fs_col, typekode in (
                ('telefonnr', 'EKSTRA TLF'),
                ('faxnr', 'FAX'),
                ('emailadresse', 'EMAIL'),
                ('url', 'URL')
        ):
            if o[fs_col]:  # Skip NULLs and empty strings
                komm.append(
                    {'kommtypekode': xml.escape_xml_attr(typekode),
                     'kommnrverdi': xml.escape_xml_attr(o[fs_col])})
        # TODO: The 'url' and 'bibsysbeststedkode' columns are fetched
        # from FS but are not included in the output here.
        f.write('<sted ' +
                ' '.join(["%s=%s" % item for item in sted.items()]) +
                '>\n')
        for k in komm:
            f.write('<komm ' +
                    ' '.join(["%s=%s" % item for item in k.items()]) +
                    ' />\n')
        f.write('</sted>\n')
    f.write("</data>\n")
    f.close()
def write_role_info(self, role_file):
    """Write an XML file with all roles defined in FS.PERSONROLLE."""
    logger.info("Writing role info to '%s'", role_file)
    out = SimilarSizeWriter(role_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, roles = self._ext_cols(
        self.fs.undervisning.list_alle_personroller())
    names = xml.conv_colnames(cols)
    for role in roles:
        out.write(xml.xmlify_dbrow(role, names, 'rolle') + "\n")
    out.write("</data>\n")
    out.close()
def write_regkort_info(self, regkort_file):
    """Write an XML file of semester registrations for the current
    semester."""
    logger.info("Writing regkort info to '%s'", regkort_file)
    out = SimilarSizeWriter(regkort_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, regkort = self._ext_cols(self.fs.student.list_semreg())
    names = xml.conv_colnames(cols)
    for entry in regkort:
        out.write(xml.xmlify_dbrow(entry, names, 'regkort') + "\n")
    out.write("</data>\n")
    out.close()
def write_netpubl_info(self, netpubl_file):
    """Write an XML file with net-publication status entries."""
    logger.info("Writing nettpubl info to '%s'", netpubl_file)
    out = SimilarSizeWriter(netpubl_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(self.fs.person.list_status_nettpubl())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'nettpubl') + "\n")
    out.write("</data>\n")
    out.close()
def write_betalt_papir_info(outfile):
    """Write an XML file of everyone who is either exempt from the copy
    fee or has paid it."""
    logger.info("Writing betaltpapir info to '%s'" % outfile)
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 10
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = _ext_cols(fs.betaling.list_kopiavgift_data(
        kun_fritak=False, semreg=True))
    names = xml.conv_colnames(cols)
    for row in rows:
        fix_float(row)
        out.write(xml.xmlify_dbrow(row, names, 'betalt') + "\n")
    out.write("</data>\n")
    out.close()
def write_netpubl_info(self, netpubl_file):
    """Write an XML file with net-publication status entries."""
    logger.info("Writing nettpubl info to '%s'", netpubl_file)
    out = SimilarSizeWriter(netpubl_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(self.fs.person.list_status_nettpubl())
    names = xml.conv_colnames(cols)
    for row in rows:
        out.write(xml.xmlify_dbrow(row, names, 'nettpubl') + "\n")
    out.write("</data>\n")
    out.close()
def write_topic_info(outfile):
    """Write an XML file with exam registrations (topics)."""
    # TODO: This file will change with the new scheme :-(
    logger.info("Writing topic info to '%s'" % outfile)
    out = SimilarSizeWriter(outfile, "w")
    out.max_pct_change = 10
    out.write(xml.xml_hdr + "<data>\n")
    cols, topics = _ext_cols(fs.student.list_eksamensmeldinger())
    names = xml.conv_colnames(cols)
    for topic in topics:
        # The Oracle driver thinks the result of a union of ints is float
        fix_float(topic)
        out.write(xml.xmlify_dbrow(topic, names, 'topic') + "\n")
    out.write("</data>\n")
    out.close()
def write_misc_info(self, misc_file, tag, func_name):
    """Write an XML file with data from the named access_FS function.

    :param misc_file: path of the XML file to write
    :param tag: XML tag used for each row
    :param func_name: dotted attribute path resolved relative to
        ``self.fs``
    """
    logger.info("Writing misc info to '%s'", misc_file)
    out = SimilarSizeWriter(misc_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # Walk the dotted path to the list function.
    func = self.fs
    for attr in func_name.split('.'):
        func = getattr(func, attr)
    cols, rows = self._ext_cols(func())
    names = xml.conv_colnames(cols)
    for row in rows:
        self.fix_float(row)
        out.write(xml.xmlify_dbrow(row, names, tag) + "\n")
    out.write("</data>\n")
    out.close()
def write_edu_info(self, edu_file):
    """Write one file with the teaching information for all students.

    For each student we list all affiliations to undenh, undakt, evu,
    kursakt and kull.

    The main problem in this method is that we must build a huge dict
    with all of the teaching information; it uses a lot of memory.

    Warning: no consistency check is made that teaching elements
    mentioned in the output file actually exist in the other files
    generated by this script; a student S may be registered at an
    undakt U1 while U1 is missing from undervisningsaktiveter.xml.

    Data sources:
      fs.undervisning.list_studenter_alle_kull()   <- kull membership
      fs.undervisning.list_studenter_alle_undenh() <- undenh membership
      fs.undervisning.list_studenter_alle_undakt() <- undakt membership
      fs.evu.list_studenter_alle_kursakt()         <- kursakt membership
      fs.evu.list()                                <- evu membership
    """
    logger.info("Writing edu info to '%s'", edu_file)
    f = SimilarSizeWriter(edu_file, mode='w', encoding=XML_ENCODING)
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    for triple in (("kull", None,
                    self.fs.undervisning.list_studenter_alle_kull),
                   ("undenh", None,
                    self.fs.undervisning.list_studenter_alle_undenh),
                   ("undakt", None,
                    self.fs.undervisning.list_studenter_alle_undakt),
                   ("evu", ("fodselsdato", "personnr",
                            "etterutdkurskode", "kurstidsangivelsekode"),
                    self.fs.evu.list),
                   ("kursakt", None,
                    self.fs.evu.list_studenter_alle_kursakt)):
        kind, fields, selector = triple
        logger.debug("Processing %s entries", kind)
        for row in selector():
            if fields is None:
                tmp_row = row
                keys = row.keys()
            else:
                # Keep only the identifying fields for this entry kind.
                # (The genexp's ``f`` is scoped to the genexp and does
                # not clobber the file handle.)
                tmp_row = dict((f, row[f]) for f in fields)
                keys = fields
            f.write(xml.xmlify_dbrow(tmp_row, keys, kind) + '\n')
    f.write("</data>\n")
    f.close()
def write_evukurs_info(self, evu_kursinfo_file):
    """Write data on all EVU courses (needed e.g. to build the EVU part
    of CF)."""
    logger.info("Writing evukurs info to '%s'", evu_kursinfo_file)
    out = SimilarSizeWriter(evu_kursinfo_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, courses = self._ext_cols(self.fs.evu.list_kurs())
    names = xml.conv_colnames(cols)
    for course in courses:
        out.write(xml.xmlify_dbrow(course, names, "evukurs") + "\n")
    out.write("</data>\n")
    out.close()
def write_misc_info(self, misc_file, tag, func_name):
    """Write an XML file with data from the named access_FS function.

    :param misc_file: path of the XML file to write
    :param tag: XML tag used for each row
    :param func_name: dotted attribute path resolved relative to
        ``self.fs``
    """
    logger.info("Writing misc info to '%s'", misc_file)
    out = SimilarSizeWriter(misc_file, mode='w', encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # Walk the dotted path to the list function.
    func = self.fs
    for attr in func_name.split('.'):
        func = getattr(func, attr)
    cols, rows = self._ext_cols(func())
    names = xml.conv_colnames(cols)
    for row in rows:
        self.fix_float(row)
        out.write(xml.xmlify_dbrow(row, names, tag) + "\n")
    out.write("</data>\n")
    out.close()
def write_edu_info(outfile):
    """Write one file with the teaching information for all students.

    For each student we list all affiliations to undenh, undakt, evu,
    kursakt and kull.

    The main problem in this method is that we must build a huge dict
    with all of the teaching information; it uses a lot of memory.

    Warning: no consistency check is made that teaching elements
    mentioned in outfile actually exist in the other files generated by
    this script; a student S may be registered at an undakt U1 while U1
    is missing from undervisningsaktiveter.xml.

    Data sources:
      fs.undervisning.list_studenter_alle_kull()   <- kull membership
      fs.undervisning.list_studenter_alle_undenh() <- undenh membership
      fs.undervisning.list_studenter_alle_undakt() <- undakt membership
      fs.evu.list_studenter_alle_kursakt()         <- kursakt membership
      fs.evu.list()                                <- evu membership
    """
    f = SimilarSizeWriter(outfile, "w")
    f.max_pct_change = 15
    f.write(xml.xml_hdr + "<data>\n")
    for triple in (("kull", None,
                    fs.undervisning.list_studenter_alle_kull),
                   ("undenh", None,
                    fs.undervisning.list_studenter_alle_undenh),
                   ("undakt", None,
                    fs.undervisning.list_studenter_alle_undakt),
                   ("evu", ("fodselsdato", "personnr",
                            "etterutdkurskode", "kurstidsangivelsekode"),
                    fs.evu.list),
                   ("kursakt", None, fs.evu.list_studenter_alle_kursakt)):
        kind, fields, selector = triple
        logger.debug("Processing %s entries", kind)
        for row in selector():
            if fields is None:
                tmp_row = row
                keys = row.keys()
            else:
                # Keep only the identifying fields for this entry kind.
                # (The genexp's ``f`` is scoped to the genexp and does
                # not clobber the file handle.)
                tmp_row = dict((f, row[f]) for f in fields)
                keys = fields
            f.write(xml.xmlify_dbrow(tmp_row, keys, kind) + '\n')
    f.write("</data>\n")
    f.close()
def write_undenh_metainfo(self, undervenh_file):
    """Write metadata about teaching units for the current and the next
    semester."""
    logger.info("Writing undenh_meta info to '%s'", undervenh_file)
    out = SimilarSizeWriter(undervenh_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<undervenhet>\n")
    for semester in ('current', 'next'):
        cols, units = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        names = xml.conv_colnames(cols)
        for unit in units:
            out.write(xml.xmlify_dbrow(unit, names, 'undenhet') + "\n")
    out.write("</undervenhet>\n")
    out.close()
def write_evukurs_info(self, evu_kursinfo_file):
    """Write data on all EVU courses (needed e.g. to build the EVU part
    of CF)."""
    logger.info("Writing evukurs info to '%s'", evu_kursinfo_file)
    out = SimilarSizeWriter(evu_kursinfo_file, mode='w',
                            encoding=XML_ENCODING)
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, courses = self._ext_cols(self.fs.evu.list_kurs())
    names = xml.conv_colnames(cols)
    for course in courses:
        out.write(xml.xmlify_dbrow(course, names, "evukurs") + "\n")
    out.write("</data>\n")
    out.close()
def write_person_info(self, person_file): """Lager fil med informasjon om alle personer registrert i FS som vi muligens også ønsker å ha med i Cerebrum. En person kan forekomme flere ganger i filen.""" # TBD: Burde vi cache alle data, slik at vi i stedet kan lage en # fil der all informasjon om en person er samlet under en egen # <person> tag? logger.info("Writing person info to '%s'", person_file) f = SimilarSizeWriter(person_file, mode='w', encoding=XML_ENCODING) f.max_pct_change = 50 f.write(xml.xml_hdr + "<data>\n") # Aktive studenter cols, students = self._ext_cols(self.fs.student.list_aktiv()) for s in students: self.fix_float(s) f.write( xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'aktiv') + "\n") # Eksamensmeldinger cols, students = self._ext_cols( self.fs.student.list_eksamensmeldinger()) for s in students: f.write( xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'eksamen') + "\n") # EVU students # En del EVU studenter vil være gitt av søket over cols, students = self._ext_cols(self.fs.evu.list()) for e in students: f.write( xml.xmlify_dbrow(e, xml.conv_colnames(cols), 'evu') + "\n") # Aktive fagpersoner cols, fagperson = self._ext_cols( self.fs.undervisning.list_fagperson_semester()) for p in fagperson: f.write( xml.xmlify_dbrow( p, xml.conv_colnames(cols), 'fagperson') + "\n") f.write("</data>\n") f.close()
def generate_netgroup(self, filename):
    """Write the netgroup map for all exported groups to ``filename``."""
    logger.debug("generate_netgroup: %s" % filename)
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 5
    for group_id in self._exported_groups.keys():
        group_name = self._exported_groups[group_id]
        group_members, user_members = self._expand_group(group_id)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, user_members))
        f.write(self._wrap_line(group_name,
                                self._format_members(
                                    group_members, user_members,
                                    group_name),
                                ' ', is_ng=True))
    # NOTE(review): ``e_o_f`` is not defined in this function or its
    # signature -- presumably a module-level global; verify.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
def list_quotas(fname, hostname, diskname, spread):
    """Write disk quota data to ``fname``.

    Scope is, in priority order: a single disk (``diskname``), every
    disk on a host (``hostname``), or every disk carrying the
    disk-quota trait.
    """
    out = SimilarSizeWriter(fname, "w")
    out.max_pct_change = 10
    disk = Factory.get("Disk")(db)
    if diskname:
        # A single disk, looked up by path.
        disk.find_by_path(diskname)
        list_disk_quotas(out, disk.entity_id, spread)
    elif hostname:
        # Every disk on the named host.
        host = Factory.get("Host")(db)
        host.find_by_name(hostname)
        for row in disk.list(host_id=host.entity_id, spread=spread):
            list_disk_quotas(out, row['disk_id'], spread)
    else:
        # Every disk carrying the disk-quota trait.
        for row in disk.list_traits(co.trait_disk_quota):
            list_disk_quotas(out, row['entity_id'], spread)
    out.close()
def write_person_info(self, person_file): """Lager fil med informasjon om alle personer registrert i FS som vi muligens også ønsker å ha med i Cerebrum. En person kan forekomme flere ganger i filen.""" # TBD: Burde vi cache alle data, slik at vi i stedet kan lage en # fil der all informasjon om en person er samlet under en egen # <person> tag? logger.info("Writing person info to '%s'", person_file) f = SimilarSizeWriter(person_file, mode='w', encoding=XML_ENCODING) f.max_pct_change = 50 f.write(xml.xml_hdr + "<data>\n") # Aktive studenter cols, students = self._ext_cols(self.fs.student.list_aktiv()) for s in students: self.fix_float(s) f.write( xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'aktiv') + "\n") # Eksamensmeldinger cols, students = self._ext_cols( self.fs.student.list_eksamensmeldinger()) for s in students: f.write( xml.xmlify_dbrow(s, xml.conv_colnames(cols), 'eksamen') + "\n") # EVU students # En del EVU studenter vil være gitt av søket over cols, students = self._ext_cols(self.fs.evu.list()) for e in students: f.write(xml.xmlify_dbrow(e, xml.conv_colnames(cols), 'evu') + "\n") # Aktive fagpersoner cols, fagperson = self._ext_cols( self.fs.undervisning.list_fagperson_semester()) for p in fagperson: f.write( xml.xmlify_dbrow(p, xml.conv_colnames(cols), 'fagperson') + "\n") f.write("</data>\n") f.close()
def main():
    """Merge one or more XML files into a single <data> document.

    Options are processed in the order given: -t/--tag and -d/--delim
    must precede the -f/-a input files they apply to, and -o/--out
    comes last to trigger the actual write.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'd:f:o:t:a:',
                                   ['delim=', 'file=', 'out=', 'tag=',
                                    'append='])
    except getopt.GetoptError:
        usage(2)
    big_xml = {}
    for opt, val in opts:
        if opt in ('-t', '--tag'):
            tag = val
        elif opt in ('-d', '--delim'):
            delim = val.split(":")
        elif opt in ('-f', '--file'):
            CollectParser(val, big_xml, delim)
        elif opt in ('-a', '--append'):
            # True -> append to existing entries instead of replacing.
            CollectParser(val, big_xml, delim, True)
        elif opt in ('-o', '--out'):
            f = SimilarSizeWriter(val, "w")
            f.max_pct_change = 50
            xml = XMLHelper()
            f.write(xml.xml_hdr + "<data>\n")
            for bx_key in big_xml.keys():
                # Keys were joined with the broken-bar character; split
                # back into one component per delim field.
                bx_delim = bx_key.split("¦")
                f.write("<%s %s>\n" % (
                    tag,
                    " ".join(["%s=%s" % (delim[n],
                                         xml.escape_xml_attr(bx_delim[n]))
                              for n in range(len(delim))])))
                for tmp_tag in big_xml[bx_key]:
                    tmp = tmp_tag['TagName']
                    del(tmp_tag['TagName'])
                    f.write(" <%s %s/>\n" % (
                        tmp,
                        " ".join(
                            ["%s=%s" % (tk,
                                        xml.escape_xml_attr(tmp_tag[tk]))
                             for tk in tmp_tag.keys()])))
                f.write("</%s>\n" % tag)
            f.write("</data>\n")
            f.close()
def write_forkurs_info(outfile):
    """Write pre-course (forkurs) attendants as regkort/emnestud XML."""
    from mx.DateTime import now
    logger.info("Writing pre-course file to '{}'".format(outfile))
    writer = SimilarSizeWriter(outfile, "w")
    writer.max_pct_change = 10
    cols, course_attendants = _ext_cols(fs.forkurs.list())
    writer.write(xml.xml_hdr + "<data>\n")
    for attendant in course_attendants:
        writer.write(
            '<regkort fodselsdato="{}" personnr="{}" dato_endring="{}" '
            'dato_opprettet="{}"/>\n'.format(attendant['fodselsdato'],
                                            attendant['personnr'],
                                            str(now()), str(now())))
        writer.write(
            '<emnestud fodselsdato="{}" personnr="{}" etternavn="{}" '
            'fornavn="{}" adrlin2_semadr="" postnr_semadr="" '
            'adrlin3_semadr="" adrlin2_hjemsted="" postnr_hjemsted="" '
            'adrlin3_hjemsted="" sprakkode_malform="NYNORSK" '
            'kjonn="X" studentnr_tildelt="{}" emnekode="FORGLU" '
            'versjonskode="1" terminkode="VÅR" arstall="2016" '
            'telefonlandnr_mobil="{}" telefonnr_mobil="{}"/>\n'.format(
                attendant['fodselsdato'], attendant['personnr'],
                attendant['etternavn'], attendant['fornavn'],
                attendant['studentnr_tildelt'],
                attendant['telefonlandnr'], attendant['telefonnr']))
    writer.write("</data>\n")
    writer.close()
def main():
    """Merge one or more XML files into a single <data> document.

    Options are processed in the order given: -t/--tag and -d/--delim
    must precede the -f/-a input files they apply to, and -o/--out
    comes last to trigger the actual write.
    """
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'd:f:o:t:a:',
            ['delim=', 'file=', 'out=', 'tag=', 'append='])
    except getopt.GetoptError:
        usage(2)
    big_xml = {}
    for opt, val in opts:
        if opt in ('-t', '--tag'):
            tag = val
        elif opt in ('-d', '--delim'):
            delim = val.split(":")
        elif opt in ('-f', '--file'):
            CollectParser(val, big_xml, delim)
        elif opt in ('-a', '--append'):
            # True -> append to existing entries instead of replacing.
            CollectParser(val, big_xml, delim, True)
        elif opt in ('-o', '--out'):
            f = SimilarSizeWriter(val, "w")
            f.max_pct_change = 10
            xml = XMLHelper()
            f.write(xml.xml_hdr + "<data>\n")
            for bx_key in big_xml.keys():
                # Keys were joined with the broken-bar character; split
                # back into one component per delim field.
                bx_delim = bx_key.split("¦")
                f.write("<%s %s>\n" % (tag, " ".join([
                    "%s=%s" % (delim[n],
                               xml.escape_xml_attr(bx_delim[n]))
                    for n in range(len(delim))
                ])))
                for tmp_tag in big_xml[bx_key]:
                    tmp = tmp_tag['TagName']
                    del (tmp_tag['TagName'])
                    f.write(" <%s %s/>\n" % (tmp, " ".join([
                        "%s=%s" % (tk, xml.escape_xml_attr(tmp_tag[tk]))
                        for tk in tmp_tag.keys()
                    ])))
                f.write("</%s>\n" % tag)
            f.write("</data>\n")
            f.close()
def write_misc_info(outfile, tag, func_name):
    """Write an XML file with data from the named access_FS function.

    :param outfile: path of the XML file to write
    :param tag: XML tag used for each row; also selects the allowed
        size change for the output file
    :param func_name: dotted attribute path to the list function,
        resolved relative to the module-level ``fs`` object
    """
    logger.info("Writing misc info to '%s'" % outfile)
    f = SimilarSizeWriter(outfile, "w")
    # Different data sets are allowed to drift by different amounts
    # between runs.
    if tag == 'aktivitet':
        f.max_pct_change = 20
    elif tag == 'enhet':
        f.max_pct_change = 15
    else:
        f.max_pct_change = 10
    f.write(xml.xml_hdr + "<data>\n")
    # It's still not foolproof, but hopefully much more sane than simply
    # eval'ing.  (The accumulator is named ``func`` so it no longer
    # shadows the builtin ``next``.)
    components = func_name.split(".")
    func = fs
    for c in components:
        func = getattr(func, c)
    cols, dta = _ext_cols(func())
    for t in dta:
        fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), tag) + "\n")
    f.write("</data>\n")
    f.close()