def ldif_user(self, data):
    """Build the (dn, entry) pair for one posixAccount user object.

    Starts from the crypt hash in data.passwd (falling back to an
    invalid marker), then lets any configured auth method in self.a_meth
    override it via self.auth_format / self.auth_data.
    """
    if data.passwd is not None:
        passwd = '{crypt}' + data.passwd
    else:
        # No usable hash (e.g. quarantined account): make it un-loginable.
        passwd = '{crypt}*Invalid'
    for uauth in [m for m in self.a_meth if m in self.auth_format]:
        try:
            fmt = self.auth_format[uauth]['format']
            if fmt:
                passwd = fmt % self.auth_data[data.account_id][uauth]
            else:
                passwd = self.auth_data[data.account_id][uauth]
        except KeyError:
            # No auth data for this account/method; keep previous value.
            pass
    entry = {
        'objectClass': ['top', 'account', 'posixAccount'],
        'cn': (iso2utf(data.cn),),
        'uid': (data.uname,),
        'uidNumber': (data.uid,),
        'gidNumber': (data.gid,),
        'homeDirectory': (data.home,),
        'userPassword': (passwd,),
        'loginShell': (data.shell,),
        'gecos': (data.gecos,),
    }
    dn = ','.join(('uid=' + data.uname, self.user_dn))
    return dn, entry
def ldif_user(self, data):
    """Return (dn, entry) describing *data* as an LDAP posixAccount."""
    passwd = data.passwd
    # Missing password (e.g. quarantined) => deliberately invalid hash.
    passwd = '{crypt}*Invalid' if passwd is None else '{crypt}' + passwd
    for uauth in self.a_meth:
        if uauth not in self.auth_format:
            continue
        try:
            user_auth = self.auth_data[data.account_id][uauth]
            fmt = self.auth_format[uauth]['format']
            passwd = fmt % user_auth if fmt else user_auth
        except KeyError:
            # Account lacks data for this method; keep current passwd.
            pass
    attrs = {}
    attrs['objectClass'] = ['top', 'account', 'posixAccount']
    attrs['cn'] = (iso2utf(data.cn),)
    attrs['uid'] = (data.uname,)
    attrs['uidNumber'] = (data.uid,)
    attrs['gidNumber'] = (data.gid,)
    attrs['homeDirectory'] = (data.home,)
    attrs['userPassword'] = (passwd,)
    attrs['loginShell'] = (data.shell,)
    attrs['gecos'] = (data.gecos,)
    return ','.join(('uid=' + data.uname, self.user_dn)), attrs
def make_ou_entry(self, ou_id, parent_dn):
    """Build the (dn, entry) pair for one OU.

    Changes from superclass:
    Add object class norEduOrgUnit and its attributes norEduOrgAcronym,
    cn, norEduOrgUnitUniqueIdentifier, norEduOrgUniqueIdentifier.
    If a DN is not unique, prepend the norEduOrgUnitUniqueIdentifier
    (presumably handled inside make_ou_dn -- not visible here).
    """
    self.ou.clear()
    self.ou.find(ou_id)
    if self.test_omit_ou():
        return parent_dn, None
    # Name variants in preference order; lower index = more preferred.
    name_variants = (self.const.ou_name_acronym,
                     self.const.ou_name_short,
                     self.const.ou_name,
                     self.const.ou_name_display)
    var2pref = dict((v, i) for i, v in enumerate(name_variants))
    ou_names = {}
    for row in self.ou.search_name_with_language(
            entity_id=self.ou.entity_id,
            name_language=self.languages,
            name_variant=name_variants):
        name = iso2utf(row["name"].strip())
        if name:
            pref = var2pref[int(row['name_variant'])]
            ou_names.setdefault(pref, []).append(
                (int(row['name_language']), name))
    if not ou_names:
        self.logger.warn("No names could be located for ou_id=%s", ou_id)
        return parent_dn, None
    ldap_ou_id = self.get_orgUnitUniqueID()
    self.ou_uniq_id2ou_id[ldap_ou_id] = ou_id
    self.ou_id2ou_uniq_id[ou_id] = ldap_ou_id
    entry = {
        'objectClass': ['top', 'organizationalUnit', 'norEduOrgUnit'],
        self.FEIDE_attr_ou_id: (ldap_ou_id,)}
    if 0 in ou_names:
        # Preference 0 is the acronym variant.
        self.add_lang_names(entry, 'norEduOrgAcronym', ou_names[0])
    ou_names = [names for _, names in sorted(ou_names.items())]
    for names in ou_names:
        self.add_lang_names(entry, 'ou', names)
    self.add_lang_names(entry, 'cn', ou_names[-1])
    entry.update(self.FEIDE_ou_common_attrs)
    if self.FEIDE_class_obsolete:
        entry['objectClass'].append(self.FEIDE_class_obsolete)
    if self.norEduOrgUniqueID:
        entry['norEduOrgUniqueNumber'] = self.norEduOrgUniqueID
    entry['norEduOrgUnitUniqueNumber'] = (ldap_ou_id,)
    dn = self.make_ou_dn(entry, parent_dn or self.ou_dn)
    if not dn:
        return parent_dn, None
    for attr in entry.keys():
        if attr == 'ou' or attr.startswith('ou;'):
            entry[attr] = self.attr_unique(entry[attr], normalize_string)
    self.fill_ou_entry_contacts(entry)
    self.update_ou_entry(entry)
    return dn, entry
def make_ou_entry(self, ou_id, parent_dn):
    """Return (dn, entry) for one organizational unit.

    Changes from superclass:
    Add object class norEduOrgUnit and its attributes norEduOrgAcronym,
    cn, norEduOrgUnitUniqueIdentifier, norEduOrgUniqueIdentifier.
    If a DN is not unique, prepend the norEduOrgUnitUniqueIdentifier.
    """
    self.ou.clear()
    self.ou.find(ou_id)
    if self.test_omit_ou():
        return parent_dn, None
    variants = (self.const.ou_name_acronym,
                self.const.ou_name_short,
                self.const.ou_name,
                self.const.ou_name_display)
    # Map each variant to its preference rank (0 = acronym).
    preference = dict((variant, idx)
                      for idx, variant in enumerate(variants))
    names_by_pref = {}
    rows = self.ou.search_name_with_language(
        entity_id=self.ou.entity_id,
        name_language=self.languages,
        name_variant=variants)
    for row in rows:
        name = iso2utf(row["name"].strip())
        if not name:
            continue
        pref = preference[int(row['name_variant'])]
        names_by_pref.setdefault(pref, []).append(
            (int(row['name_language']), name))
    if not names_by_pref:
        self.logger.warn("No names could be located for ou_id=%s", ou_id)
        return parent_dn, None
    uniq_id = self.get_orgUnitUniqueID()
    self.ou_uniq_id2ou_id[uniq_id] = ou_id
    self.ou_id2ou_uniq_id[ou_id] = uniq_id
    entry = {'objectClass': ['top', 'organizationalUnit', 'norEduOrgUnit'],
             self.FEIDE_attr_ou_id: (uniq_id,)}
    if 0 in names_by_pref:
        self.add_lang_names(entry, 'norEduOrgAcronym', names_by_pref[0])
    ordered = [names for _, names in sorted(names_by_pref.items())]
    for names in ordered:
        self.add_lang_names(entry, 'ou', names)
    # cn gets the least-preferred (display) variant available.
    self.add_lang_names(entry, 'cn', ordered[-1])
    entry.update(self.FEIDE_ou_common_attrs)
    if self.FEIDE_class_obsolete:
        entry['objectClass'].append(self.FEIDE_class_obsolete)
    if self.norEduOrgUniqueID:
        entry['norEduOrgUniqueNumber'] = self.norEduOrgUniqueID
    entry['norEduOrgUnitUniqueNumber'] = (uniq_id,)
    dn = self.make_ou_dn(entry, parent_dn or self.ou_dn)
    if not dn:
        return parent_dn, None
    for attr in entry.keys():
        if attr == 'ou' or attr.startswith('ou;'):
            entry[attr] = self.attr_unique(entry[attr], normalize_string)
    self.fill_ou_entry_contacts(entry)
    self.update_ou_entry(entry)
    return dn, entry
def gen_undervisningsenhet(cgi, sip, out):
    """Write uioEduOffering entries (course units / 'undervisningsenhet').

    Source: access_FS.py:Undervisning.list_undervisningenheter.
    uioEduCourseCode, uioEduCourseAdministrator, uioEduCourseLevel and
    uioEduCourseName as for the activity entries;
    uioEduCourseOffering is urn:mace:uio.no:offering:<...>.

    Returns a dict mapping each offering URN to {entity_id: role}.
    """
    count = 0
    offerings = {}
    top_dn = ldapconf('KURS', 'dn')
    for enhet in sip.undervisningsenheter:
        emne = sip.emnekode2info.get(enhet['emnekode'])
        if not emne:
            # Unknown course code; a warning was already issued earlier.
            continue
        roles = {}
        for persontype, role in interesting_fs_roles:
            args = [enhet[key] for key in CerebrumGroupInfo.id_key_seq]
            args.append(persontype)
            args = [a.lower() for a in args]
            entity_id = cgi.find_group_by_undervisningsenhet(*args)
            if entity_id is not None:
                roles["%i" % entity_id] = role
        urn = ('urn:mace:uio.no:offering:enhet-%s'
               % "_".join(sorted(roles)))
        out.write(entry_string("cn=ue-%i,%s" % (count, top_dn), {
            'objectClass': ("top", "uioEduOffering"),
            'uioEduCourseCode': (iso2utf(enhet['emnekode']),),
            'uioEduCourseAdministrator': (iso2utf(emne['sko']),),
            'uioEduCourseLevel': (iso2utf(emne['studienivakode']),),
            'uioEduCourseName': (iso2utf(emne['emnenavn_bokmal']),),
            'uioEduCourseInstitution': (iso2utf(emne['institusjonsnr']),),
            'uioEduCourseVersion': (iso2utf(emne['versjonskode']),),
            'uioEduOfferingTermCode': (iso2utf(enhet['terminkode']),),
            'uioEduOfferingYear': (iso2utf(enhet['arstall']),),
            'uioEduOfferingTermNumber': (iso2utf(enhet['terminnr']),),
            'uioEduCourseOffering': (iso2utf(urn),)}))
        count += 1
        offerings[urn] = roles
    return offerings
def dump_ldif(file_handle):
    """Write one uioGroup LDIF entry per LDAP-spread group.

    Side effect: records each person member's DNs in module-level
    mbr2grp (member_id -> list of group DNs).
    """
    for row in group.search(spread=co.spread_ldap_group):
        group.clear()
        group.find(int(row['group_id']))
        dn = "cn=%s,%s" % (row['name'], top_dn)
        members = group.search_members(group_id=group.entity_id,
                                       member_type=co.entity_person)
        for member in members:
            mbr2grp.setdefault(int(member["member_id"]), []).append(dn)
        file_handle.write(entry_string(dn, {
            'objectClass': ("top", "uioGroup"),
            'description': (iso2utf(row['description']),)}))
def get_fagmiljo(self):
    """Returns a dict mapping from person_id to 'fagmiljø'.

    NMH wants 'fagmiljø' exported, which is retrieved from SAP as
    'utvalg' and stored in a trait for each person. We blindly treat
    them as plaintext.
    """
    result = {}
    for row in self.person.list_traits(self.const.trait_fagmiljo):
        result[row['entity_id']] = iso2utf(row['strval'])
    return result
def ldif_filegroup(self, group_id, posix_gid, members):
    """Create the group-entry attributes for a posixGroup."""
    name = self.filegroups[group_id]
    entry = {
        'objectClass': ('top', 'posixGroup'),
        'cn': (name,),
        'gidNumber': (str(posix_gid),),
        'memberUid': members,
    }
    desc = self.group2desc(group_id)
    if desc:
        # Converted with latin1_to_iso646_60 later in the pipeline.
        entry['description'] = (iso2utf(desc),)
    dn = 'cn=%s,%s' % (name, self.fgrp_dn)
    return dn, entry
def init_person_titles(self):
    """Extends the person_titles dict with employment titles available
    via the PersonEmployment module."""
    self.__super.init_person_titles()
    timer = make_timer(self.logger,
                       'Fetching personal employment titles...')
    for emp in self.person.search_employment(main_employment=True):
        person_id = emp['person_id']
        # Only fill in people the superclass did not already title.
        if person_id not in self.person_titles:
            self.person_titles[person_id] = [
                (self.const.language_nb, iso2utf(emp['description']))]
    timer("...personal employment titles done.")
def read_vacation(self):
    """Populate targ2vacation with the active vacation message per
    target, keeping the one with the latest start date."""
    mail_vaca = Email.EmailVacation(self._db)
    for row in mail_vaca.list_email_active_vacations():
        t_id = int(row['target_id'])
        current = self.targ2vacation.get(t_id)
        # Insert when unseen, or when this vacation starts later than
        # the one already recorded (index 1 is the start date).
        if current is None or row['start_date'] > current[1]:
            self.targ2vacation[t_id] = (iso2utf(row['vacation_text']),
                                        row['start_date'],
                                        row['end_date'])
def dump_ldif(file_handle):
    """Emit uioGroup entries; record person members in mbr2grp."""
    for row in group.search(spread=co.spread_ldap_group):
        group.clear()
        group.find(int(row['group_id']))
        dn = "cn=%s,%s" % (row['name'], top_dn)
        person_members = group.search_members(
            group_id=group.entity_id,
            member_type=co.entity_person)
        for mbr in person_members:
            mbr2grp.setdefault(int(mbr["member_id"]), []).append(dn)
        entry = {'objectClass': ("top", "uioGroup"),
                 'description': (iso2utf(row['description']),)}
        file_handle.write(entry_string(dn, entry))
def ldif_filegroup(self, group_id, posix_gid, members):
    """Create the group-entry attributes"""
    name = self.filegroups[group_id]
    attrs = {'objectClass': ('top', 'posixGroup')}
    attrs['cn'] = (name,)
    attrs['gidNumber'] = (str(posix_gid),)
    attrs['memberUid'] = members
    desc = self.group2desc(group_id)
    if desc:
        # latin1_to_iso646_60 conversion happens later.
        attrs['description'] = (iso2utf(desc),)
    return ','.join(('cn=' + name, self.fgrp_dn)), attrs
def init_person_titles(self):
    """Collect personal titles per person and language.

    Change from original: Search titles first by system_lookup_order,
    then within each system let personal title override work title
    (personal_title is processed first, and setdefault never
    overwrites an existing language entry).
    """
    timer = make_timer(self.logger, 'Fetching personal titles...')
    titles = defaultdict(dict)
    for name_type in (self.const.personal_title, self.const.work_title):
        rows = self.person.search_name_with_language(
            entity_type=self.const.entity_person,
            name_variant=name_type,
            name_language=self.languages)
        for row in rows:
            lang_map = titles[int(row['entity_id'])]
            lang_map.setdefault(int(row['name_language']),
                                iso2utf(row['name']))
    self.person_titles = dict((p_id, lang_names.items())
                              for p_id, lang_names in titles.items())
    timer("...personal titles done.")
def get_fagomrade(self):
    """Return a dict mapping person_id to a list of 'fagfelt' strings.

    NMH wants 'fagomrade' exported, which consists of one or more
    'fagfelt'. This field is stored in a trait for each person. The
    trait string value is a pickled list of strings.

    Bug fix: the original built the mapping but never returned it, so
    callers always got None.

    NOTE(review): pickle.loads on trait data is only safe as long as
    the traits are written exclusively by trusted internal code.
    """
    person2fagfelt = {}
    for row in self.person.list_traits(self.const.trait_fagomrade_fagfelt):
        try:
            fagfelt = pickle.loads(row['strval'])
        except Exception as exc:
            # Best effort: log and skip malformed trait values.
            self.logger.warn(
                "Could not unpickle trait_fagomrade_fagfelt for person:%s, %s",
                row['entity_id'], exc)
            continue
        person2fagfelt[row['entity_id']] = [iso2utf(f) for f in fagfelt]
    return person2fagfelt
def main():
    """Dump host information (MAC addresses plus comment/contact
    traits) as LDIF entries under the HOSTS container."""
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    arecord = ARecord.ARecord(db)
    dns_owner = DnsOwner.DnsOwner(db)
    get_id_mac = itemgetter('dns_owner_id', 'mac_adr')
    get_id_name = itemgetter('dns_owner_id', 'name')
    get_trait = itemgetter('entity_id', 'code', 'strval')
    trait2attr = {int(co.trait_dns_comment): 'uioHostComment',
                  int(co.trait_dns_contact): 'uioHostContact'}
    ldif = LDIFWriter('HOSTS', None)
    ldif.write_container()
    base_dn = ldif.getconf('dn')
    # Comment/contact traits, grouped per dns owner.
    id2attrs = defaultdict(dict)
    traits = dns_owner.list_traits(code=trait2attr.keys())
    for entity_id, code, strval in imap(get_trait, traits):
        if strval:
            id2attrs[int(entity_id)][trait2attr[code]] = (iso2utf(strval),)
    # MAC addresses, grouped per dns owner.
    arecords = defaultdict(set)
    for owner_id, mac in imap(get_id_mac, arecord.list_ext()):
        if mac:
            arecords[int(owner_id)].add(mac)
    seen = set()
    for owner_id, name in sorted(imap(get_id_name, dns_owner.list())):
        owner_id = int(owner_id)
        name = name.rstrip('.')
        # We have both lowercase and uppercase versions of some host
        # names. Ignore one, hostnames are case-insensitive in LDAP.
        key = name.lower()
        if key in seen:
            continue
        seen.add(key)
        entry = {'host': (name,),
                 'objectClass': ['uioHostinfo'],
                 'uioHostMacAddr': arecords.get(owner_id, ())}
        entry.update(id2attrs.get(owner_id, ()))
        ldif.write_entry("host=%s,%s" % (name, base_dn), entry)
    ldif.close()
def write_subnet_ldif():
    """Export all subnets as ipNetwork entries to the SUBNETS LDIF."""
    dn_base = ldapconf('SUBNETS', 'dn')
    start_attr, end_attr, extra_classes = ldapconf('SUBNETS', 'rangeSchema')
    object_classes = ('top', 'ipNetwork') + tuple(extra_classes)
    db = Factory.get('Database')()
    out = ldif_outfile('SUBNETS')
    out.write(container_entry_string('SUBNETS'))
    for row in Subnet(db).search():
        cn = "%s/%s" % (row['subnet_ip'], row['subnet_mask'])
        desc = row['description']
        entry = {
            'objectClass': object_classes,
            # description is optional; omit the attribute when empty.
            'description': (iso2utf(desc),) if desc else (),
            'ipNetworkNumber': (row['subnet_ip'],),
            'ipNetmaskNumber': (netmask_to_ip(row['subnet_mask']),),
            start_attr: (str(int(row['ip_min'])),),
            end_attr: (str(int(row['ip_max'])),),
        }
        out.write(entry_string("cn=%s,%s" % (cn, dn_base), entry))
    end_ldif_outfile('SUBNETS', out)
def main():
    """Write one LDIF entry per unique hostname with MAC addresses and
    comment/contact trait attributes."""
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    arecord = ARecord.ARecord(db)
    dns_owner = DnsOwner.DnsOwner(db)
    get_id_mac = itemgetter('dns_owner_id', 'mac_adr')
    get_id_name = itemgetter('dns_owner_id', 'name')
    get_trait = itemgetter('entity_id', 'code', 'strval')
    trait2attr = {
        int(co.trait_dns_comment): 'uioHostComment',
        int(co.trait_dns_contact): 'uioHostContact',
    }
    ldif = LDIFWriter('HOSTS', None)
    ldif.write_container()
    base_dn = ldif.getconf('dn')
    attrs_by_id = defaultdict(dict)
    for entity_id, code, strval in imap(
            get_trait, dns_owner.list_traits(code=trait2attr.keys())):
        if strval:
            attrs_by_id[int(entity_id)][trait2attr[code]] = (iso2utf(strval),)
    macs_by_id = defaultdict(set)
    for owner_id, mac in imap(get_id_mac, arecord.list_ext()):
        if mac:
            macs_by_id[int(owner_id)].add(mac)
    hosts_seen = set()
    for owner_id, name in sorted(imap(get_id_name, dns_owner.list())):
        owner_id, name = int(owner_id), name.rstrip('.')
        # Hostnames are case-insensitive in LDAP and the data contains
        # both lower- and uppercase variants; export only the first.
        lowered = name.lower()
        if lowered in hosts_seen:
            continue
        hosts_seen.add(lowered)
        entry = {'host': (name,),
                 'objectClass': ['uioHostinfo'],
                 'uioHostMacAddr': macs_by_id.get(owner_id, ())}
        entry.update(attrs_by_id.get(owner_id, ()))
        ldif.write_entry("host=%s,%s" % (name, base_dn), entry)
    ldif.close()
def make_address(self, sep,
                 p_o_box, address_text, postal_number, city, country):
    """Join address parts into one *sep*-separated string.

    Changes from superclass:
    Weird algorithm for when to use p_o_box.
    Append "Blindern" to postbox.
    """
    if country:
        country = self.const.Country(country).country
    # Postal codes 300-399 (e.g. '03xx' -- presumably the Blindern
    # area) use the P.O. box form instead of the street address.
    if p_o_box and int(postal_number or 0) // 100 == 3:
        address_text = "Pb. %s - Blindern" % p_o_box
    else:
        address_text = (address_text or "").strip()
    post_nr_city = None
    if city or (postal_number and country):
        parts = (postal_number, (city or "").strip())
        post_nr_city = " ".join(filter(None, parts))
    val = "\n".join(filter(None, (address_text, post_nr_city, country)))
    if sep == '$':
        val = postal_escape_re.sub(hex_escape_match, val)
    return iso2utf(val.replace("\n", sep))
def make_address(self, sep,
                 p_o_box, address_text, postal_number, city, country):
    """Build a single address string with lines joined by *sep*.

    Changes from superclass:
    Weird algorithm for when to use p_o_box.
    Append "Blindern" to postbox.
    """
    if country:
        country = self.const.Country(country).country
    use_p_o_box = p_o_box and int(postal_number or 0) // 100 == 3
    if use_p_o_box:
        address_text = "Pb. %s - Blindern" % p_o_box
    else:
        address_text = (address_text or "").strip()
    post_nr_city = None
    if city or (postal_number and country):
        post_nr_city = " ".join(
            [p for p in (postal_number, (city or "").strip()) if p])
    lines = [x for x in (address_text, post_nr_city, country) if x]
    val = "\n".join(lines)
    if sep == '$':
        # '$'-separated (postalAddress) values need extra escaping.
        val = postal_escape_re.sub(hex_escape_match, val)
    return iso2utf(val.replace("\n", sep))
def read_vacation(self):
    """Fill targ2vacation with the latest-starting active vacation
    message per target, skipping Exchange-spread targets."""
    mail_vaca = Email.EmailVacation(self._db)
    for row in mail_vaca.list_email_active_vacations():
        t_id = int(row['target_id'])
        # exchange-relatert-jazz
        # If the target is recorded as having spread_exchange_acc the
        # whole row is skipped because we don't want to export
        # vacation messages for such targets to LDAP.
        if t_id in self.targ2spread:
            continue
        known = self.targ2vacation.get(t_id)
        # Keep the vacation with the latest start date (index 1).
        if known is None or row['start_date'] > known[1]:
            self.targ2vacation[t_id] = (iso2utf(row['vacation_text']),
                                        row['start_date'],
                                        row['end_date'])
def write_subnet_ldif():
    """Dump every subnet as a cn=<ip>/<mask> ipNetwork LDIF entry."""
    container_dn = ldapconf('SUBNETS', 'dn')
    range_start, range_end, extra = ldapconf('SUBNETS', 'rangeSchema')
    classes = ('top', 'ipNetwork') + tuple(extra)
    db = Factory.get('Database')()
    outfile = ldif_outfile('SUBNETS')
    outfile.write(container_entry_string('SUBNETS'))
    for row in Subnet(db).search():
        cn = "%s/%s" % (row['subnet_ip'], row['subnet_mask'])
        desc = row['description']
        # Empty description => attribute omitted entirely.
        description = (iso2utf(desc),) if desc else ()
        outfile.write(entry_string("cn=%s,%s" % (cn, container_dn), {
            'objectClass': classes,
            'description': description,
            'ipNetworkNumber': (row['subnet_ip'],),
            'ipNetmaskNumber': (netmask_to_ip(row['subnet_mask']),),
            range_start: (str(int(row['ip_min'])),),
            range_end: (str(int(row['ip_max'])),)}))
    end_ldif_outfile('SUBNETS', outfile)
def make_person_entry(self, row, person_id):
    """Override the production of a person entry to output.

    NMH needs more data for their own use, e.g. to be used by their
    web pages. Adds fagomrade/fagfelt, fagmiljø and study-program
    entitlement URNs to the superclass entry.
    """
    dn, entry, alias_info = self.__super.make_person_entry(row, person_id)
    if dn:
        urns = entry.setdefault('eduPersonEntitlement', set())
        # fagomrade/fagfelt, if registered for the person:
        for fagfelt in self.pe2fagomr.get(person_id, []):
            urns.add('urn:mace:feide.no:nmh.no:fagomrade:{}'.format(fagfelt))
        # fagmiljø:
        fagmiljo = self.pe2fagmiljo.get(person_id)
        if fagmiljo:
            urns.add('urn:mace:feide.no:nmh.no:fagmiljo:{}'.format(fagmiljo))
        # Study programs:
        for program in self.pe2study_program.get(person_id, []):
            urn = ('urn:mace:feide.no:nmh.no:studies/studyprogram/{}/{}'
                   .format(program['studieprogramkode'],
                           program['arstall_kull']))
            urns.add(iso2utf(urn))
    return dn, entry, alias_info
def gen_undervisningsaktivitet(cgi, sip, out):
    """Write uioEduSection entries for course activities (groups,
    colloquia, labs, writing courses, lectures).

    Source: access_FS.py:Undervisning.list_aktiviteter.
    Attribute sources (FS):
      uioEduCourseCode          - FS.emne.emnekode
      uioEduCourseAdministrator - FS.emne.*_reglement (6 digits)
      uioEduCourseLevel         - FS.emne.studienivakode
      uioEduCourseName          - FS.emne.emnenavn_bokmal
      uioEduCourseSectionName   - FS.undaktivitet.aktivitetsnavn
      uioEduCourseOffering      - urn:mace:uio.no:section:<...>

    Returns a dict mapping each section URN to {entity_id: role}.
    """
    count = 0
    sections = {}
    top_dn = ldapconf('KURS', 'dn')
    for aktivitet in sip.undervisningsaktiviteter:
        try:
            emne = sip.emnekode2info[aktivitet['emnekode']]
        except KeyError:
            logger.warn(
                "Undervisningsaktivitet %s er ikke knyttet til gyldig emne",
                aktivitet['emnekode'])
            continue
        if 'emnenavn_bokmal' not in emne:
            logger.warn("Undervisningsaktivitet %s uten enhet?"
                        % repr(aktivitet))
            continue
        roles = {}
        for persontype, role in interesting_fs_roles:
            args = [aktivitet[key] for key in CerebrumGroupInfo.id_key_seq]
            args.extend((aktivitet['aktivitetkode'], persontype))
            args = [a.lower() for a in args]
            entity_id = cgi.find_group_by_undervisningsaktivitet(*args)
            if entity_id is not None:
                roles["%i" % entity_id] = role
        urn = ('urn:mace:uio.no:section:aktivitet-%s'
               % "_".join(sorted(roles)))
        out.write(entry_string("cn=ua-%i,%s" % (count, top_dn), {
            'objectClass': ("top", "uioEduSection"),
            'uioEduCourseCode': (iso2utf(aktivitet['emnekode']),),
            'uioEduCourseAdministrator': (iso2utf(emne['sko']),),
            'uioEduCourseLevel': (iso2utf(emne['studienivakode']),),
            'uioEduCourseName': (iso2utf(emne['emnenavn_bokmal']),),
            'uioEduCourseSectionName':
                (iso2utf(aktivitet['aktivitetsnavn']),),
            'uioEduCourseInstitution': (iso2utf(emne['institusjonsnr']),),
            'uioEduCourseVersion': (iso2utf(emne['versjonskode']),),
            'uioEduCourseSectionCode': (iso2utf(aktivitet['aktivitetkode']),),
            'uioEduOfferingTermCode': (iso2utf(aktivitet['terminkode']),),
            'uioEduOfferingYear': (iso2utf(aktivitet['arstall']),),
            'uioEduOfferingTermNumber': (iso2utf(aktivitet['terminnr']),),
            'uioEduCourseOffering': (iso2utf(urn),)}))
        count += 1
        sections[urn] = roles
    return sections