def add_spread(self, spread):
    # Avoid circular import dependency
    from Cerebrum.modules import PosixGroup

    # When adding a NIS-spread, assert that group is a PosixGroup
    if int(spread) in (self.const.spread_uio_nis_fg,
                       self.const.spread_ifi_nis_fg,
                       self.const.spread_hpc_nis_fg):
        pg = PosixGroup.PosixGroup(self._db)
        try:
            pg.clear()
            pg.find(self.entity_id)
        except Errors.NotFoundError:
            raise Errors.RequiresPosixError(
                "Can't add NIS-spread to non-POSIX group.")
        tmp = pg.illegal_name(pg.group_name)
        if tmp:
            raise self._db.IntegrityError(
                "Illegal name for filegroup, {0}.".format(tmp))

    # When adding a Shared mailbox spread, assert that the group is a
    # distribution group.
    if spread == self.const.spread_exchange_shared_mbox:
        if not self.has_spread(self.const.spread_exchange_group):
            raise Errors.CerebrumError(
                "Can't add shared mailbox spread to a "
                "non-distribution group")

    # (Try to) perform the actual spread addition.
    self.__super.add_spread(spread)
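A minimal usage sketch of the check above, assuming a caller that holds a db handle and a plain Group entity. The populate(parent=...) promotion idiom is an assumption based on the usual Cerebrum populate pattern, not something this mixin defines.

# Hedged sketch: handling RequiresPosixError from the mixin above by
# promoting the group to a PosixGroup and retrying.  The parent= promotion
# call is an assumption.
from Cerebrum import Errors
from Cerebrum.modules import PosixGroup


def ensure_nis_spread(db, group, spread):
    """Add a NIS spread, promoting the group to POSIX first if needed."""
    try:
        group.add_spread(spread)
    except Errors.RequiresPosixError:
        # Promote the plain group to a PosixGroup, then retry.
        pg = PosixGroup.PosixGroup(db)
        pg.populate(parent=group)   # assumed promotion idiom
        pg.write_db()
        pg.add_spread(spread)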
def __init__(self, db):
    self.db = db
    self.co = Factory.get('Constants')(db)
    self.clconst = Factory.get('CLConstants')(db)
    self.group = Factory.get('Group')(db)
    self.posix_user = Factory.get('PosixUser')(db)
    self.posix_group = PosixGroup.PosixGroup(db)
def maybe_create(self, group_name):
    try:
        return get_group(group_name)
    except Errors.NotFoundError:
        description = "IT role group (%s)" % group_name
        pg = PosixGroup.PosixGroup(db)
        pg.populate(
            creator_id=self.group_creator(),
            visibility=const.group_visibility_internal,
            name=group_name,
            description=description,
            # TODO:
            # Are these groups:
            # - internal? They have group_visibility_internal for some
            #   reason - do they have some internal usage in Cerebrum as
            #   well?
            # - automatic? They seem to be maintained from this script,
            #   but the script never removes members..
            group_type=const.group_type_unknown,
        )
        pg.write_db()
        logger.info("Created group: name=%s, id=%d, gid=%d, desc='%s'",
                    pg.group_name, pg.entity_id, pg.posix_gid,
                    pg.description)
        if self.buildadmins:
            pg.add_spread(const.spread_uit_ad_lit_admingroup)
        else:
            pg.add_spread(const.spread_uit_ad_group)
        return pg
def main():
    global db, constants, account_init, group, posixgroup
    global default_creator_id
    global dryrun, logger

    logger = Factory.get_logger("console")

    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'f:d',
                                   ['file=', 'dryrun'])
    except getopt.GetoptError:
        usage()

    dryrun = False
    for opt, val in opts:
        if opt in ('-d', '--dryrun'):
            dryrun = True
        elif opt in ('-f', '--file'):
            infile = val

    db = Factory.get('Database')()
    db.cl_init(change_program='import_groups')
    constants = Factory.get('Constants')(db)
    account_init = Factory.get('Account')(db)
    account_init.find_by_name(cereconf.INITIAL_ACCOUNTNAME)
    default_creator_id = account_init.entity_id
    group = Factory.get('Group')(db)
    posixgroup = PosixGroup.PosixGroup(db)

    process_line(infile)
def get_group(id):
    gr = PosixGroup.PosixGroup(db)
    if isinstance(id, int):
        gr.find(id)
    else:
        gr.find_by_name(id)
    return gr
def main(self):
    self.parse_options()
    self.db = Factory.get('Database')()
    self.co = Factory.get('Constants')(self.db)
    self.group = Factory.get('Group')(self.db)
    self.posix_user = Factory.get('PosixUser')(self.db)
    self.posix_group = PosixGroup.PosixGroup(self.db)
    self._namecachedtime = mx.DateTime.now()

    self._num = 0
    self.posix_users = []
    self.e_id2name = {}
    self.p_id2name = {}
    self.auth_data = {}
    self.disk_tab = {}
    self.shell_tab = {}
    self.quarantines = {}
    self.filegroups = {}
    self.netgroups = {}
    self.host_netgroups = {}
    self.account2def_group = {}
    self.g_id2gid = {}
    self.a_id2owner = {}
    self.a_id2home = {}
    self._names = set()

    self.setup()
    self.generate_files()
def get_group(id, grtype="Group"):
    if grtype == "Group":
        group = Utils.Factory.get('Group')(db)
    elif grtype == "PosixGroup":
        group = PosixGroup.PosixGroup(db)
    group.clear()
    group.find(id)
    return group
def sync_filegroup(fgname, group, course, act):
    posix_group = PosixGroup.PosixGroup(db)
    # Make the group last a year or so.  To avoid changing the database
    # every night, we only change the expire date if it has less than
    # three months to live.
    expdate = DateTime.TimestampFromTicks(
        int(time.time() + 12 * 31 * 24 * 3600))
    refreshdate = DateTime.TimestampFromTicks(
        int(time.time() + 3 * 31 * 24 * 3600))
    try:
        fgroup = get_group(fgname)
    except Errors.NotFoundError:
        logger.info("Created new file group %s", fgname)
        posix_group.populate(group_creator, co.group_visibility_all,
                             fgname,
                             "Gruppelærere %s gruppe %s"
                             % (course.upper(), act),
                             expire_date=expdate)
        posix_group.write_db()
    else:
        posix_group.find(fgroup.entity_id)
        # make sure the group is alive
        if (posix_group.expire_date and
                posix_group.expire_date < refreshdate):
            logger.info("Extending life of %s from %s to %s",
                        fgname, posix_group.expire_date, refreshdate)
            posix_group.expire_date = expdate
            posix_group.write_db()

    uptodate = False
    for row in posix_group.search_members(
            group_id=posix_group.entity_id,
            member_filter_expired=False):
        member_type = int(row["member_type"])
        member_id = int(row["member_id"])
        if member_type != co.entity_group:
            logger.info("Removing member %d from %s", member_id, fgname)
            posix_group.remove_member(member_id)
        elif member_id != group.entity_id:
            logger.info("Removing group member %d from %s",
                        member_id, fgname)
            posix_group.remove_member(member_id)
        else:
            uptodate = True

    if not uptodate:
        logger.info("Adding %s to %s", group.group_name, fgname)
        if not posix_group.has_member(group.entity_id):
            posix_group.add_member(group.entity_id)

    # finally check the spread.  we leave any additionally added spreads
    # alone.
    uptodate = False
    for r in posix_group.get_spread():
        if int(r['spread']) == int(co.spread_ifi_nis_fg):
            uptodate = True
            break
    if not uptodate:
        logger.info("Adding NIS_fg@ifi to %s", fgname)
        posix_group.add_spread(co.spread_ifi_nis_fg)
    return posix_group
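The expire handling above only rewrites the expire date once it falls inside a refresh window, so the nightly run normally leaves the row untouched. A minimal, self-contained sketch of that pattern, using stdlib datetime instead of mx.DateTime; all names here are illustrative.

# Hedged illustration of the "refresh window" idiom used in sync_filegroup:
# only push the expire date forward when it is closer than the window, so
# the database row is not rewritten on every run.
import datetime


def maybe_extend_expire(expire_date,
                        lifetime=datetime.timedelta(days=372),
                        refresh_window=datetime.timedelta(days=93)):
    """Return a new expire date, or None if no update is needed."""
    now = datetime.datetime.now()
    if expire_date is None or expire_date < now + refresh_window:
        return now + lifetime
    return None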
def __init__(self, group_spread, member_spread):
    super(FileGroup, self).__init__(
        co.account_namespace, co.entity_account,
        group_spread, member_spread)
    self._group = PosixGroup.PosixGroup(db)
    self._account2def_group = {}
    for row in posix_user.list_extended_posix_users():
        self._account2def_group[int(row['account_id'])] = \
            int(row['posix_gid'])
    logger.debug("__init__ done")
def init_filegroup(self):
    """Initiate modules and constants for posixgroup"""
    from Cerebrum.modules import PosixGroup
    self.posgrp = PosixGroup.PosixGroup(self.db)
    self.fgrp_dn = LDIFutils.ldapconf('FILEGROUP', 'dn')
    self.filegroupcache = defaultdict(dict)
    self.cache_account2name()
    self.cache_group2gid()
    self.cache_groups_and_users()
def write_db(self):
    """Write PosixUser instance to database."""
    self.__super.write_db()
    if not self.__updated:
        return
    is_new = not self.__in_db

    primary_group = PosixGroup.PosixGroup(self._db)
    primary_group.find(self.gid_id)
    # TBD: should Group contain a utility function to add a member if
    # it's not a member already?  There are many occurrences of code
    # like this, but none of them implement all the robustness below.
    if not primary_group.has_member(self.entity_id):
        primary_group.add_member(self.entity_id)

    if is_new:
        self.execute(
            """
            INSERT INTO [:table schema=cerebrum name=posix_user]
              (account_id, posix_uid, gid, gecos, shell)
            VALUES
              (:a_id, :u_id, :gid, :gecos, :shell)""",
            {
                'a_id': self.entity_id,
                'u_id': self.posix_uid,
                'gid': self.gid_id,
                'gecos': self.gecos,
                'shell': int(self.shell),
            })
    else:
        self.execute(
            """
            UPDATE [:table schema=cerebrum name=posix_user]
            SET posix_uid=:u_id, gid=:gid, gecos=:gecos, shell=:shell
            WHERE account_id=:a_id""",
            {
                'a_id': self.entity_id,
                'u_id': self.posix_uid,
                'gid': self.gid_id,
                'gecos': self.gecos,
                'shell': int(self.shell),
            })
    self._db.log_change(self.entity_id,
                        self.const.posix_promote,
                        None,
                        change_params={
                            'uid': int(self.posix_uid),
                            'gid': int(self.gid_id),
                            'shell': int(self.shell),
                            'gecos': self.gecos,
                        })
    del self.__in_db
    self.__in_db = True
    self.__updated = []
    return is_new
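For context, a hedged sketch of how this write_db() is normally reached: an existing Account is promoted to a POSIX user and written back. The keyword arguments mirror the attributes write_db() persists; the exact populate() signature and the get_free_uid() helper are assumptions.

# Hedged sketch: promoting an Account to a PosixUser so that write_db()
# above takes its INSERT branch.  Signature details are assumptions.
from Cerebrum.Utils import Factory
from Cerebrum.modules import PosixGroup


def promote_to_posix(db, account, default_group_id, shell):
    """Promote an existing Account to a PosixUser (illustrative sketch)."""
    pg = PosixGroup.PosixGroup(db)
    pg.find(default_group_id)
    pu = Factory.get('PosixUser')(db)
    pu.populate(posix_uid=pu.get_free_uid(),   # assumed helper
                gid_id=pg.entity_id,
                gecos=None,
                shell=shell,
                parent=account)
    # write_db() returns True when the INSERT branch was taken.
    return pu.write_db()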
def __init__(self, db, logger, u_sprd=None, g_sprd=None, n_sprd=None, fd=None): """ Initiate database and import modules. Spreads are given in initiation and general constants which is used in more than one method. """ timer = make_timer(logger, 'Initing PosixLDIF...') from Cerebrum.modules import PosixGroup self.db = db self.logger = logger self.const = Factory.get('Constants')(self.db) self.grp = Factory.get('Group')(self.db) self.posuser = Factory.get('PosixUser')(self.db) self.posgrp = PosixGroup.PosixGroup(self.db) self.user_dn = LDIFutils.ldapconf('USER', 'dn', None) # This is an odd one -- if set to False, then id2uname should be # populated with users exported in the users export -- which makes the # group exports filter group members by *actually* exported users... self.get_name = True self.fd = fd self.spread_d = {} # Validate spread from arg or from cereconf for x, y in zip(['USER', 'FILEGROUP', 'NETGROUP'], [u_sprd, g_sprd, n_sprd]): spread = LDIFutils.map_spreads( y or getattr(cereconf, 'LDAP_' + x).get('spread'), list) if spread: self.spread_d[x.lower()] = spread if 'user' not in self.spread_d: raise Errors.ProgrammingError( "Must specify spread-value as 'arg' or in cereconf") self.account2name = dict() self.group2gid = dict() self.groupcache = defaultdict(dict) self.group2groups = defaultdict(set) self.group2users = defaultdict(set) self.group2persons = defaultdict(list) self.shell_tab = dict() self.quarantines = dict() self.user_exporter = UserExporter(self.db) if len(self.spread_d['user']) > 1: logger.warning('Exporting users with multiple spreads, ' 'ignoring homedirs from %r', self.spread_d['user'][1:]) self.homedirs = HomedirResolver(db, self.spread_d['user'][0]) self.owners = OwnerResolver(db) auth_attr = LDIFutils.ldapconf('USER', 'auth_attr', None) self.user_password = AuthExporter.make_exporter( db, auth_attr['userPassword']) timer('... done initing PosixLDIF.')
def __init__(self, group_spread, member_spread):
    super(FileGroup, self).__init__(co.account_namespace,
                                    co.entity_account,
                                    group_spread, member_spread)
    self._group = PosixGroup.PosixGroup(db)

    user_exporter = UserExporter(db)
    gid2posix_gid = user_exporter.make_posix_gid_cache()
    self._account2posix_gid = {}
    for row in posix_user.list_posix_users(filter_expired=True):
        self._account2posix_gid[row['account_id']] = gid2posix_gid[
            row['gid']]
    logger.debug("__init__ done")
def __init__(self, db, perspective):
    """
    :param db: Cerebrum database object
    :param basestring perspective: OU perspective, e.g. 'FS'
    """
    # Dicts
    self.group_dict = {}
    self.members_dict = {}
    self.ou_affiliates_dict = {}
    self.group_description_dict = {}
    self.stedkode_dict = {}
    self.description_group_dict = {}
    self.members_delete_dict = {}
    self.group_delete_list = []

    # Default values
    self.default_start_ou = 3  # Universitetet i Tromsø
    # self.default_spread = self.co.spread_uit_ad_group

    # Initialize database objects
    self._db = db
    self._co = Factory.get('Constants')(self._db)
    self.ou = Factory.get('OU')(self._db)
    self.gr = PosixGroup.PosixGroup(self._db)
    self.ac = Factory.get('Account')(self._db)
    self.perspective = self._co.OUPerspective(perspective)

    # Load creator id (global)
    self.ac.find_by_name(cereconf.INITIAL_ACCOUNTNAME)
    self.default_creator = self.ac.entity_id
    self.ac.clear()

    # Used to determine which aff_code_statuses correspond to which
    # container group
    self.member_type_mappings = {
        'VITENSKAPELIG': [
            int(self._co.affiliation_tilknyttet_fagperson),
            int(self._co.affiliation_manuell_gjesteforsker),
            int(self._co.affiliation_status_ansatt_vitenskapelig),
        ],
        'TEKNISK': [int(self._co.affiliation_status_ansatt_tekadm)],
        'STUDENT': [int(self._co.affiliation_status_student_aktiv)],
        'DRGRAD': [int(self._co.affiliation_status_student_drgrad)],
    }
def main():
    global db, constants, account_init, account_member, group, spread
    global posixgroup
    global default_creator_id, group_member
    global dryrun, logger

    logger = Factory.get_logger("console")

    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'f:ds:',
                                   ['file=', 'dryrun', 'spread='])
    except getopt.GetoptError:
        usage()

    dryrun = False
    for opt, val in opts:
        if opt in ('-d', '--dryrun'):
            dryrun = True
        elif opt in ('-f', '--file'):
            infile = val
        elif opt in ('-s', '--spread'):
            spread = val
            if spread not in ['spread_nis_fg', ]:
                usage()

    db = Factory.get('Database')()
    db.cl_init(change_program='import_fg')
    constants = Factory.get('Constants')(db)
    account_init = Factory.get('Account')(db)
    account_init.find_by_name(cereconf.INITIAL_ACCOUNTNAME)
    default_creator_id = account_init.entity_id
    account_member = Factory.get('Account')(db)
    group = Factory.get('Group')(db)
    group_member = Factory.get('Group')(db)
    posixgroup = PosixGroup.PosixGroup(db)

    if spread == "spread_nis_fg":
        spread = constants.spread_nis_fg
    else:
        usage()

    process_line(infile, spread)
def __init__(self, db, logger, u_sprd=None, g_sprd=None, n_sprd=None, fd=None): """ Initiate database and import modules. Spreads are given in initiation and general constants which is used in more than one method. """ timer = make_timer(logger, 'Initing PosixLDIF...') from Cerebrum.modules import PosixGroup self.db = db self.logger = logger self.const = Factory.get('Constants')(self.db) self.grp = Factory.get('Group')(self.db) self.posuser = Factory.get('PosixUser')(self.db) self.posgrp = PosixGroup.PosixGroup(self.db) self.user_dn = LDIFutils.ldapconf('USER', 'dn', None) self.get_name = True self.fd = fd self.spread_d = {} # Validate spread from arg or from cereconf for x, y in zip(['USER', 'FILEGROUP', 'NETGROUP'], [u_sprd, g_sprd, n_sprd]): spread = LDIFutils.map_spreads( y or getattr(cereconf, 'LDAP_' + x).get('spread'), list) if spread: self.spread_d[x.lower()] = spread if 'user' not in self.spread_d: raise Errors.ProgrammingError( "Must specify spread-value as 'arg' or in cereconf") self.account2name = dict() self.groupcache = defaultdict(dict) self.group2groups = defaultdict(set) self.group2users = defaultdict(set) self.group2persons = defaultdict(list) timer('... done initing PosixLDIF.')
def _post_process_config(self):
    # Config parsing complete.  Convert config-settings to database
    # references etc.
    profilename2profile = {}
    self.using_priority = False
    for p in self.profiles:
        if p.name in profilename2profile:
            self.add_error("Duplicate profile-name {}".format(p.name))
        profilename2profile[p.name] = p
        p.post_config(self.lookup_helper, self)
        if p.priority is not None:
            self.using_priority = True
        self.profilename2profile[p.name] = p
    for p in self.profiles:
        p.expand_super(profilename2profile)
        if self.using_priority and p.priority is None:
            self.add_error(
                "Priority used, but not defined for {}".format(p.name))
    self.select_tool = SelectTool(self.profiles, self._logger, self)
    # Change keys in group_defs from name to entity_id
    pg = PosixGroup.PosixGroup(self.autostud.db)
    tmp = {}
    for k in self.group_defs.keys():
        id = self.lookup_helper.get_group(k)
        t = self.group_defs[k]
        try:
            pg.clear()
            pg.find(id)
            t['is_posix'] = True
        except Errors.NotFoundError:
            t['is_posix'] = False
        tmp[id] = t
    self.group_defs = tmp
    if self._errors:
        self._logger.fatal(
            "The configuration file has errors, refusing to "
            "continue: \n{}".format("\n".join(self._errors)))
        sys.exit(1)
def add_spread(self, spread):
    # FIXME, jazz 2008-07-28: we should move this check into PosixGroup
    # and establish a cereconf.POSIX_GROUP_SPREAD or something
    #
    # Avoid circular import dependency
    from Cerebrum.modules import PosixGroup

    # When adding a NIS-spread, assert that group is a PosixGroup
    if int(spread) == self.const.spread_nis_fg:
        pg = PosixGroup.PosixGroup(self._db)
        try:
            pg.clear()
            pg.find(self.entity_id)
        except Errors.NotFoundError:
            raise Errors.RequiresPosixError(
                "Can't add NIS-spread to non-POSIX group.")
        tmp = pg.illegal_name(pg.group_name)
        if tmp:
            raise self._db.IntegrityError(
                "Illegal name for filegroup, {0}.".format(tmp))

    # (Try to) perform the actual spread addition.
    ret = self.__super.add_spread(spread)
    return ret
import logging

from Cerebrum.Utils import Factory
from Cerebrum.modules import PosixGroup
from Cerebrum.modules.posix.UserExporter import HomedirResolver
from Cerebrum.modules.posix.UserExporter import OwnerResolver
from Cerebrum.modules.posix.UserExporter import UserExporter
from Cerebrum.utils import transliterate
from Cerebrum.utils.atomicfile import SimilarSizeWriter
from Cerebrum.utils.funcwrap import memoize
from six import text_type

db = Factory.get('Database')()
co = Factory.get('Constants')(db)
clconst = Factory.get('CLConstants')(db)
logger = logging.getLogger(__name__)
posix_user = Factory.get('PosixUser')(db)
posix_group = PosixGroup.PosixGroup(db)

# The "official" NIS max line length (consisting of key + NUL + value
# + NUL) is 1024; however, some implementations appear to have lower
# limits.
#
# Specifically, on Solaris 9 makedbm(1M) chokes on lines longer than
# 1018 characters.  Other systems might be even more limited.
MAX_LINE_LENGTH = 1000


class NISMapException(Exception):
    pass


class UserSkipQuarantine(NISMapException):
    pass
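Given the line-length caveat above, a small illustrative helper that enforces MAX_LINE_LENGTH when formatting a "key value" map entry. Only MAX_LINE_LENGTH and NISMapException come from the module above; the function itself is a sketch, not part of the original.

# Hedged sketch: enforcing the NIS line-length limit documented above.
def format_nis_line(key, value, max_length=MAX_LINE_LENGTH):
    """Format a single 'key value' NIS map line, enforcing the limit."""
    line = "%s %s" % (key, value)
    if len(line) > max_length:
        raise NISMapException(
            "NIS map line for %r is %d chars (limit %d)"
            % (key, len(line), max_length))
    return line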
def process_ou_groups(self, ou, perspective):
    """
    Recursive function that will create groups and add spreads and
    members to them.
    """
    logger.info("Now processing OU %s (%s)",
                ou.entity_id, ou_name(self._co, ou))
    gr = PosixGroup.PosixGroup(self._db)
    aux_gr = PosixGroup.PosixGroup(self._db)
    # TODO: Should these groups have a custom uit group_type?
    group_type = self._co.group_type_unknown

    if ou.entity_id not in self.group_dict:
        logger.info("Create PosixGroup and give spread")
        # create group and give spread
        gr_name = (ou_name(self._co, ou) + ' (' +
                   self.stedkode_dict[ou.entity_id] + ')')
        gr.populate(
            creator_id=self.default_creator,
            visibility=self._co.group_visibility_all,
            name=gr_name,
            description='ou_group:' + self.stedkode_dict[ou.entity_id],
            group_type=group_type,
        )
        gr.write_db()
        # SPREAD DISABLED UNTIL AD IS READY. RMI000 - 20080207
        # gr.add_spread(self.default_spread)
        current_group = gr.entity_id
        self.group_dict[ou.entity_id] = current_group
        self.group_description_dict[current_group] = gr.description
        self.description_group_dict[gr.description] = current_group
    else:
        # remove from delete dict
        logger.info("PosixGroup already exists")
        current_group = self.group_dict[ou.entity_id]
        self.group_delete_list.remove(current_group)

    # if loaded ou has parent ou
    if ou.get_parent(perspective):
        logger.info('OU has parent - checking if group needs to be '
                    'made member of parent group.')
        parent_group = self.group_dict[ou.get_parent(perspective)]

        # if group corresponding to loaded ou is not member of group
        # corresponding to parent ou
        if parent_group not in self.members_dict:
            self.members_dict[parent_group] = []
        if (current_group, None) not in self.members_dict[parent_group]:
            # add group member
            logger.info(
                "Add current group (%s) as member of parent group (%s)",
                current_group, parent_group)
            gr.clear()
            gr.find(parent_group)
            gr.add_member(current_group)
            self.members_dict[parent_group].append((current_group, None))
        else:
            # remove member from members_delete_dict
            logger.info(
                "Current group already member of supposed parent group")
            self.members_delete_dict[parent_group].remove(
                (current_group, None))

    # for each affiliated person on loaded ou
    if ou.entity_id not in self.ou_affiliates_dict:
        logger.info("No affiliates on current OU")
    else:
        logger.info("Cycling affiliates of current OU")
        for affiliate in self.ou_affiliates_dict[ou.entity_id]:
            # if person and its affiliation does not exist on group
            # corresponding to loaded ou, add membership
            if current_group not in self.members_dict:
                self.members_dict[current_group] = []

            if affiliate not in self.members_dict[current_group]:
                # add membership
                logger.info(
                    "Affiliate %s is not member - will be added as "
                    "group member, type %s", affiliate[0], affiliate[1])
                container_exists = False
                for aux_affiliate in self.members_dict[current_group]:
                    if aux_affiliate[1] == affiliate[1]:
                        container_exists = True
                        break

                # kbj005 17.12.2014:
                # We can have a situation where
                # members_dict[current_group] has no members of the type
                # in affiliate[1] (e.g. 'STUDENT'), but a container for
                # this type (e.g. 'ou_group:319100:STUDENT') exists in
                # the database.  This situation will not be detected by
                # the above test, and container_exists will be False
                # although the container does exist in the database,
                # leading to an error when the script tries to create a
                # group in the database that already exists.
                # description_group_dict contains info about all ou and
                # container groups in the database, so checking if the
                # container is in description_group_dict is a safer way
                # to find out if it exists in the database.
                # NOTE: The test above here could probably be replaced
                # with the test below here, thus avoiding the need for
                # two tests.
                if not container_exists:
                    if ('ou_group:' + self.stedkode_dict[ou.entity_id] +
                            ':' + affiliate[1]
                            in self.description_group_dict):
                        container_exists = True

                # Create container group
                if not container_exists:
                    gr.clear()
                    gr_name = (ou_name(self._co, ou) + ' (' +
                               self.stedkode_dict[ou.entity_id] + ')' +
                               ' - ' + affiliate[1])
                    gr_desc = ('ou_group:' +
                               self.stedkode_dict[ou.entity_id] +
                               ':' + affiliate[1])
                    gr.populate(
                        creator_id=self.default_creator,
                        visibility=self._co.group_visibility_all,
                        name=gr_name,
                        description=gr_desc,
                        group_type=group_type,
                    )
                    gr.write_db()
                    # SPREAD DISABLED UNTIL AD IS READY. RMI000 - 20080207
                    # gr.add_spread(self.default_spread)
                    aux_gr.clear()
                    aux_gr.find(current_group)
                    aux_gr.add_member(gr.entity_id)
                    self.members_dict[current_group].append(
                        (gr.entity_id, None))
                else:
                    gr.clear()
                    gr_desc = ('ou_group:' +
                               self.stedkode_dict[ou.entity_id] +
                               ':' + affiliate[1])
                    gr.find(self.description_group_dict[gr_desc])

                gr.add_member(affiliate[0])
                self.members_dict[current_group].append(affiliate)
                self.group_description_dict[gr.entity_id] = gr.description
                self.description_group_dict[gr.description] = gr.entity_id
            else:
                # remove member from members_delete_dict
                self.members_delete_dict[current_group].remove(affiliate)

    # for each child of loaded ou, run recursive function
    children = ou.list_children(perspective)
    for ou_id in children:
        ou.clear()
        ou.find(ou_id)
        self.process_ou_groups(ou, perspective)
    return
def clean_up_ou_groups(self):
    """Function that will remove obsolete groups and members"""
    gr = PosixGroup.PosixGroup(self._db)

    # Remove all members remaining in members_delete_dict
    for group_id in self.members_delete_dict.keys():
        for member in self.members_delete_dict[group_id]:
            member_id = member[0]
            member_type = member[1]
            if member_type is None:
                working_group = group_id
            else:
                working_group = self.description_group_dict[
                    self.group_description_dict[group_id] + ':' +
                    member_type]
            gr.clear()
            gr.find(working_group)
            logger.info("Removing old member %s from group %s",
                        member_id, working_group)
            gr.remove_member(member_id)

    # Remove all empty ou_group:*:TEKNISK/VITENSKAPELIG/STUDENT/etc.
    for member_type in self.member_type_mappings:
        logger.info("Searching empty container groups for %s",
                    member_type)
        groups = gr.search(description='ou_group:%:' + member_type)
        for group in groups:
            gr.clear()
            gr.find(group[0])
            if not list(gr.search_members(group_id=gr.entity_id)):
                obsolete_group = gr.entity_id
                obsolete_group_name = gr.get_name(
                    self._co.group_namespace)
                logger.info("Expiring empty container group %s (%s)",
                            obsolete_group, obsolete_group_name)
                gr.expire_date = mx.DateTime.now()
                logger.info(
                    "Removing spread for empty container group %s (%s)",
                    obsolete_group, obsolete_group_name)
                # SPREAD DISABLED UNTIL AD IS READY. RMI000 - 20080207
                # gr.delete_spread(self.default_spread)
                gr.write_db()
                logger.info("Prefixing its group_name with its group_id")
                gr.update_entity_name(
                    self._co.group_namespace,
                    '#' + str(obsolete_group) + '# ' +
                    obsolete_group_name)
                for old_parent in gr.search(member_id=obsolete_group,
                                            indirect_members=False):
                    gr.clear()
                    gr.find(old_parent["group_id"])
                    logger.info(
                        "Removing its membership from parent group "
                        "%s (%s)", gr.entity_id,
                        gr.get_name(self._co.group_namespace))
                    gr.remove_member(obsolete_group)
                    gr.write_db()

    # Remove all groups remaining in group_delete_list
    for group_id in self.group_delete_list:
        gr.clear()
        gr.find(group_id)
        logger.info("Expiring unused OU group %s (%s)",
                    group_id, gr.description)
        gr.expire_date = mx.DateTime.now()
        logger.info("Removing spread for unused OU group %s (%s)",
                    group_id, gr.description)
        # SPREAD DISABLED UNTIL AD IS READY. RMI000 - 20080207
        # gr.delete_spread(self.default_spread)
        gr.write_db()
        logger.info("Prefixing its group_name with its group_id")
        gr.update_entity_name(
            self._co.group_namespace,
            '#' + str(group_id) + '# ' +
            gr.get_name(self._co.group_namespace))
    return
def process(self):
    # Load group_dict - {ou_id:group_id}
    #      group_delete_list - [ou_id,]
    #      group_description_dict - {group_id:description}
    logger.info("Loading dict: ou > group_id")
    stedkoder = self.ou.get_stedkoder()
    groups = self.gr.search(description='ou_group:*')
    for stedkode in stedkoder:
        self.stedkode_dict[stedkode['ou_id']] = (
            str(stedkode['fakultet']).zfill(2) +
            str(stedkode['institutt']).zfill(2) +
            str(stedkode['avdeling']).zfill(2)
        )
    for group in groups:
        # Cache group description no matter what
        group_id = group['group_id']
        group_desc = group['description']
        self.group_description_dict[group_id] = group_desc
        self.description_group_dict[group_desc] = group_id

        # Skip group caching if group is a container for account members
        skip = False
        for possible_aff in self.member_type_mappings:
            if group['description'].find(possible_aff) > -1:
                skip = True
                break
        if skip is True:
            continue

        # OU groups are cached
        elems = group['description'].split(':')
        if len(elems) == 2:
            self.group_delete_list.append(group['group_id'])

        # Only OU groups (and not containers) are cached!
        for stedkode in stedkoder:
            if (len(elems) == 2 and
                    elems[1] == (str(stedkode['fakultet']).zfill(2) +
                                 str(stedkode['institutt']).zfill(2) +
                                 str(stedkode['avdeling']).zfill(2))):
                self.group_dict[stedkode['ou_id']] = group['group_id']
                break

    # Load members dict - {group_id:(member_id, member_type)}
    # member_type is: GRUPPE, TEKNISK, VITENSKAPELIG, STUDENT, GJEST...
    logger.info("Loading dict: group_id > members")
    aux_group = PosixGroup.PosixGroup(self._db)
    working_group = PosixGroup.PosixGroup(self._db)
    for group in groups:
        group_id = group['group_id']

        # Skip member caching if group is a container for account
        # members
        skip = False
        for possible_aff in self.member_type_mappings:
            if group['description'].find(possible_aff) > -1:
                skip = True
                break
        if skip is True:
            continue

        working_group.clear()
        working_group.find(group_id)
        for member in working_group.search_members(
                group_id=working_group.entity_id):
            # For each account member container, fill in members for OU
            # group
            member_is_ou = True
            member_type = int(member["member_type"])
            member_id = int(member["member_id"])

            for possible_aff in self.member_type_mappings:
                if (member_type == self._co.entity_group and
                        self.group_description_dict[member_id] ==
                        self.group_description_dict[group_id] + ':' +
                        possible_aff):
                    aux_group.clear()
                    aux_group.find(member_id)
                    for aux_member in aux_group.search_members(
                            group_id=aux_group.entity_id):
                        aux_member_id = int(aux_member["member_id"])
                        aux_member_type = int(aux_member["member_type"])
                        if aux_member_type == self._co.entity_account:
                            if group_id not in self.members_dict:
                                self.members_dict[group_id] = []
                                self.members_delete_dict[group_id] = []
                            self.members_dict[group_id].append(
                                (aux_member_id, possible_aff))
                            self.members_delete_dict[group_id].append(
                                (aux_member_id, possible_aff))
                    member_is_ou = False
                    break

            if member_is_ou:
                if group_id not in self.members_dict:
                    self.members_dict[group_id] = []
                    self.members_delete_dict[group_id] = []
                self.members_dict[group_id].append((member_id, None))
                self.members_delete_dict[group_id].append(
                    (member_id, None))
                # 159419L: [(159933L, None), (159938L, None)]

    # Load person to account dict (for local use)
    primary_acs = self.ac.list_accounts_by_type(primary_only=True)
    person2acc = {}
    for primary_ac in primary_acs:
        person2acc[primary_ac['person_id']] = primary_ac['account_id']

    # Load ou affiliates dict - {ou_id:[(person_id, affiliation),]}
    logger.info("Loading dict: ou_id > affiliates")
    self.ou_affiliates_dict = {}

    pe = Factory.get('Person')(self._db)
    affs = pe.list_affiliations()
    for aff in affs:
        for possible_aff, status in self.member_type_mappings.items():
            if aff['status'] in status:
                if aff['person_id'] in person2acc:
                    tmp_acc_id = person2acc[aff['person_id']]
                else:
                    logger.debug(
                        'Could not find primary account for person: %s',
                        aff['person_id'])
                    break
                aff_tuple = (tmp_acc_id, possible_aff)
                # Skip repeated entries and avoid creating entries for
                # an OU until it has affiliates
                if (aff['ou_id'] not in self.ou_affiliates_dict or
                        aff_tuple not in
                        self.ou_affiliates_dict[aff['ou_id']]):
                    if aff['ou_id'] not in self.ou_affiliates_dict:
                        self.ou_affiliates_dict[aff['ou_id']] = []
                    self.ou_affiliates_dict[aff['ou_id']].append(
                        aff_tuple)
                    continue

    # Get start OU from ou_structure tree
    self.ou.clear()
    self.ou.find(self.default_start_ou)

    # Enter recursive function to process groups
    logger.info("Starting to process groups, first out is: %s",
                ou_name(self._co, self.ou))
    self.process_ou_groups(ou=self.ou, perspective=self.perspective)
    logger.info("Finished processing groups")

    # Delete groups belonging to no-longer-imported OUs and remove old
    # members
    logger.info(
        "Starting to clean up the mess left behind "
        "(Please read: expiring and deleting obsolete elements)")
    self.clean_up_ou_groups()
    logger.info("Finished cleaning up")