class group_add(LDAPCreate):
    __doc__ = _('Create a new group.')

    msg_summary = _('Added group "%(value)s"')

    takes_options = LDAPCreate.takes_options + (
        Flag('nonposix',
             cli_name='nonposix',
             doc=_('Create as a non-POSIX group'),
             default=False,
        ),
        Flag('external',
             cli_name='external',
             doc=_('Allow adding external non-IPA members from trusted domains'),
             default=False,
        ),
    )

    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        """Decide the group's flavor before the entry is written.

        External groups get the ipaexternalgroup objectclass and may not
        carry a GID; plain (non --nonposix) groups get posixgroup plus the
        DNA magic GID when no explicit GID was supplied.
        """
        # As both 'external' and 'nonposix' options have default= set for
        # them, they will always be present in options dict, thus we can
        # safely reference the values
        assert isinstance(dn, DN)
        object_classes = entry_attrs['objectclass']
        if options['external']:
            object_classes.append('ipaexternalgroup')
            if 'gidnumber' in options:
                raise errors.MutuallyExclusiveError(
                    reason=_('gid cannot be set for external group'))
        elif not options['nonposix']:
            object_classes.append('posixgroup')
            if 'gidnumber' not in options:
                entry_attrs['gidnumber'] = baseldap.DNA_MAGIC
        return dn
def get_options(self):
    """Yield the inherited options followed by the CLI-only
    --preserve / --no-preserve flags.

    Both flags are marked include='cli' so they never appear in the API
    schema, only on the command line.
    """
    for option in super(user_del, self).get_options():
        yield option
    yield Flag(
        'preserve?',
        include='cli',
        doc=_('Delete a user, keeping the entry available for future use'),
    )
    yield Flag(
        'no_preserve?',
        include='cli',
        doc=_('Delete a user'),
    )
class plugins(LocalOrRemote):
    __doc__ = _('Show all loaded plugins.')

    msg_summary = ngettext(
        '%(count)d plugin loaded', '%(count)d plugins loaded', 0)

    takes_options = LocalOrRemote.takes_options + (
        Flag('all',
             cli_name='all',
             doc=_('retrieve and print all attributes from the server. Affects command output.'),
             exclude='webui',
             flags=['no_output'],
             default=True,
        ),
    )

    has_output = (
        Output('result', dict, 'Dictionary mapping plugin names to bases'),
        Output(
            'count',
            type=int,
            doc=_('Number of plugins loaded'),
        ),
        summary,
    )

    def execute(self, **options):
        """Return every loaded plugin, keyed by name, with its bases."""
        ordered = sorted(self.api.plugins, key=lambda entry: entry.plugin)
        name_to_bases = {}
        for entry in ordered:
            name_to_bases[entry.plugin] = entry.bases
        return dict(
            result=name_to_bases,
            count=len(ordered),
        )
class automountlocation_import(LDAPQuery):
    __doc__ = _('Import automount files for a specific location.')

    takes_args = (
        Str('masterfile',
            label=_('Master file'),
            doc=_('Automount master file.'),
        ),
    )

    takes_options = (
        Flag('continue?',
             cli_name='continue',
             doc=_('Continuous operation mode. Errors are reported but the process continues.'),
        ),
    )

    def __read_mapfile(self, filename):
        """Return the lines of the automount map file *filename*.

        :param filename: path of the map file to read
        :raises errors.NotFound: if the file does not exist
        """
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # raises; the original opened/closed manually and leaked the
            # descriptor on a read error.
            with open(filename, 'r') as fp:
                return fp.readlines()
        # 'except ..., e' was Python-2-only syntax (a SyntaxError on
        # Python 3); the rest of this codebase already uses 'as'.
        except IOError as e:
            if e.errno == 2:  # ENOENT: map file missing
                raise errors.NotFound(
                    reason=_('File %(file)s not found') % {'file': filename})
            raise
class plugins(LocalOrRemote):
    __doc__ = _('Show all loaded plugins.')

    msg_summary = ngettext(
        '%(count)d plugin loaded', '%(count)d plugins loaded', 0)

    takes_options = LocalOrRemote.takes_options + (
        Flag('all',
             cli_name='all',
             doc=_('retrieve and print all attributes from the server. '
                   'Affects command output.'),
             exclude='webui',
             flags=['no_option', 'no_output'],
             default=True,
        ),
    )

    has_output = (
        Output('result', dict, 'Dictionary mapping plugin names to bases'),
        Output(
            'count',
            type=int,
            doc=_('Number of plugins loaded'),
        ),
        summary,
    )

    def execute(self, **options):
        """Map each plugin's fully-qualified class name to the list of
        API namespaces it is registered in."""
        plugin_namespaces = {}
        for namespace in self.api:
            for plugin_obj in self.api[namespace]():
                plugin_cls = type(plugin_obj)
                full_name = '%s.%s' % (plugin_cls.__module__,
                                       plugin_cls.__name__)
                plugin_namespaces.setdefault(full_name, []).append(namespace)
        return dict(result=plugin_namespaces)
class service_add(LDAPCreate):
    __doc__ = _('Add a new IPA new service.')

    msg_summary = _('Added service "%(value)s"')
    member_attributes = ['managedby']
    has_output_params = LDAPCreate.has_output_params + output_params
    takes_options = LDAPCreate.takes_options + (
        Flag('force',
             label=_('Force'),
             doc=_('force principal name even if not in DNS'),
        ),
    )

    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        """Validate and enrich the new service entry before it is written.

        Ensures the target host exists, optionally verifies it in DNS,
        normalizes a supplied certificate, and fills in Kerberos-related
        attributes.
        """
        assert isinstance(dn, DN)
        (service, hostname, realm) = split_principal(keys[-1])
        # host/... principals are managed via host plugins, not here
        if service.lower() == 'host' and not options['force']:
            raise errors.HostService()

        try:
            hostresult = api.Command['host_show'](hostname)['result']
        except errors.NotFound:
            raise errors.NotFound(
                reason=_("The host '%s' does not exist to add a service to.")
                % hostname)

        self.obj.validate_ipakrbauthzdata(entry_attrs)

        cert = options.get('usercertificate')
        if cert:
            dercert = x509.normalize_certificate(cert)
            x509.verify_cert_subject(ldap, hostname, dercert)
            entry_attrs['usercertificate'] = dercert

        if not options.get('force', False):
            # We know the host exists if we've gotten this far but we
            # really want to discourage creating services for hosts that
            # don't exist in DNS.
            util.validate_host_dns(self.log, hostname)
        # Fixed idiom: 'x not in y' instead of 'not x in y'
        if 'managedby' not in entry_attrs:
            entry_attrs['managedby'] = hostresult['dn']

        # Enforce ipaKrbPrincipalAlias to aid case-insensitive searches
        # as krbPrincipalName/krbCanonicalName are case-sensitive in
        # Kerberos schema
        entry_attrs['ipakrbprincipalalias'] = keys[-1]

        # Objectclass ipakrbprincipal providing ipakrbprincipalalias is not
        # in a list of default objectclasses, add it manually
        entry_attrs['objectclass'].append('ipakrbprincipal')

        update_krbticketflags(ldap, entry_attrs, attrs_list, options, False)

        return dn

    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Massage Kerberos and allowed-to-perform attributes for output."""
        set_kerberos_attrs(entry_attrs, options)
        rename_ipaallowedtoperform_from_ldap(entry_attrs, options)
        return dn
class cert(BaseCertObject):
    # Extra read-only/virtual parameters layered on top of BaseCertObject.
    takes_params = BaseCertObject.takes_params + (
        Str(
            'status',
            label=_('Status'),
            flags={'no_create', 'no_update', 'no_search'},
        ),
        Flag(
            'revoked',
            label=_('Revoked'),
            flags={'no_create', 'no_update', 'no_search'},
        ),
        Int(
            'revocation_reason',
            label=_('Revocation reason'),
            doc=_('Reason for revoking the certificate (0-10). Type '
                  '"ipa help cert" for revocation reason details. '),
            minvalue=0,
            maxvalue=10,
            flags={'no_create', 'no_update'},
        ),
    )

    def get_params(self):
        """Yield the base params with cert-specific tweaks, then one
        virtual owner_<name> param per potential owner object type."""
        for param in super(cert, self).get_params():
            if param.name == 'serial_number':
                # serial number is the primary key of this object
                param = param.clone(primary_key=True)
            elif param.name in ('certificate', 'issuer'):
                # these two become searchable here, unlike in the base class
                param = param.clone(flags=param.flags - {'no_search'})
            yield param

        for owner in self._owners():
            yield owner.primary_key.clone_rename(
                'owner_{0}'.format(owner.name),
                required=False,
                multivalue=True,
                primary_key=False,
                label=_("Owner %s") % owner.object_name,
                flags={'no_create', 'no_update', 'no_search'},
            )

    def _owners(self):
        # Object types whose entries may own a certificate.
        for name in ('user', 'host', 'service'):
            yield self.api.Object[name]

    def _fill_owners(self, obj):
        """Translate the raw 'owner' DN list into owner_user/owner_host/
        owner_service primary-key lists, keyed by container membership."""
        dns = obj.pop('owner', None)
        if dns is None:
            return

        for owner in self._owners():
            container_dn = DN(owner.container_dn, self.api.env.basedn)
            name = 'owner_' + owner.name
            for dn in dns:
                # endswith(container_dn, 1) — presumably matches DNs exactly
                # one RDN below the container; confirm against the DN API.
                if dn.endswith(container_dn, 1):
                    value = owner.get_primary_key_from_dn(dn)
                    obj.setdefault(name, []).append(value)
class user_find(baseuser_find):
    __doc__ = _('Search for users.')

    member_attributes = ['memberof']
    has_output_params = baseuser_find.has_output_params + user_output_params

    msg_summary = ngettext(
        '%(count)d user matched', '%(count)d users matched', 0)

    takes_options = LDAPSearch.takes_options + (
        Flag('whoami',
             label=_('Self'),
             doc=_('Display user record for current Kerberos principal'),
        ),
    )

    def pre_callback(self, ldap, filter, attrs_list, base_dn, scope,
                     *keys, **options):
        """Adjust the filter and search base before the LDAP search runs.

        --whoami short-circuits to a filter matching the current Kerberos
        principal.  Otherwise the search base is picked from the tri-state
        'preserved' option: None searches both active and preserved trees,
        True only the preserved (delete) container, False only the active
        container.
        """
        assert isinstance(base_dn, DN)
        self.pre_common_callback(ldap, filter, attrs_list, base_dn, scope,
                                 *keys, **options)
        if options.get('whoami'):
            return ("(&(objectclass=posixaccount)(krbprincipalname=%s))" %
                    getattr(context, 'principal'), base_dn, scope)

        preserved = options.get('preserved', False)
        if preserved is None:
            # both trees: widen to the basedn with a subtree scope
            base_dn = self.api.env.basedn
            scope = ldap.SCOPE_SUBTREE
        elif preserved:
            base_dn = DN(self.obj.delete_container_dn, self.api.env.basedn)
        else:
            base_dn = DN(self.obj.active_container_dn, self.api.env.basedn)
        return (filter, base_dn, scope)

    def post_callback(self, ldap, entries, truncated, *args, **options):
        """Filter and enrich the results after the search.

        When 'preserved' is None the widened subtree search may match
        unrelated entries, so only entries under the active or preserved
        user containers are kept.
        """
        if options.get('pkey_only', False):
            return truncated
        if options.get('preserved', False) is None:
            base_dns = (
                DN(self.obj.active_container_dn, self.api.env.basedn),
                DN(self.obj.delete_container_dn, self.api.env.basedn),
            )
            entries[:] = [
                e for e in entries
                if any(e.dn.endswith(bd) for bd in base_dns)
            ]
        self.post_common_callback(ldap, entries, lockout=False, **options)
        for entry in entries:
            self.obj.get_preserved_attribute(entry, options)
        return truncated
class aci_add(crud.Create):
    """
    Create new ACI.
    """
    NO_CLI = True
    msg_summary = _('Created ACI "%(value)s"')

    takes_options = (
        _prefix_option,
        Flag('test?',
             doc=_('Test the ACI syntax but don\'t write anything'),
             default=False,
        ),
    )

    def execute(self, aciname, **kw):
        """
        Execute the aci-create operation.

        Returns the entry as it will be created in LDAP.

        :param aciname: The name of the ACI being added.
        :param kw: Keyword arguments for the other LDAP attributes.
        """
        assert 'aciname' not in kw
        ldap = self.api.Backend.ldap2

        newaci = _make_aci(ldap, None, aciname, kw)

        # all ACIs live in the 'aci' attribute of the basedn entry
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])

        acis = _convert_strings_to_acis(entry.get('aci', []))
        for a in acis:
            # FIXME: add check for permission_group = permission_group
            # reject both exact duplicates and name collisions
            if a.isequal(newaci) or newaci.name == a.name:
                raise errors.DuplicateEntry()

        newaci_str = unicode(newaci)
        entry.setdefault('aci', []).append(newaci_str)

        if not kw.get('test', False):
            # with --test the write is skipped but the result is still built
            ldap.update_entry(entry)

        if kw.get('raw', False):
            result = dict(aci=unicode(newaci_str))
        else:
            result = _aci_to_kw(ldap, newaci, kw.get('test', False))
        return dict(
            result=result,
            value=pkey_to_value(aciname, kw),
        )
def get_options(self):
    """Yield the inherited options, adding a --no-members flag whenever
    this command's output contains entry (or entry-list) sections."""
    for option in super(CertMethod, self).get_options():
        yield option

    emits_entries = any(
        isinstance(o, (output.Entry, output.ListOfEntries))
        for o in self.has_output)
    if emits_entries:
        yield Flag(
            'no_members',
            doc=_("Suppress processing of membership attributes."),
            exclude='webui',
            flags={'no_output'},
        )
class netgroup_find(LDAPSearch):
    __doc__ = _('Search for a netgroup.')

    member_attributes = ['member', 'memberuser', 'memberhost', 'memberof']
    has_output_params = LDAPSearch.has_output_params + output_params

    msg_summary = ngettext('%(count)d netgroup matched',
                           '%(count)d netgroups matched', 0)

    takes_options = LDAPSearch.takes_options + (
        Flag(
            'private',
            exclude='webui',
            flags=['no_option', 'no_output'],
        ),
        Flag(
            'managed',
            cli_name='managed',
            doc=_('search for managed groups'),
            default_from=lambda private: private,
        ),
    )

    def pre_callback(self, ldap, filter, attrs_list, base_dn, scope,
                     *args, **options):
        """Hide private mepManagedEntry netgroups by default; with
        --managed, search only for them instead."""
        assert isinstance(base_dn, DN)
        # Do not display private mepManagedEntry netgroups by default
        # If looking for managed groups, we need to omit the negation
        # search filter
        mep_kw = {'objectclass': ['mepManagedEntry']}
        rules = ldap.MATCH_ALL if options['managed'] else ldap.MATCH_NONE
        local_filter = ldap.make_filter(mep_kw, rules=rules)
        filter = ldap.combine_filters((local_filter, filter),
                                      rules=ldap.MATCH_ALL)
        return (filter, base_dn, scope)
class automember_find_orphans(LDAPSearch):
    __doc__ = _("""
    Search for orphan automember rules. The command might need to be run as
    a privileged user user to get all orphan rules.
    """)

    takes_options = group_type + (
        Flag('remove?',
             doc=_("Remove orphan automember rules"),
        ),
    )

    msg_summary = ngettext('%(count)d rules matched',
                           '%(count)d rules matched', 0)

    def execute(self, *keys, **options):
        """Run the inherited search, then keep only the rules whose target
        group no longer exists; with --remove, delete those orphan rules."""
        results = super().execute(*keys, **options)
        remove_option = options.get('remove')
        pkey_only = options.get('pkey_only', False)
        ldap = self.obj.backend
        orphans = []
        for entry in results["result"]:
            am_dn_entry = entry['automembertargetgroup'][0]
            # Make DN for --raw option
            if not isinstance(am_dn_entry, DN):
                am_dn_entry = DN(am_dn_entry)
            try:
                ldap.get_entry(am_dn_entry)
            except errors.NotFound:
                # target group is gone -> this rule is an orphan
                if pkey_only:
                    # For pkey_only remove automembertargetgroup
                    del (entry['automembertargetgroup'])
                orphans.append(entry)
                if remove_option:
                    ldap.delete_entry(entry['dn'])
        # rewrite the result set in place to contain only the orphans
        results["result"][:] = orphans
        results["count"] = len(orphans)
        return results

    def pre_callback(self, ldap, filters, attrs_list, base_dn, scope,
                     *args, **options):
        """Point the search at the container for the requested rule type
        and make sure the target-group attribute is fetched."""
        assert isinstance(base_dn, DN)
        scope = ldap.SCOPE_SUBTREE
        ndn = DN(('cn', options['type']), base_dn)
        if options.get('pkey_only', False):
            # For pkey_only add automembertargetgroup
            attrs_list.append('automembertargetgroup')
        return filters, ndn, scope
class user_find(baseuser_find):
    __doc__ = _('Search for users.')

    member_attributes = ['memberof']
    has_output_params = baseuser_find.has_output_params + user_output_params

    takes_options = LDAPSearch.takes_options + (
        Flag('whoami',
             label=_('Self'),
             doc=_('Display user record for current Kerberos principal'),
        ),
    )

    def execute(self, *args, **options):
        """Normalize DN-valued options, then run the inherited search."""
        # assure the manager attr is a dn, not just a bare uid
        manager = options.get('manager')
        if manager is not None:
            options['manager'] = self.obj.normalize_manager(
                manager, self.obj.active_container_dn)

        # Ensure that the RADIUS config link is a dn, not just the name
        cl = 'ipatokenradiusconfiglink'
        if cl in options:
            options[cl] = self.api.Object['radiusproxy'].get_dn(options[cl])

        # BUG FIX: the bound super() method already receives self; the old
        # call passed self again as the first positional argument, shifting
        # every search criterion by one.
        return super(user_find, self).execute(*args, **options)

    def pre_callback(self, ldap, filter, attrs_list, base_dn, scope,
                     *keys, **options):
        """With --whoami, replace the filter with one matching the current
        Kerberos principal; otherwise pass the search through unchanged."""
        assert isinstance(base_dn, DN)
        if options.get('whoami'):
            return ("(&(objectclass=posixaccount)(krbprincipalname=%s))" %
                    getattr(context, 'principal'), base_dn, scope)

        return (filter, base_dn, scope)

    def post_callback(self, ldap, entries, truncated, *args, **options):
        """Enrich each result: manager DN -> uid, password/keytab flags,
        nsaccountlock conversion, SSH public key fingerprints."""
        if options.get('pkey_only', False):
            return truncated
        for attrs in entries:
            self.obj.convert_manager(attrs, **options)
            self.obj.get_password_attributes(ldap, attrs.dn, attrs)
            convert_nsaccountlock(attrs)
            convert_sshpubkey_post(ldap, attrs.dn, attrs)
        return truncated

    msg_summary = ngettext('%(count)d user matched',
                           '%(count)d users matched', 0)
class host_enable_mail(LDAPQuery):
    __doc__ = _('Enable mail sending for the host.')

    has_output = output.standard_value
    msg_summary = _('Enabled mail sending for host "%(value)s"')

    takes_options = (
        Str('primarymail?',
            cli_name='primary_mail',
            label=_('Primary mail address')),
        Str('sendalias*',
            cli_name='send_alias',
            label=_('Allowed sender aliases')),
        Flag('cansendexternally',
             cli_name='can_send_externally',
             label='Can send mails to external locations',
             default=False),
    )

    def execute(self, *args, **kw):
        """Mark the host entry as a mail sender and record its mail
        attributes; fails if mail sending is already enabled."""
        dn = self.obj.get_dn(*args, **kw)
        backend = self.obj.backend
        entry = backend.get_entry(dn, ['objectclass', 'serverhostname'])

        # a host that already carries the objectclass is already enabled
        if 'mailsenderentity' in entry['objectclass']:
            raise errors.AlreadyActive()
        entry['objectclass'].append('mailsenderentity')

        if 'primarymail' in kw:
            entry['primarymail'] = kw['primarymail']
        else:
            # derive the primary address from the hostname and IPA config
            config = backend.get_ipa_config()
            entry['primarymail'] = normalize_and_validate_email(
                entry['serverhostname'], config)

        if 'sendalias' in kw:
            entry['sendalias'] = list(kw['sendalias'])

        entry['cansendexternally'] = kw['cansendexternally']
        backend.update_entry(entry)

        return dict(
            result=True,
            value=pkey_to_value(args[0], kw),
        )
class cert_revoke(VirtualCommand):
    __doc__ = _('Revoke a certificate.')

    takes_args = _serial_number
    has_output_params = (
        Flag('revoked',
             label=_('Revoked'),
        ),
    )
    operation = "revoke certificate"

    # FIXME: The default is 0. Is this really an Int param?
    takes_options = (
        Int('revocation_reason',
            label=_('Reason'),
            doc=_('Reason for revoking the certificate (0-10)'),
            minvalue=0,
            maxvalue=10,
            default=0,
            autofill=True),
    )

    def execute(self, serial_number, **kw):
        """Revoke the certificate with the given serial number via the RA
        backend, after an access check (or a fallback principal check)."""
        ca_enabled_check()
        # NOTE(review): hostname is assigned but never used in this chunk
        hostname = None
        try:
            self.check_access()
        except errors.ACIError as acierr:
            self.debug(
                "Not granted by ACI to revoke certificate, looking at principal"
            )
            try:
                # Let cert_show() handle verifying that the subject of the
                # cert we're dealing with matches the hostname in the principal
                # NOTE(review): 'result' is unused; cert_show appears to be
                # called only for its access-check side effect — confirm.
                result = api.Command['cert_show'](
                    unicode(serial_number))['result']
            except errors.NotImplementedError:
                pass
        revocation_reason = kw['revocation_reason']
        # reason 7 is explicitly rejected by this command
        if revocation_reason == 7:
            raise errors.CertificateOperationError(
                error=_('7 is not a valid revocation reason'))
        return dict(result=self.Backend.ra.revoke_certificate(
            serial_number, revocation_reason=revocation_reason))
class dhcpconfig(LDAPObject):
    """
    DHCP object.
    """
    # all dhcpservice entries live under ou=dhcp
    container_dn = DN(('ou', 'dhcp'))
    object_name = _('DHCP configuration')
    object_class = ['dhcpservice']
    default_attributes = ['cn', 'dhcpStatements', 'dhcpoption', 'dhcpcomments']
    allow_rename = False
    label = _('DHCP configuration')
    label_singular = _('DHCP configuration')

    takes_params = (
        Str('cn',
            cli_name='config',
            label=_('Config'),
            doc=_('DHCP Configuration - There should be just one, called "config".'),
            primary_key=True,
        ),
        Str('dhcpstatements*',
            cli_name='statements',
            label=_('Statements'),
            doc=_('A DHCP configuration statement other than option'),
        ),
        Str('dhcpoption*',
            cli_name='options',
            label=_('Options'),
            doc=_('A DHCP configuration option'),
        ),
        Str('dhcpcomments?',
            cli_name='comment',
            label=_('Comment'),
            doc=_('A DHCP comment'),
        ),
        # virtual attribute: not stored in the entry itself
        Flag('increment?',
             cli_name='increment',
             label=_('increment'),
             doc=_('Increment serial number - causes the servers to restart'),
             flags=['virtual_attribute'],
        ),
    )
class cert_remove_hold(VirtualCommand):
    __doc__ = _('Take a revoked certificate off hold.')

    takes_args = _serial_number

    has_output_params = (
        Flag(
            'unrevoked',
            label=_('Unrevoked'),
        ),
        Str(
            'error_string',
            label=_('Error'),
        ),
    )

    operation = "certificate remove hold"

    def execute(self, serial_number, **kw):
        """Ask the RA backend to lift the hold on the given certificate
        after verifying the caller's access."""
        self.check_access()
        ra_result = self.Backend.ra.take_certificate_off_hold(serial_number)
        return dict(result=ra_result)
class permission_del(LDAPDelete):
    __doc__ = _('Delete a permission.')

    msg_summary = _('Deleted permission "%(value)s"')

    takes_options = LDAPDelete.takes_options + (
        Flag('force',
             label=_('Force'),
             flags=['no_option', 'no_output'],
             doc=_('force delete of SYSTEM permissions'),
        ),
    )

    def pre_callback(self, ldap, dn, *keys, **options):
        """Block deletion of SYSTEM permissions unless --force, and drop
        the backing ACI (tolerating its absence)."""
        assert isinstance(dn, DN)
        if not options.get('force'):
            # check_system is only consulted when force is off
            if not self.obj.check_system(ldap, dn, *keys):
                raise errors.ACIError(
                    info=_('A SYSTEM permission may not be removed'))
        # remove permission even when the underlying ACI is missing
        try:
            self.api.Command.aci_del(keys[-1], aciprefix=ACI_PREFIX)
        except errors.NotFound:
            pass
        return dn
class baseuser(LDAPObject): """ baseuser object. """ stage_container_dn = api.env.container_stageuser active_container_dn = api.env.container_user delete_container_dn = api.env.container_deleteuser object_class = ['posixaccount'] object_class_config = 'ipauserobjectclasses' possible_objectclasses = [ 'meporiginentry', 'ipauserauthtypeclass', 'ipauser', 'ipatokenradiusproxyuser', 'ipacertmapobject' ] disallow_object_classes = ['krbticketpolicyaux'] permission_filter_objectclasses = ['posixaccount'] search_attributes_config = 'ipausersearchfields' default_attributes = [ 'uid', 'givenname', 'sn', 'homedirectory', 'loginshell', 'uidnumber', 'gidnumber', 'mail', 'ou', 'telephonenumber', 'title', 'memberof', 'nsaccountlock', 'memberofindirect', 'ipauserauthtype', 'userclass', 'ipatokenradiusconfiglink', 'ipatokenradiususername', 'krbprincipalexpiration', 'usercertificate;binary', 'krbprincipalname', 'krbcanonicalname', 'ipacertmapdata' ] search_display_attributes = [ 'uid', 'givenname', 'sn', 'homedirectory', 'krbcanonicalname', 'krbprincipalname', 'loginshell', 'mail', 'telephonenumber', 'title', 'nsaccountlock', 'uidnumber', 'gidnumber', 'sshpubkeyfp', ] uuid_attribute = 'ipauniqueid' attribute_members = { 'manager': ['user'], 'memberof': ['group', 'netgroup', 'role', 'hbacrule', 'sudorule'], 'memberofindirect': ['group', 'netgroup', 'role', 'hbacrule', 'sudorule'], } allow_rename = True bindable = True password_attributes = [('userpassword', 'has_password'), ('krbprincipalkey', 'has_keytab')] label = _('Users') label_singular = _('User') takes_params = ( Str('uid', pattern=PATTERN_GROUPUSER_NAME, pattern_errmsg='may only include letters, numbers, _, -, . 
and $', maxlength=255, cli_name='login', label=_('User login'), primary_key=True, default_from=lambda givenname, sn: givenname[0] + sn, normalizer=lambda value: value.lower(), ), Str('givenname', cli_name='first', label=_('First name'), ), Str('sn', cli_name='last', label=_('Last name'), ), Str('cn', label=_('Full name'), default_from=lambda givenname, sn: '%s %s' % (givenname, sn), autofill=True, ), Str('displayname?', label=_('Display name'), default_from=lambda givenname, sn: '%s %s' % (givenname, sn), autofill=True, ), Str('initials?', label=_('Initials'), default_from=lambda givenname, sn: '%c%c' % (givenname[0], sn[0]), autofill=True, ), Str('homedirectory?', cli_name='homedir', label=_('Home directory'), ), Str('gecos?', label=_('GECOS'), default_from=lambda givenname, sn: '%s %s' % (givenname, sn), autofill=True, ), Str('loginshell?', cli_name='shell', label=_('Login shell'), ), Principal( 'krbcanonicalname?', validate_realm, label=_('Principal name'), flags={'no_option', 'no_create', 'no_update', 'no_search'}, normalizer=normalize_user_principal ), Principal( 'krbprincipalname*', validate_realm, cli_name='principal', label=_('Principal alias'), default_from=lambda uid: kerberos.Principal( uid.lower(), realm=api.env.realm), autofill=True, normalizer=normalize_user_principal, ), DateTime('krbprincipalexpiration?', cli_name='principal_expiration', label=_('Kerberos principal expiration'), ), DateTime('krbpasswordexpiration?', cli_name='password_expiration', label=_('User password expiration'), ), Str('mail*', cli_name='email', label=_('Email address'), ), Password('userpassword?', cli_name='password', label=_('Password'), doc=_('Prompt to set the user password'), # FIXME: This is temporary till bug is fixed causing updates to # bomb out via the webUI. 
exclude='webui', ), Flag('random?', doc=_('Generate a random user password'), flags=('no_search', 'virtual_attribute'), default=False, ), Str('randompassword?', label=_('Random password'), flags=('no_create', 'no_update', 'no_search', 'virtual_attribute'), ), Int('uidnumber?', cli_name='uid', label=_('UID'), doc=_('User ID Number (system will assign one if not provided)'), minvalue=1, ), Int('gidnumber?', label=_('GID'), doc=_('Group ID Number'), minvalue=1, ), Str('street?', cli_name='street', label=_('Street address'), ), Str('l?', cli_name='city', label=_('City'), ), Str('st?', cli_name='state', label=_('State/Province'), ), Str('postalcode?', label=_('ZIP'), ), Str('telephonenumber*', cli_name='phone', label=_('Telephone Number') ), Str('mobile*', label=_('Mobile Telephone Number') ), Str('pager*', label=_('Pager Number') ), Str('facsimiletelephonenumber*', cli_name='fax', label=_('Fax Number'), ), Str('ou?', cli_name='orgunit', label=_('Org. Unit'), ), Str('title?', label=_('Job Title'), ), # keep backward compatibility using single value manager option Str('manager?', label=_('Manager'), ), Str('carlicense*', label=_('Car License'), ), Str('ipasshpubkey*', validate_sshpubkey, cli_name='sshpubkey', label=_('SSH public key'), normalizer=normalize_sshpubkey, flags=['no_search'], ), Str('sshpubkeyfp*', label=_('SSH public key fingerprint'), flags={'virtual_attribute', 'no_create', 'no_update', 'no_search'}, ), StrEnum('ipauserauthtype*', cli_name='user_auth_type', label=_('User authentication types'), doc=_('Types of supported user authentication'), values=(u'password', u'radius', u'otp'), ), Str('userclass*', cli_name='class', label=_('Class'), doc=_('User category (semantics placed on this attribute are for ' 'local interpretation)'), ), Str('ipatokenradiusconfiglink?', cli_name='radius', label=_('RADIUS proxy configuration'), ), Str('ipatokenradiususername?', cli_name='radius_username', label=_('RADIUS proxy username'), ), Str('departmentnumber*', 
label=_('Department Number'), ), Str('employeenumber?', label=_('Employee Number'), ), Str('employeetype?', label=_('Employee Type'), ), Str('preferredlanguage?', label=_('Preferred Language'), pattern='^(([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})?(;q\=((0(\.[0-9]{0,3})?)|(1(\.0{0,3})?)))?' \ + '(\s*,\s*[a-zA-Z]{1,8}(-[a-zA-Z]{1,8})?(;q\=((0(\.[0-9]{0,3})?)|(1(\.0{0,3})?)))?)*)|(\*))$', pattern_errmsg='must match RFC 2068 - 14.4, e.g., "da, en-gb;q=0.8, en;q=0.7"', ), Bytes('usercertificate*', validate_certificate, cli_name='certificate', label=_('Certificate'), doc=_('Base-64 encoded user certificate'), ), Str( 'ipacertmapdata*', cli_name='certmapdata', label=_('Certificate mapping data'), doc=_('Certificate mapping data'), flags=['no_create', 'no_update', 'no_search'], ), ) def normalize_and_validate_email(self, email, config=None): if not config: config = self.backend.get_ipa_config() # check if default email domain should be added defaultdomain = config.get('ipadefaultemaildomain', [None])[0] if email: norm_email = [] if not isinstance(email, (list, tuple)): email = [email] for m in email: if isinstance(m, six.string_types): if '@' not in m and defaultdomain: m = m + u'@' + defaultdomain if not Email(m): raise errors.ValidationError(name='email', error=_('invalid e-mail format: %(email)s') % dict(email=m)) norm_email.append(m) else: if not Email(m): raise errors.ValidationError(name='email', error=_('invalid e-mail format: %(email)s') % dict(email=m)) norm_email.append(m) return norm_email return email def normalize_manager(self, manager, container): """ Given a userid verify the user's existence (in the appropriate containter) and return the dn. 
""" if not manager: return None if not isinstance(manager, list): manager = [manager] try: container_dn = DN(container, api.env.basedn) for i, mgr in enumerate(manager): if isinstance(mgr, DN) and mgr.endswith(container_dn): continue entry_attrs = self.backend.find_entry_by_attr( self.primary_key.name, mgr, self.object_class, [''], container_dn ) manager[i] = entry_attrs.dn except errors.NotFound: raise errors.NotFound(reason=_('manager %(manager)s not found') % dict(manager=mgr)) return manager def _user_status(self, user, container): assert isinstance(user, DN) return user.endswith(container) def active_user(self, user): assert isinstance(user, DN) return self._user_status(user, DN(self.active_container_dn, api.env.basedn)) def stage_user(self, user): assert isinstance(user, DN) return self._user_status(user, DN(self.stage_container_dn, api.env.basedn)) def delete_user(self, user): assert isinstance(user, DN) return self._user_status(user, DN(self.delete_container_dn, api.env.basedn)) def convert_usercertificate_pre(self, entry_attrs): if 'usercertificate' in entry_attrs: entry_attrs['usercertificate;binary'] = entry_attrs.pop( 'usercertificate') def convert_usercertificate_post(self, entry_attrs, **options): if 'usercertificate;binary' in entry_attrs: entry_attrs['usercertificate'] = entry_attrs.pop( 'usercertificate;binary') def convert_attribute_members(self, entry_attrs, *keys, **options): super(baseuser, self).convert_attribute_members( entry_attrs, *keys, **options) if options.get("raw", False): return # due the backward compatibility, managers have to be returned in # 'manager' attribute instead of 'manager_user' try: entry_attrs['failed_manager'] = entry_attrs.pop('manager') except KeyError: pass try: entry_attrs['manager'] = entry_attrs.pop('manager_user') except KeyError: pass
if six.PY3: unicode = str __doc__ = _(""" Baseuser This contains common definitions for user/stageuser """) register = Registry() NO_UPG_MAGIC = '__no_upg__' baseuser_output_params = ( Flag('has_keytab', label=_('Kerberos keys available'), ), ) UPG_DEFINITION_DN = DN(('cn', 'UPG Definition'), ('cn', 'Definitions'), ('cn', 'Managed Entries'), ('cn', 'etc'), api.env.basedn) def validate_nsaccountlock(entry_attrs): if 'nsaccountlock' in entry_attrs: nsaccountlock = entry_attrs['nsaccountlock'] if not isinstance(nsaccountlock, (bool, Bool)): if not isinstance(nsaccountlock, six.string_types):
class topologysegment_reinitialize(LDAPQuery):
    __doc__ = _('Request a full re-initialization of the node '
                'retrieving data from the other node.')

    has_output = output.standard_value
    msg_summary = _('%(value)s')

    takes_options = (
        Flag(
            'left?',
            doc=_('Initialize left node'),
            default=False,
        ),
        Flag(
            'right?',
            doc=_('Initialize right node'),
            default=False,
        ),
        Flag(
            'stop?',
            doc=_('Stop already started refresh of chosen node(s)'),
            default=False,
        ),
    )

    def execute(self, *keys, **options):
        """Start (or, with --stop, cancel) a full replica refresh on one
        side of the topology segment by writing the appropriate
        nsds5beginreplicarefresh;left/right attribute.

        Exactly one of --left / --right must be given.
        """
        dn = self.obj.get_dn(*keys, **options)
        validate_domain_level(self.api)

        entry = self.obj.backend.get_entry(dn, [
            'nsds5beginreplicarefresh;left',
            'nsds5beginreplicarefresh;right'
        ])

        left = options.get('left')
        right = options.get('right')
        stop = options.get('stop')

        if not left and not right:
            raise errors.OptionError(
                _('left or right node has to be specified'))

        if left and right:
            raise errors.OptionError(_('only one node can be specified'))

        action = u'start'
        msg = _('Replication refresh for segment: "%(pkey)s" requested.')

        if stop:
            action = u'stop'
            msg = _('Stopping of replication refresh for segment: "'
                    '%(pkey)s" requested.')

        # left and right are swapped because internally it's a push not
        # pull operation
        if right:
            entry['nsds5beginreplicarefresh;left'] = [action]
        if left:
            entry['nsds5beginreplicarefresh;right'] = [action]

        self.obj.backend.update_entry(entry)

        msg = msg % {'pkey': keys[-1]}

        return dict(
            result=True,
            value=msg,
        )
class vault_archive(ModVaultData):
    __doc__ = _('Archive data into a vault.')

    takes_options = (
        Bytes(
            'data?',
            doc=_('Binary data to archive'),
        ),
        Str(  # TODO: use File parameter
            'in?',
            doc=_('File containing data to archive'),
        ),
        Str(
            'password?',
            cli_name='password',
            doc=_('Vault password'),
        ),
        Str(  # TODO: use File parameter
            'password_file?',
            cli_name='password_file',
            doc=_('File containing the vault password'),
        ),
        Flag(
            'override_password?',
            doc=_('Override existing password'),
        ),
    )

    @classmethod
    def __NO_CLI_getter(cls):
        # Hide this command from the CLI when the server-side internal
        # command is only the client-side stub (i.e. no real server plugin).
        return (api.Command.get_plugin('vault_archive_internal') is
                _fake_vault_archive_internal)

    NO_CLI = classproperty(__NO_CLI_getter)

    @property
    def api_version(self):
        # Track the API version of the internal command we forward to.
        return self.api.Command.vault_archive_internal.api_version

    def get_args(self):
        """Yield the internal command's args first, then our own."""
        for arg in self.api.Command.vault_archive_internal.args():
            yield arg
        for arg in super(vault_archive, self).get_args():
            yield arg

    def get_options(self):
        """Yield the internal command's options, minus the transport-only
        ones this command computes itself, followed by our own options."""
        for option in self.api.Command.vault_archive_internal.options():
            if option.name not in ('nonce',
                                   'session_key',
                                   'vault_data',
                                   'version'):
                yield option
        for option in super(vault_archive, self).get_options():
            yield option

    def get_output_params(self):
        """Expose the internal command's output params plus our own."""
        for param in self.api.Command.vault_archive_internal.output_params():
            yield param
        for param in super(vault_archive, self).get_output_params():
            yield param

    def _iter_output(self):
        return self.api.Command.vault_archive_internal.output()

    def _wrap_data(self, algo, json_vault_data):
        """Encrypt the JSON-encoded vault payload with the session key.

        :param algo: symmetric cipher algorithm instance (supplies
            block_size in bits and the key material)
        :param bytes json_vault_data: dumped vault data
        :return: (nonce, wrapped_vault_data) tuple; the nonce doubles as
            the CBC IV
        """
        nonce = os.urandom(algo.block_size // 8)

        # wrap vault_data with session key
        padder = PKCS7(algo.block_size).padder()
        padded_data = padder.update(json_vault_data)
        padded_data += padder.finalize()

        cipher = Cipher(algo, modes.CBC(nonce), backend=default_backend())
        encryptor = cipher.encryptor()
        wrapped_vault_data = encryptor.update(padded_data) + encryptor.finalize()

        return nonce, wrapped_vault_data

    def forward(self, *args, **options):
        """Client-side flow: gather the secret (inline data, file, or empty),
        encrypt it according to the vault type, wrap it with a fresh session
        key and hand everything to vault_archive_internal."""
        data = options.get('data')
        input_file = options.get('in')

        password = options.get('password')
        password_file = options.get('password_file')

        override_password = options.pop('override_password', False)

        # don't send these parameters to server
        if 'data' in options:
            del options['data']
        if 'in' in options:
            del options['in']
        if 'password' in options:
            del options['password']
        if 'password_file' in options:
            del options['password_file']

        # get data
        if data and input_file:
            raise errors.MutuallyExclusiveError(
                reason=_('Input data specified multiple times'))

        elif data:
            if len(data) > MAX_VAULT_DATA_SIZE:
                raise errors.ValidationError(name="data", error=_(
                    "Size of data exceeds the limit. Current vault data size "
                    "limit is %(limit)d B")
                    % {'limit': MAX_VAULT_DATA_SIZE})

        elif input_file:
            # Check the size before reading so we never load an oversized
            # file into memory.
            try:
                stat = os.stat(input_file)
            except OSError as exc:
                raise errors.ValidationError(name="in", error=_(
                    "Cannot read file '%(filename)s': %(exc)s")
                    % {'filename': input_file, 'exc': exc.args[1]})
            if stat.st_size > MAX_VAULT_DATA_SIZE:
                raise errors.ValidationError(name="in", error=_(
                    "Size of data exceeds the limit. Current vault data size "
                    "limit is %(limit)d B")
                    % {'limit': MAX_VAULT_DATA_SIZE})
            data = validated_read('in', input_file, mode='rb')

        else:
            # No input at all archives an empty secret.
            data = b''

        if self.api.env.in_server:
            backend = self.api.Backend.ldap2
        else:
            backend = self.api.Backend.rpcclient
        if not backend.isconnected():
            backend.connect()

        # retrieve vault info
        vault = self.api.Command.vault_show(*args, **options)['result']

        vault_type = vault['ipavaulttype'][0]

        if vault_type == u'standard':
            # Standard vaults store the data without client-side encryption.
            encrypted_key = None

        elif vault_type == u'symmetric':
            # get password
            if password and password_file:
                raise errors.MutuallyExclusiveError(
                    reason=_('Password specified multiple times'))

            elif password:
                pass

            elif password_file:
                password = validated_read('password-file',
                                          password_file,
                                          encoding='utf-8')
                password = password.rstrip('\n')

            else:
                if override_password:
                    password = self.api.Backend.textui.prompt_password(
                        'New password')
                else:
                    password = self.api.Backend.textui.prompt_password(
                        'Password', confirm=False)

            if not override_password:
                # verify password by retrieving existing data
                opts = options.copy()
                opts['password'] = password
                try:
                    self.api.Command.vault_retrieve(*args, **opts)
                except errors.NotFound:
                    # NOTE(review): NotFound here presumably means the vault
                    # holds no data yet (password was accepted) — confirm
                    # against vault_retrieve semantics.
                    pass

            salt = vault['ipavaultsalt'][0]

            # generate encryption key from vault password
            encryption_key = generate_symmetric_key(password, salt)

            # encrypt data with encryption key
            data = encrypt(data, symmetric_key=encryption_key)

            encrypted_key = None

        elif vault_type == u'asymmetric':
            public_key = vault['ipavaultpublickey'][0]

            # generate encryption key
            encryption_key = base64.b64encode(os.urandom(32))

            # encrypt data with encryption key
            data = encrypt(data, symmetric_key=encryption_key)

            # encrypt encryption key with public key
            encrypted_key = encrypt(encryption_key, public_key=public_key)

        else:
            raise errors.ValidationError(
                name='vault_type',
                error=_('Invalid vault type'))

        # JSON payload sent to the server; binary fields are base64-encoded.
        vault_data = {
            'data': base64.b64encode(data).decode('utf-8')
        }

        if encrypted_key:
            vault_data[u'encrypted_key'] = base64.b64encode(encrypted_key)\
                .decode('utf-8')

        json_vault_data = json.dumps(vault_data).encode('utf-8')

        # generate session key
        algo = self._generate_session_key()
        # wrap vault data
        nonce, wrapped_vault_data = self._wrap_data(algo, json_vault_data)
        options.update(
            nonce=nonce,
            vault_data=wrapped_vault_data
        )
        return self.internal(algo, *args, **options)
class vault_mod(Local):
    __doc__ = _('Modify a vault.')

    takes_options = (
        Flag(
            'change_password?',
            doc=_('Change password'),
        ),
        Str(
            'old_password?',
            cli_name='old_password',
            doc=_('Old vault password'),
        ),
        Str(  # TODO: use File parameter
            'old_password_file?',
            cli_name='old_password_file',
            doc=_('File containing the old vault password'),
        ),
        Str(
            'new_password?',
            cli_name='new_password',
            doc=_('New vault password'),
        ),
        Str(  # TODO: use File parameter
            'new_password_file?',
            cli_name='new_password_file',
            doc=_('File containing the new vault password'),
        ),
        Bytes(
            'private_key?',
            cli_name='private_key',
            doc=_('Old vault private key'),
        ),
        Str(  # TODO: use File parameter
            'private_key_file?',
            cli_name='private_key_file',
            doc=_('File containing the old vault private key'),
        ),
        Str(  # TODO: use File parameter
            'public_key_file?',
            cli_name='public_key_file',
            doc=_('File containing the new vault public key'),
        ),
    )

    @classmethod
    def __NO_CLI_getter(cls):
        # Hide this command from the CLI when only the client-side stub of
        # the internal command is registered (no real server plugin).
        return (api.Command.get_plugin('vault_mod_internal') is
                _fake_vault_mod_internal)

    NO_CLI = classproperty(__NO_CLI_getter)

    @property
    def api_version(self):
        # Track the API version of the internal command we forward to.
        return self.api.Command.vault_mod_internal.api_version

    def get_args(self):
        """Yield the internal command's args first, then our own."""
        for arg in self.api.Command.vault_mod_internal.args():
            yield arg
        for arg in super(vault_mod, self).get_args():
            yield arg

    def get_options(self):
        """Yield the internal command's options (minus 'version'), then ours."""
        for option in self.api.Command.vault_mod_internal.options():
            if option.name != 'version':
                yield option
        for option in super(vault_mod, self).get_options():
            yield option

    def get_output_params(self):
        """Expose the internal command's output params plus our own."""
        for param in self.api.Command.vault_mod_internal.output_params():
            yield param
        for param in super(vault_mod, self).get_output_params():
            yield param

    def _iter_output(self):
        return self.api.Command.vault_mod_internal.output()

    def forward(self, *args, **options):
        """Modify a vault.

        When the change affects crypto attributes (type, salt, password,
        public key) the existing secret is first retrieved with the old
        credentials, the entry is modified, and the secret is re-archived
        with the new credentials.
        """
        # Pop all client-only options so they are not sent to the server.
        vault_type = options.pop('ipavaulttype', False)
        salt = options.pop('ipavaultsalt', False)
        change_password = options.pop('change_password', False)

        old_password = options.pop('old_password', None)
        old_password_file = options.pop('old_password_file', None)
        new_password = options.pop('new_password', None)
        new_password_file = options.pop('new_password_file', None)

        old_private_key = options.pop('private_key', None)
        old_private_key_file = options.pop('private_key_file', None)
        new_public_key = options.pop('ipavaultpublickey', None)
        new_public_key_file = options.pop('public_key_file', None)

        if self.api.env.in_server:
            backend = self.api.Backend.ldap2
        else:
            backend = self.api.Backend.rpcclient
        if not backend.isconnected():
            backend.connect()

        # determine the vault type based on parameters specified
        if vault_type:
            pass

        elif change_password or new_password or new_password_file or salt:
            vault_type = u'symmetric'

        elif new_public_key or new_public_key_file:
            vault_type = u'asymmetric'

        # if vault type is specified, retrieve existing secret
        if vault_type:
            opts = options.copy()
            opts.pop('description', None)

            # Old credentials are needed to decrypt the current secret.
            opts['password'] = old_password
            opts['password_file'] = old_password_file
            opts['private_key'] = old_private_key
            opts['private_key_file'] = old_private_key_file

            response = self.api.Command.vault_retrieve(*args, **opts)
            data = response['result']['data']

        opts = options.copy()

        # if vault type is specified, update crypto attributes
        if vault_type:
            opts['ipavaulttype'] = vault_type

            if vault_type == u'standard':
                # Standard vaults carry no client-side crypto material.
                opts['ipavaultsalt'] = None
                opts['ipavaultpublickey'] = None

            elif vault_type == u'symmetric':
                if salt:
                    opts['ipavaultsalt'] = salt
                else:
                    # Fresh random salt whenever the password changes.
                    opts['ipavaultsalt'] = os.urandom(16)

                opts['ipavaultpublickey'] = None

            elif vault_type == u'asymmetric':

                # get new vault public key
                if new_public_key and new_public_key_file:
                    raise errors.MutuallyExclusiveError(
                        reason=_('New public key specified multiple times'))

                elif new_public_key:
                    pass

                elif new_public_key_file:
                    new_public_key = validated_read('public_key_file',
                                                    new_public_key_file,
                                                    mode='rb')

                else:
                    raise errors.ValidationError(
                        name='ipavaultpublickey',
                        error=_('Missing new vault public key'))

                opts['ipavaultsalt'] = None
                opts['ipavaultpublickey'] = new_public_key

        response = self.api.Command.vault_mod_internal(*args, **opts)

        # if vault type is specified, rearchive existing secret
        if vault_type:
            opts = options.copy()
            opts.pop('description', None)

            # Re-encrypt the previously retrieved data with new credentials.
            opts['data'] = data
            opts['password'] = new_password
            opts['password_file'] = new_password_file
            opts['override_password'] = True

            self.api.Command.vault_archive(*args, **opts)

        return response
class automountlocation_import(LDAPQuery):
    __doc__ = _('Import automount files for a specific location.')

    takes_args = (
        Str(
            'masterfile',
            label=_('Master file'),
            doc=_('Automount master file.'),
        ),
    )

    takes_options = (
        Flag(
            'continue?',
            cli_name='continue',
            doc=_('Continuous operation mode. Errors are reported but the process continues.'),
        ),
    )

    def __read_mapfile(self, filename):
        """Return the lines of *filename*.

        :raises errors.NotFound: if the file does not exist
        """
        try:
            # Context manager guarantees the handle is closed even if
            # readlines() raises (the original leaked it on error).
            with open(filename, 'r') as fp:
                return fp.readlines()
        except IOError as e:
            if e.errno == 2:  # ENOENT
                raise errors.NotFound(
                    reason=_('File %(file)s not found') % {'file': filename})
            else:
                raise

    def forward(self, *args, **options):
        """
        The basic idea is to read the master file and create all the maps
        we need, then read each map file and add all the keys for the map.

        args[0] is the automount location, args[1] the master file path.
        Returns a dict describing added maps/keys plus skipped and
        duplicate entries.
        """
        # Validates that the location exists (raises NotFound otherwise);
        # the returned entry itself is not needed.
        self.api.Command['automountlocation_show'](args[0])

        result = {'maps': [], 'keys': [], 'skipped': [],
                  'duplicatekeys': [], 'duplicatemaps': []}
        maps = {}
        master = self.__read_mapfile(args[1])
        for m in master:
            if m.startswith('#'):
                continue
            m = m.rstrip()
            if m.startswith('+'):
                # '+' includes another master map; we cannot follow it.
                result['skipped'].append([m, args[1]])
                continue
            if len(m) == 0:
                continue
            am = m.split(None)
            if len(am) < 2:
                continue

            if am[1].startswith('/'):
                # Absolute path: remember the file location and key the map
                # by its basename.
                mapfile = am[1].replace('"', '')
                am[1] = os.path.basename(am[1])
                maps[am[1]] = mapfile

            # Add a new key to the auto.master map for the new map file
            try:
                api.Command['automountkey_add'](
                    args[0],
                    u'auto.master',
                    automountkey=unicode(am[0]),
                    automountinformation=unicode(' '.join(am[1:])))
                result['keys'].append([am[0], u'auto.master'])
            except errors.DuplicateEntry:
                if unicode(am[0]) in DEFAULT_KEYS:
                    # ignore conflict when the key was pre-created by the
                    # framework
                    pass
                elif options.get('continue', False):
                    result['duplicatekeys'].append(am[0])
                else:
                    raise errors.DuplicateEntry(
                        message=_('key %(key)s already exists') % dict(
                            key=am[0]))
            # Add the new map
            if not am[1].startswith('-'):
                try:
                    api.Command['automountmap_add'](args[0], unicode(am[1]))
                    result['maps'].append(am[1])
                except errors.DuplicateEntry:
                    # A duplicate may also mean the same map is used on
                    # several mount points (the original code had a second,
                    # unreachable handler for that case).
                    if unicode(am[1]) in DEFAULT_MAPS:
                        # ignore conflict when the map was pre-created by
                        # the framework
                        pass
                    elif options.get('continue', False):
                        # Record the duplicated map name (was am[0], the
                        # key, which made the report wrong).
                        result['duplicatemaps'].append(am[1])
                    else:
                        raise errors.DuplicateEntry(
                            message=_('map %(map)s already exists') % dict(
                                map=am[1]))

        # Now iterate over the map files and add the keys. To handle
        # continuation lines I'll make a pass through it to skip comments
        # etc and also to combine lines.
        for m in maps:
            map_lines = self.__read_mapfile(maps[m])
            lines = []
            cont = ''
            for x in map_lines:
                if x.startswith('#'):
                    continue
                x = x.rstrip()
                if x.startswith('+'):
                    result['skipped'].append([m, maps[m]])
                    continue
                if len(x) == 0:
                    continue
                if x.endswith("\\"):
                    # Trailing backslash: accumulate the continuation.
                    cont = cont + x[:-1] + ' '
                else:
                    lines.append(cont + x)
                    cont = ''
            for x in lines:
                am = x.split(None)
                key = unicode(am[0].replace('"', ''))
                try:
                    api.Command['automountkey_add'](
                        args[0],
                        unicode(m),
                        automountkey=key,
                        automountinformation=unicode(' '.join(am[1:])))
                    result['keys'].append([key, m])
                except errors.DuplicateEntry:
                    if options.get('continue', False):
                        result['duplicatekeys'].append(am[0])
                    else:
                        raise

        return dict(result=result)

    def output_for_cli(self, textui, result, *keys, **options):
        """Pretty-print the import summary for the CLI."""
        maps = result['result']['maps']
        keys = result['result']['keys']
        duplicatemaps = result['result']['duplicatemaps']
        duplicatekeys = result['result']['duplicatekeys']
        skipped = result['result']['skipped']

        textui.print_plain('Imported maps:')
        for m in maps:
            textui.print_plain('Added %s' % m)
        textui.print_plain('')

        textui.print_plain('Imported keys:')
        for k in keys:
            textui.print_plain('Added %s to %s' % (k[0], k[1]))
        textui.print_plain('')

        if len(skipped) > 0:
            textui.print_plain('Ignored keys:')
            for k in skipped:
                textui.print_plain('Ignored %s to %s' % (k[0], k[1]))

        if options.get('continue', False) and len(duplicatemaps) > 0:
            textui.print_plain('')
            textui.print_plain('Duplicate maps skipped:')
            for m in duplicatemaps:
                textui.print_plain('Skipped %s' % m)

        if options.get('continue', False) and len(duplicatekeys) > 0:
            textui.print_plain('')
            textui.print_plain('Duplicate keys skipped:')
            for k in duplicatekeys:
                textui.print_plain('Skipped %s' % k)
class otptoken_add(LDAPCreate):
    __doc__ = _('Add a new OTP token.')
    msg_summary = _('Added OTP token "%(value)s"')

    takes_options = LDAPCreate.takes_options + (
        # BUG FIX: flags=('no_option') was a plain string, not a 1-tuple,
        # so frozenset(flags) became a set of characters and the
        # 'no_option' flag never matched. The trailing comma fixes it.
        Flag('qrcode?', label=_('(deprecated)'), flags=('no_option',)),
        Flag('no_qrcode', label=_('Do not display QR code'), default=False),
    )

    def execute(self, ipatokenuniqueid=None, **options):
        """Make the unique-id argument optional; a UUID is generated in
        pre_callback when it is omitted."""
        return super(otptoken_add, self).execute(ipatokenuniqueid, **options)

    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        """Fill in defaults (uuid, owner, managedby), validate the token,
        and build the otpauth:// provisioning URI stored on the request
        context for post_callback."""
        # Fill in a default UUID when not specified.
        if entry_attrs.get('ipatokenuniqueid', None) is None:
            entry_attrs['ipatokenuniqueid'] = str(uuid.uuid4())
            dn = DN("ipatokenuniqueid=%s" % entry_attrs['ipatokenuniqueid'],
                    dn)

        # Reject a validity window that ends before it starts.
        if not _check_interval(options.get('ipatokennotbefore', None),
                               options.get('ipatokennotafter', None)):
            raise ValidationError(name='not_after',
                                  error='is before the validity start')

        # Set the object class and defaults for specific token types
        options['type'] = options['type'].lower()
        entry_attrs['objectclass'] = otptoken.object_class + [
            'ipatoken' + options['type']]
        # Drop attributes that belong to other token types.
        for ttype, tattrs in TOKEN_TYPES.items():
            if ttype != options['type']:
                for tattr in tattrs:
                    if tattr in entry_attrs:
                        del entry_attrs[tattr]

        # If owner was not specified, default to the person adding this token.
        # If managedby was not specified, attempt a sensible default.
        if 'ipatokenowner' not in entry_attrs or 'managedby' not in entry_attrs:
            # whoami_s() returns 'dn: <DN>'; strip the 4-char prefix.
            cur_dn = DN(self.api.Backend.ldap2.conn.whoami_s()[4:])
            if cur_dn:
                cur_uid = cur_dn[0].value
                prev_uid = entry_attrs.setdefault('ipatokenowner', cur_uid)
                if cur_uid == prev_uid:
                    entry_attrs.setdefault('managedby', cur_dn.ldap_text())

        # Resolve the owner's dn
        _normalize_owner(self.api.Object.user, entry_attrs)

        # Get the issuer for the URI
        owner = entry_attrs.get('ipatokenowner', None)
        issuer = api.env.realm
        if owner is not None:
            try:
                issuer = ldap.get_entry(
                    owner, ['krbprincipalname'])['krbprincipalname'][0]
            except (NotFound, IndexError):
                # Fall back to the realm when the owner principal is
                # unavailable.
                pass

        # Check if key is not empty
        if entry_attrs['ipatokenotpkey'] is None:
            raise ValidationError(name='key', error=_(u'cannot be empty'))

        # Build the URI parameters
        args = {}
        args['issuer'] = issuer
        args['secret'] = base64.b32encode(entry_attrs['ipatokenotpkey'])
        args['digits'] = entry_attrs['ipatokenotpdigits']
        args['algorithm'] = entry_attrs['ipatokenotpalgorithm'].upper()
        if options['type'] == 'totp':
            args['period'] = entry_attrs['ipatokentotptimestep']
        elif options['type'] == 'hotp':
            args['counter'] = entry_attrs['ipatokenhotpcounter']

        # Build the URI (otpauth key-URI format used by authenticator apps)
        label = urllib.parse.quote(entry_attrs['ipatokenuniqueid'])
        parameters = urllib.parse.urlencode(args)
        uri = u'otpauth://%s/%s:%s?%s' % (options['type'], issuer, label,
                                          parameters)
        # Stash the URI on the request context so post_callback can attach
        # it to the output entry.
        setattr(context, 'uri', uri)

        attrs_list.append("objectclass")
        return dn

    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Attach the provisioning URI and normalize type/owner output."""
        entry_attrs['uri'] = getattr(context, 'uri')
        _set_token_type(entry_attrs, **options)
        _convert_owner(self.api.Object.user, entry_attrs, options)
        return super(otptoken_add, self).post_callback(
            ldap, dn, entry_attrs, *keys, **options)
class idview_show(LDAPRetrieve):
    __doc__ = _('Display information about an ID View.')

    takes_options = LDAPRetrieve.takes_options + (
        Flag('show_hosts?',
             cli_name='show_hosts',
             doc=_('Enumerate all the hosts the view applies to.'),
        ),
    )

    has_output_params = global_output_params

    def show_id_overrides(self, dn, entry_attrs):
        """Populate entry_attrs['useroverrides'] / ['groupoverrides'] with
        the overrides stored directly under the view entry, resolving each
        anchor to a human-readable name where possible."""
        ldap = self.obj.backend

        for objectclass, obj_type in [('ipaUserOverride', 'user'),
                                      ('ipaGroupOverride', 'group')]:

            # Attribute to store results is called (user|group)overrides
            attr_name = obj_type + 'overrides'

            try:
                overrides, _truncated = ldap.find_entries(
                    filter="objectclass=%s" % objectclass,
                    attrs_list=['ipaanchoruuid'],
                    base_dn=dn,
                    scope=ldap.SCOPE_ONELEVEL,
                    paged_search=True)

                resolved_overrides = []
                for override in overrides:
                    anchor = override.single_value['ipaanchoruuid']

                    try:
                        name = resolve_anchor_to_object_name(ldap, obj_type,
                                                             anchor)
                        resolved_overrides.append(name)
                    except (errors.NotFound, errors.ValidationError):
                        # Anchor could not be resolved, use raw
                        resolved_overrides.append(anchor)

                entry_attrs[attr_name] = resolved_overrides

            except errors.NotFound:
                # No overrides found, nothing to do
                pass

    def enumerate_hosts(self, dn, entry_attrs):
        """Populate entry_attrs['appliedtohosts'] with the cn of every host
        whose ipaAssignedIDView points at this view (paged search, may be
        expensive on large deployments)."""
        ldap = self.obj.backend

        filter_params = {
            'ipaAssignedIDView': dn,
            'objectClass': 'ipaHost',
        }

        try:
            hosts, _truncated = ldap.find_entries(
                filter=ldap.make_filter(filter_params, rules=ldap.MATCH_ALL),
                attrs_list=['cn'],
                base_dn=api.env.container_host + api.env.basedn,
                scope=ldap.SCOPE_ONELEVEL,
                paged_search=True)

            entry_attrs['appliedtohosts'] = [host.single_value['cn']
                                             for host in hosts]
        except errors.NotFound:
            # No host is assigned to this view; leave the attribute unset.
            pass

    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        self.show_id_overrides(dn, entry_attrs)

        # Enumerating hosts is a potentially expensive operation (uses paged
        # search to list all the hosts the ID view applies to). Show the list
        # of the hosts only if explicitly asked for (or asked for --all).
        # Do not display with --raw, since this attribute does not exist in
        # LDAP.
        if ((options.get('show_hosts') or options.get('all'))
                and not options.get('raw')):
            self.enumerate_hosts(dn, entry_attrs)

        return dn
This functionality is primarily used to allow migration from older systems or other Identity Management solutions. """) register = Registry() protected_default_trust_view_error = errors.ProtectedEntryError( label=_('ID View'), key=u"Default Trust View", reason=_('system ID View') ) fallback_to_ldap_option = Flag( 'fallback_to_ldap?', default=False, label=_('Fallback to AD DC LDAP'), doc=_("Allow falling back to AD DC LDAP when resolving AD " "trusted objects. For two-way trusts only."), ) DEFAULT_TRUST_VIEW_NAME = "default trust view" ANCHOR_REGEX = re.compile( r':IPA:.*:[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' r'|' r':SID:S-[0-9\-]+' ) def normalize_idview_name(value): if value in (None, '',):
class migrate_ds(Command):
    __doc__ = _('Migrate users and groups from DS to IPA.')

    migrate_objects = {
        # OBJECT_NAME: (search_filter, pre_callback, post_callback)
        #
        # OBJECT_NAME - is the name of an LDAPObject subclass
        # search_filter - is the filter to retrieve objects from DS
        # pre_callback - is called for each object just after it was
        #                retrieved from DS and before being added to IPA
        # post_callback - is called for each object after it was added to IPA
        # exc_callback - is called when adding entry to IPA raises an exception
        #
        # {pre, post}_callback parameters:
        #  ldap - ldap2 instance connected to IPA
        #  pkey - primary key value of the object (uid for users, etc.)
        #  dn - dn of the object as it (will be/is) stored in IPA
        #  entry_attrs - attributes of the object
        #  failed - a list of so-far failed objects
        #  config - IPA config entry attributes
        #  ctx - object context, used to pass data between callbacks
        #
        # If pre_callback return value evaluates to False, migration
        # of the current object is aborted.
        'user': {
            'filter_template': '(&(|%s)(uid=*))',
            'oc_option': 'userobjectclass',
            'oc_blocklist_option': 'userignoreobjectclass',
            'attr_blocklist_option': 'userignoreattribute',
            'pre_callback': _pre_migrate_user,
            'post_callback': _post_migrate_user,
            'exc_callback': None
        },
        'group': {
            'filter_template': '(&(|%s)(cn=*))',
            'oc_option': 'groupobjectclass',
            'oc_blocklist_option': 'groupignoreobjectclass',
            'attr_blocklist_option': 'groupignoreattribute',
            'pre_callback': _pre_migrate_group,
            'post_callback': None,
            'exc_callback': _group_exc_callback,
        },
    }
    # Users must be migrated before groups so that group membership can be
    # resolved against already-migrated users.
    migrate_order = ('user', 'group')

    takes_args = (
        Str('ldapuri', validate_ldapuri,
            cli_name='ldap_uri',
            label=_('LDAP URI'),
            doc=_('LDAP URI of DS server to migrate from'),
        ),
        Password('bindpw',
            cli_name='password',
            label=_('Password'),
            confirm=False,
            doc=_('bind password'),
        ),
    )

    takes_options = (
        DNParam('binddn?',
            cli_name='bind_dn',
            label=_('Bind DN'),
            default=DN(('cn', 'directory manager')),
            autofill=True,
        ),
        DNParam('usercontainer',
            cli_name='user_container',
            label=_('User container'),
            doc=_('DN of container for users in DS relative to base DN'),
            default=DN(('ou', 'people')),
            autofill=True,
        ),
        DNParam('groupcontainer',
            cli_name='group_container',
            label=_('Group container'),
            doc=_('DN of container for groups in DS relative to base DN'),
            default=DN(('ou', 'groups')),
            autofill=True,
        ),
        Str('userobjectclass+',
            cli_name='user_objectclass',
            label=_('User object class'),
            doc=_('Objectclasses used to search for user entries in DS'),
            default=(u'person',),
            autofill=True,
        ),
        Str('groupobjectclass+',
            cli_name='group_objectclass',
            label=_('Group object class'),
            doc=_('Objectclasses used to search for group entries in DS'),
            default=(u'groupOfUniqueNames', u'groupOfNames'),
            autofill=True,
        ),
        Str('userignoreobjectclass*',
            cli_name='user_ignore_objectclass',
            label=_('Ignore user object class'),
            doc=_('Objectclasses to be ignored for user entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('userignoreattribute*',
            cli_name='user_ignore_attribute',
            label=_('Ignore user attribute'),
            doc=_('Attributes to be ignored for user entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('groupignoreobjectclass*',
            cli_name='group_ignore_objectclass',
            label=_('Ignore group object class'),
            doc=_('Objectclasses to be ignored for group entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('groupignoreattribute*',
            cli_name='group_ignore_attribute',
            label=_('Ignore group attribute'),
            doc=_('Attributes to be ignored for group entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Flag('groupoverwritegid',
            cli_name='group_overwrite_gid',
            label=_('Overwrite GID'),
            doc=_('When migrating a group already existing in IPA domain overwrite the '
                  'group GID and report as success'),
        ),
        StrEnum('schema?',
            cli_name='schema',
            label=_('LDAP schema'),
            doc=_('The schema used on the LDAP server. Supported values are RFC2307 and RFC2307bis. The default is RFC2307bis'),
            values=_supported_schemas,
            default=_supported_schemas[0],
            autofill=True,
        ),
        Flag('continue?',
            label=_('Continue'),
            doc=_('Continuous operation mode. Errors are reported but the process continues'),
            default=False,
        ),
        DNParam('basedn?',
            cli_name='base_dn',
            label=_('Base DN'),
            doc=_('Base DN on remote LDAP server'),
        ),
        Flag('compat?',
            cli_name='with_compat',
            label=_('Ignore compat plugin'),
            doc=_('Allows migration despite the usage of compat plugin'),
            default=False,
        ),
        Str('cacertfile?',
            cli_name='ca_cert_file',
            label=_('CA certificate'),
            doc=_('Load CA certificate of LDAP server from FILE'),
            default=None,
            noextrawhitespace=False,
        ),
        Bool('use_def_group?',
            cli_name='use_default_group',
            label=_('Add to default group'),
            doc=_('Add migrated users without a group to a default group '
                  '(default: true)'),
            default=True,
            autofill=True,
        ),
        StrEnum('scope',
            cli_name='scope',
            label=_('Search scope'),
            doc=_('LDAP search scope for users and groups: base, '
                  'onelevel, or subtree. Defaults to onelevel'),
            values=sorted(_supported_scopes),
            default=_default_scope,
            autofill=True,
        ),
    )

    has_output = (
        output.Output(
            'result',
            type=dict,
            doc=_('Lists of objects migrated; categorized by type.'),
        ),
        output.Output(
            'failed',
            type=dict,
            doc=_('Lists of objects that could not be migrated; categorized by type.'),
        ),
        output.Output(
            'enabled',
            type=bool,
            doc=_('False if migration mode was disabled.'),
        ),
        output.Output(
            'compat',
            type=bool,
            doc=_('False if migration fails because the compatibility plug-in is enabled.'),
        ),
    )

    # Template for the per-object-type "exclude" options generated in
    # get_options().
    exclude_doc = _('%s to exclude from migration')

    truncated_err_msg = _('''\
search results for objects to be migrated have been truncated by the server;
migration process might be incomplete\n''')

    def get_options(self):
        """
        Call get_options of the baseclass and add "exclude" options for each
        type of object being migrated.
        """
        for option in super(migrate_ds, self).get_options():
            yield option
        for ldap_obj_name in self.migrate_objects:
            ldap_obj = self.api.Object[ldap_obj_name]
            name = 'exclude_%ss' % to_cli(ldap_obj_name)
            doc = self.exclude_doc % ldap_obj.object_name_plural
            yield Str(
                '%s*' % name, cli_name=name, doc=doc, default=tuple(),
                autofill=True
            )

    def normalize_options(self, options):
        """
        Convert all "exclude" option values to lower-case.

        Also, empty List parameters are converted to None, but the migration
        plugin doesn't like that - convert back to empty lists.
        """
        names = [
            'userobjectclass', 'groupobjectclass',
            'userignoreobjectclass', 'userignoreattribute',
            'groupignoreobjectclass', 'groupignoreattribute'
        ]
        names.extend('exclude_%ss' % to_cli(n) for n in self.migrate_objects)
        for name in names:
            if options[name]:
                options[name] = tuple(v.lower() for v in options[name])
            else:
                options[name] = tuple()

    def _get_search_bases(self, options, ds_base_dn, migrate_order):
        """Map each object type to the DS subtree it is searched under.

        The per-type container option is taken relative to ds_base_dn
        unless the user already supplied a full DN.
        """
        search_bases = dict()
        for ldap_obj_name in migrate_order:
            container = options.get('%scontainer' % to_cli(ldap_obj_name))
            if container:
                # Don't append base dn if user already appended it in the container dn
                if container.endswith(ds_base_dn):
                    search_base = container
                else:
                    search_base = DN(container, ds_base_dn)
            else:
                search_base = ds_base_dn
            search_bases[ldap_obj_name] = search_base
        return search_bases

    def migrate(self, ldap, config, ds_ldap, ds_base_dn, options):
        """
        Migrate objects from DS to LDAP.

        Returns a (migrated, failed) pair of dicts keyed by object type.
        """
        assert isinstance(ds_base_dn, DN)
        migrated = {} # {'OBJ': ['PKEY1', 'PKEY2', ...], ...}
        failed = {} # {'OBJ': {'PKEY1': 'Failed 'cos blabla', ...}, ...}
        search_bases = self._get_search_bases(options, ds_base_dn,
                                              self.migrate_order)
        migration_start = datetime.datetime.now()

        scope = _supported_scopes[options.get('scope')]

        for ldap_obj_name in self.migrate_order:
            ldap_obj = self.api.Object[ldap_obj_name]

            template = self.migrate_objects[ldap_obj_name]['filter_template']
            oc_list = options[to_cli(
                self.migrate_objects[ldap_obj_name]['oc_option'])]
            search_filter = construct_filter(template, oc_list)

            exclude = options['exclude_%ss' % to_cli(ldap_obj_name)]
            # Per-type context passed between pre/post callbacks.
            context = dict(ds_ldap=ds_ldap)

            migrated[ldap_obj_name] = []
            failed[ldap_obj_name] = {}

            try:
                entries, truncated = ds_ldap.find_entries(
                    search_filter, ['*'], search_bases[ldap_obj_name],
                    scope,
                    time_limit=0, size_limit=-1)
            except errors.NotFound:
                if not options.get('continue', False):
                    raise errors.NotFound(
                        reason=_('%(container)s LDAP search did not return any result '
                                 '(search base: %(search_base)s, '
                                 'objectclass: %(objectclass)s)') % {
                            'container': ldap_obj_name,
                            'search_base': search_bases[ldap_obj_name],
                            'objectclass': ', '.join(oc_list)
                        })
                else:
                    truncated = False
                    entries = []
            if truncated:
                logger.error('%s: %s', ldap_obj.name, self.truncated_err_msg)

            # Resolve the per-type ignore lists for objectclasses/attributes.
            blocklists = {}
            for blocklist in ('oc_blocklist', 'attr_blocklist'):
                blocklist_option = (
                    self.migrate_objects[ldap_obj_name][blocklist + '_option'])
                if blocklist_option is not None:
                    blocklists[blocklist] = options.get(blocklist_option,
                                                        tuple())
                else:
                    blocklists[blocklist] = tuple()

            # get default primary group for new users
            if 'def_group_dn' not in context and options.get('use_def_group'):
                def_group = config.get('ipadefaultprimarygroup')
                context['def_group_dn'] = api.Object.group.get_dn(def_group)
                try:
                    ldap.get_entry(context['def_group_dn'],
                                   ['gidnumber', 'cn'])
                except errors.NotFound:
                    error_msg = _('Default group for new users not found')
                    raise errors.NotFound(reason=error_msg)

            context['has_upg'] = ldap.has_upg()

            valid_gids = set()
            invalid_gids = set()
            migrate_cnt = 0
            context['migrate_cnt'] = 0
            for entry_attrs in entries:
                context['migrate_cnt'] = migrate_cnt
                s = datetime.datetime.now()

                ava = entry_attrs.dn[0][0]
                if ava.attr == ldap_obj.primary_key.name:
                    # In case if pkey attribute is in the migrated object DN
                    # and the original LDAP is multivalued, make sure that
                    # we pick the correct value (the unique one stored in DN)
                    pkey = ava.value.lower()
                else:
                    pkey = entry_attrs[ldap_obj.primary_key.name][0].lower()

                if pkey in exclude:
                    continue

                entry_attrs.dn = ldap_obj.get_dn(pkey)
                # Merge configured default objectclasses with the entry's
                # own (lower-cased, de-duplicated).
                entry_attrs['objectclass'] = list(
                    set(
                        config.get(
                            ldap_obj.object_class_config,
                            ldap_obj.object_class
                        ) + [o.lower() for o in entry_attrs['objectclass']]))
                entry_attrs[ldap_obj.primary_key.name][0] = entry_attrs[
                    ldap_obj.primary_key.name][0].lower()

                callback = self.migrate_objects[ldap_obj_name]['pre_callback']
                if callable(callback):
                    try:
                        entry_attrs.dn = callback(
                            ldap, pkey, entry_attrs.dn, entry_attrs,
                            failed[ldap_obj_name], config, context,
                            schema=options['schema'],
                            search_bases=search_bases,
                            valid_gids=valid_gids,
                            invalid_gids=invalid_gids,
                            **blocklists)
                        # A falsy DN from the pre_callback aborts migration
                        # of this object.
                        if not entry_attrs.dn:
                            continue
                    except errors.NotFound as e:
                        failed[ldap_obj_name][pkey] = unicode(e.reason)
                        continue

                try:
                    ldap.add_entry(entry_attrs)
                except errors.ExecutionError as e:
                    callback = self.migrate_objects[ldap_obj_name][
                        'exc_callback']
                    if callable(callback):
                        try:
                            callback(
                                ldap, entry_attrs.dn, entry_attrs, e, options)
                        except errors.ExecutionError as e2:
                            failed[ldap_obj_name][pkey] = unicode(e2)
                            continue
                    else:
                        failed[ldap_obj_name][pkey] = unicode(e)
                        continue

                migrated[ldap_obj_name].append(pkey)

                callback = self.migrate_objects[ldap_obj_name]['post_callback']
                if callable(callback):
                    callback(
                        ldap, pkey, entry_attrs.dn, entry_attrs,
                        failed[ldap_obj_name], config, context)
                e = datetime.datetime.now()
                d = e - s
                total_dur = e - migration_start
                migrate_cnt += 1
                if migrate_cnt > 0 and migrate_cnt % 100 == 0:
                    logger.info("%d %ss migrated. %s elapsed.",
                                migrate_cnt, ldap_obj_name, total_dur)
                logger.debug("%d %ss migrated, duration: %s (total %s)",
                             migrate_cnt, ldap_obj_name, d, total_dur)

        # NOTE(review): `context` here is the dict left over from the last
        # migrate_order iteration — relies on the loop having run; confirm
        # this is intentional.
        if 'def_group_dn' in context:
            _update_default_group(ldap, context, True)

        return (migrated, failed)

    def execute(self, ldapuri, bindpw, **options):
        """Entry point: validate preconditions, bind to the remote DS,
        determine its base DN if not given, and run the migration."""
        ldap = self.api.Backend.ldap2
        self.normalize_options(options)
        config = ldap.get_ipa_config()

        ds_base_dn = options.get('basedn')
        if ds_base_dn is not None:
            assert isinstance(ds_base_dn, DN)

        # check if migration mode is enabled
        if config.get('ipamigrationenabled', ('FALSE', ))[0] == 'FALSE':
            return dict(result={}, failed={}, enabled=False, compat=True)

        # connect to DS
        if options.get('cacertfile') is not None:
            # store CA cert into file
            tmp_ca_cert_f = write_tmp_file(options['cacertfile'])
            cacert = tmp_ca_cert_f.name

            # start TLS connection or STARTTLS
            ds_ldap = LDAPClient(ldapuri, cacert=cacert, start_tls=True)
            ds_ldap.simple_bind(options['binddn'], bindpw)

            tmp_ca_cert_f.close()
        else:
            ds_ldap = LDAPClient(ldapuri)
            ds_ldap.simple_bind(options['binddn'], bindpw,
                                insecure_bind=True)

        # check whether the compat plugin is enabled
        if not options.get('compat'):
            try:
                ldap.get_entry(
                    DN(('cn', 'users'), ('cn', 'compat'), (api.env.basedn)))
                return dict(result={}, failed={}, enabled=True, compat=False)
            except errors.NotFound:
                pass

        if not ds_base_dn:
            # retrieve base DN from remote LDAP server
            entries, _truncated = ds_ldap.find_entries(
                '', ['namingcontexts', 'defaultnamingcontext'], DN(''),
                ds_ldap.SCOPE_BASE, size_limit=-1, time_limit=0,
            )
            if 'defaultnamingcontext' in entries[0]:
                ds_base_dn = DN(entries[0]['defaultnamingcontext'][0])
                assert isinstance(ds_base_dn, DN)
            else:
                try:
                    ds_base_dn = DN(entries[0]['namingcontexts'][0])
                    assert isinstance(ds_base_dn, DN)
                except (IndexError, KeyError) as e:
                    raise Exception(str(e))

        # migrate!
        (migrated, failed) = self.migrate(ldap, config, ds_ldap, ds_base_dn,
                                          options)

        return dict(result=migrated, failed=failed, enabled=True, compat=True)
class service_add(LDAPCreate):
    __doc__ = _('Add a new IPA service.')

    msg_summary = _('Added service "%(value)s"')
    member_attributes = ['managedby']
    has_output_params = LDAPCreate.has_output_params + output_params
    takes_options = LDAPCreate.takes_options + (
        Flag('force',
             label=_('Force'),
             doc=_('force principal name even if host not in DNS'),
        ),
        Flag('skip_host_check',
             label=_('Skip host check'),
             doc=_('force service to be created even when host '
                   'object does not exist to manage it'),
        ),
    )

    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys,
                     **options):
        """Validate and enrich the service entry before the LDAP add.

        Verifies the managing host exists (unless --skip-host-check) and is
        resolvable in DNS (unless --force), then fills in managedby,
        ipakrbprincipalalias, objectclass and krbcanonicalname attributes.

        :raises errors.HostService: when the principal is a host principal
            and --force was not given.
        :raises errors.NotFound: when the managing host does not exist.
        """
        assert isinstance(dn, DN)
        # keys[-1] is the service principal; its hostname identifies the
        # host that will manage this service
        principal = keys[-1]
        hostname = principal.hostname
        if principal.is_host and not options['force']:
            raise errors.HostService()

        if not options['skip_host_check']:
            try:
                hostresult = self.api.Command['host_show'](hostname)['result']
            except errors.NotFound:
                raise errors.NotFound(reason=_(
                    "The host '%s' does not exist to add a service to.") %
                    hostname)

        self.obj.validate_ipakrbauthzdata(entry_attrs)

        if not options.get('force', False):
            # We know the host exists if we've gotten this far but we
            # really want to discourage creating services for hosts that
            # don't exist in DNS.
            util.verify_host_resolvable(hostname)
        # hostresult is only bound when the host check ran above; this guard
        # keeps the two conditions coupled so it is never read unbound
        if not (options['skip_host_check'] or 'managedby' in entry_attrs):
            entry_attrs['managedby'] = hostresult['dn']

        # Enforce ipaKrbPrincipalAlias to aid case-insensitive searches
        # as krbPrincipalName/krbCanonicalName are case-sensitive in Kerberos
        # schema
        entry_attrs['ipakrbprincipalalias'] = keys[-1]

        # Objectclass ipakrbprincipal providing ipakrbprincipalalias is not in
        # in a list of default objectclasses, add it manually
        entry_attrs['objectclass'].append('ipakrbprincipal')

        # set krbcanonicalname attribute to enable principal canonicalization
        util.set_krbcanonicalname(entry_attrs)
        update_krbticketflags(ldap, entry_attrs, attrs_list, options, False)
        return dn

    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Post-process the created entry for client-facing output.

        Translates Kerberos and keytab-permission attributes into their
        output names and fills in krbcanonicalname.
        """
        set_kerberos_attrs(entry_attrs, options)
        rename_ipaallowedtoperform_from_ldap(entry_attrs, options)
        self.obj.populate_krbcanonicalname(entry_attrs, options)
        return dn
""") + _(""" Allow user to create a keytab: ipa service-allow-create-keytab HTTP/web.example.com --users=tuser1 """) + _(""" Generate and retrieve a keytab for an IPA service: ipa-getkeytab -s ipa.example.com -p HTTP/web.example.com -k /etc/httpd/httpd.keytab """) logger = logging.getLogger(__name__) register = Registry() output_params = ( Flag('has_keytab', label=_('Keytab'), ), Str('managedby_host', label='Managed by', ), Str('ipaallowedtoperform_read_keys_user', label=_('Users allowed to retrieve keytab'), ), Str('ipaallowedtoperform_read_keys_group', label=_('Groups allowed to retrieve keytab'), ), Str('ipaallowedtoperform_read_keys_host', label=_('Hosts allowed to retrieve keytab'), ), Str('ipaallowedtoperform_read_keys_hostgroup', label=_('Host Groups allowed to retrieve keytab'),