Example #1
    def convert_members_rfc2307bis(member_attr, search_bases, overwrite=False):
        """
        Convert DNs in member attributes to work in IPA.
        """
        new_members = []
        entry_attrs.setdefault(member_attr, [])
        for m in entry_attrs[member_attr]:
            try:
                m = DN(m)
            except ValueError as e:
                # This should be impossible unless the remote server
                # doesn't enforce syntax checking.
                api.log.error('Malformed DN %s: %s' % (m, e))
                continue
            try:
                rdnval = m[0].value
            except IndexError:
                api.log.error('Malformed DN %s has no RDN?' % m)
                continue

            if m.endswith(search_bases['user']):
                api.log.debug('migrating %s user %s', member_attr, m)
                m = DN((api.Object.user.primary_key.name, rdnval),
                       api.env.container_user, api.env.basedn)
            elif m.endswith(search_bases['group']):
                api.log.debug('migrating %s group %s', member_attr, m)
                m = DN((api.Object.group.primary_key.name, rdnval),
                       api.env.container_group, api.env.basedn)
            else:
                api.log.error('entry %s does not belong into any known container' % m)
                continue

            new_members.append(m)
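
The helper above relies on just three pieces of the DN API: parsing a string into a DN, reading the first RDN's value, and testing a container suffix with endswith() before rebuilding the DN under the IPA container. Note that entry_attrs is not an argument; in the migration plugin this function appears to be nested inside the group pre-migration callback and closes over it. A minimal, self-contained sketch of the rewrite, with made-up container and base DNs standing in for the api.env values:

    from ipapython.dn import DN

    basedn = DN(('dc', 'example'), ('dc', 'com'))              # stand-in for api.env.basedn
    user_container = DN(('cn', 'users'), ('cn', 'accounts'))   # stand-in for api.env.container_user
    user_search_base = DN(('ou', 'people'), basedn)            # search_bases['user'] equivalent

    m = DN('uid=alice,ou=people,dc=example,dc=com')            # member DN from the remote DS
    if m.endswith(user_search_base):
        rdnval = m[0].value                                    # 'alice'
        m = DN(('uid', rdnval), user_container, basedn)
    print(m)   # uid=alice,cn=users,cn=accounts,dc=example,dc=com
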
Example #2
 def dn2zone_name(self, dn):
     """cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
     # verify that metadata object is under DNS sub-tree
     dn = DN(dn)
     container = DN(self.api.env.container_dns, self.api.env.basedn)
     idx = dn.rfind(container)
     assert idx != -1, "Metadata object %s is not inside %s" % (dn, container)
     assert len(dn[idx - 1]) == 1, "Multi-valued RDN as zone name is not supported"
     return dns.name.from_text(dn[idx - 1]["idnsname"])
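
The docstring's sample DN makes the index arithmetic easier to follow: rfind() locates where the cn=dns container starts within the metadata DN, and the RDN immediately to its left is the zone name. A short sketch of that lookup (the final dns.name conversion is left out), assuming the same ipapython.dn behaviour the method itself relies on:

    from ipapython.dn import DN

    dn = DN('cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,'
            'cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example')
    container = DN('cn=dns,dc=ipa,dc=example')

    idx = dn.rfind(container)      # index of the RDN where the container starts
    zone_rdn = dn[idx - 1]         # the RDN just left of the container
    print(zone_rdn['idnsname'])    # test
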
Example #3
File: host.py Project: rkuska/freeipa
    def suppress_netgroup_memberof(self, ldap, entry_attrs):
        """
        We don't want to show managed netgroups so remove them from the
        memberofindirect list.
        """
        ng_container = DN(api.env.container_netgroup, api.env.basedn)
        for member in list(entry_attrs.get('memberofindirect', [])):
            memberdn = DN(member)
            if not memberdn.endswith(ng_container):
                continue

            filter = ldap.make_filter({'objectclass': 'mepmanagedentry'})
            try:
                ldap.get_entries(memberdn, ldap.SCOPE_BASE, filter, [''])
            except errors.NotFound:
                pass
            else:
                entry_attrs['memberofindirect'].remove(member)
Example #4
File: whoami.py Project: encukou/freeipa
    def execute(self, **options):
        """
        Retrieve the DN we are authenticated as to LDAP, find the bindable IPA
        object that handles the container this DN belongs to, and report
        details about that object.
        """
        exceptions = {
                'idoverrideuser': (DN("cn={0}".format(DEFAULT_TRUST_VIEW_NAME)),
                                   DEFAULT_TRUST_VIEW_NAME, 'ipaOriginalUid'),
        }
        ldap = api.Backend.ldap2

        # whoami_s() call returns a string 'dn: <actual DN value>'
        # We also reject ldapi-as-root connections as DM is a virtual object
        dn = DN(ldap.conn.whoami_s()[4:])
        if dn == DN('cn=Directory Manager'):
            raise errors.NotFound(
                    reason=_('Cannot query Directory Manager with API'))

        entry = ldap.get_entry(dn)
        o_name = None
        o_func = None
        o_args = []
        for o in api.Object():
            if not getattr(o, 'bindable', None):
                continue
            container = getattr(o, 'container_dn', None)
            if container is None:
                continue
            # Adjust container for exception two-level objects
            if o.name in exceptions:
                container = exceptions[o.name][0] + container
            if dn.find(container + api.env.basedn) == 1:
                # We found exact container this DN belongs to
                o_name = unicode(o.name)
                o_args = [unicode(entry.single_value.get(o.primary_key.name))]
                o_func = unicode(o.methods.show.full_name)
                if o.name in exceptions:
                    o_args = [unicode(exceptions[o.name][1]),
                              unicode(entry.single_value.get(
                                      exceptions[o.name][2]))]
                break

        return {'object': o_name, 'command': o_func, 'arguments': o_args}
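
Two details of this command are easy to miss: the [4:] slice strips the literal 'dn: ' prefix returned by whoami_s(), and dn.find(container + api.env.basedn) == 1 checks that the authenticated entry sits exactly one RDN above the object's container, i.e. directly inside it. A small sketch of that matching logic, with hypothetical values in place of api.env:

    from ipapython.dn import DN

    basedn = DN(('dc', 'example'), ('dc', 'com'))             # stand-in for api.env.basedn
    container = DN(('cn', 'users'), ('cn', 'accounts'))       # a plugin's container_dn

    whoami = 'dn: uid=alice,cn=users,cn=accounts,dc=example,dc=com'
    dn = DN(whoami[4:])                                       # strip the 'dn: ' prefix

    # find() returns the index of the first matching RDN; index 1 means the
    # container begins right after the entry's own RDN
    print(dn.find(container + basedn) == 1)                   # True
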
Example #5
    def setUp(self):
        # ava1 must sort before ava2
        self.attr1    = 'cn'
        self.value1   = u'Bob'
        self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
        self.ava1     = AVA(self.attr1, self.value1)

        self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
        self.rdn1     = RDN((self.attr1, self.value1))

        self.attr2    = 'ou'
        self.value2   = u'people'
        self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
        self.ava2     = AVA(self.attr2, self.value2)

        self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
        self.rdn2     = RDN((self.attr2, self.value2))

        self.str_dn1 = self.str_rdn1
        self.dn1 = DN(self.rdn1)

        self.str_dn2 = self.str_rdn2
        self.dn2 = DN(self.rdn2)

        self.str_dn3 = '%s,%s' % (self.str_rdn1, self.str_rdn2)
        self.dn3 = DN(self.rdn1, self.rdn2)

        self.base_rdn1 = RDN(('dc', 'redhat'))
        self.base_rdn2 = RDN(('dc', 'com'))
        self.base_dn = DN(self.base_rdn1, self.base_rdn2)

        self.container_rdn1 = RDN(('cn', 'sudorules'))
        self.container_rdn2 = RDN(('cn', 'sudo'))
        self.container_dn = DN(self.container_rdn1, self.container_rdn2)

        self.base_container_dn = DN((self.attr1, self.value1),
                                    self.container_dn, self.base_dn)

        self.x500name = x509.Name([
            x509.NameAttribute(
                x509.NameOID.ORGANIZATIONAL_UNIT_NAME, self.value2),
            x509.NameAttribute(x509.NameOID.COMMON_NAME, self.value1),
        ])
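
For readers new to the AVA/RDN/DN hierarchy this fixture builds on: an AVA is a single attribute=value pair, an RDN wraps one or more AVAs, and a DN is an ordered sequence of RDNs. A quick sketch of how the pieces print, assuming the same classes exercised by these tests:

    from ipapython.dn import AVA, RDN, DN

    ava = AVA('cn', u'Bob')                 # one attribute=value pair
    rdn = RDN(('cn', u'Bob'))               # an RDN wrapping that pair
    dn = DN(rdn, RDN(('ou', u'people')))    # a DN: leaf RDN first

    print(str(ava))   # cn=Bob
    print(str(rdn))   # cn=Bob
    print(str(dn))    # cn=Bob,ou=people
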
Example #6
    def test_x500_text(self):
        # null DN x500 ordering and LDAP ordering are the same
        nulldn = DN()
        self.assertEqual(nulldn.ldap_text(), nulldn.x500_text())

        # reverse a DN with a single RDN
        self.assertEqual(self.dn1.ldap_text(), self.dn1.x500_text())

        # reverse a DN with 2 RDNs
        dn3_x500 = self.dn3.x500_text()
        dn3_rev = DN(self.rdn2, self.rdn1)
        self.assertEqual(dn3_rev.ldap_text(), dn3_x500)

        # reverse a longer DN
        longdn_x500 = self.base_container_dn.x500_text()
        longdn_rev = DN(longdn_x500)
        l = len(self.base_container_dn)
        for i in range(l):
            self.assertEqual(longdn_rev[i], self.base_container_dn[l-1-i])
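
The test exercises the two string orderings DN supports: ldap_text() prints the most specific RDN first (the usual LDAP form), while x500_text() prints the same RDNs in reverse. A tiny illustration under that assumption:

    from ipapython.dn import DN

    dn = DN(('cn', 'Bob'), ('ou', 'people'))   # built in LDAP order, leaf first
    print(dn.ldap_text())    # cn=Bob,ou=people
    print(dn.x500_text())    # ou=people,cn=Bob
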
Example #7
    def __check_replica(self):
        try:
            cifs_services = DN(api.env.container_service, self.suffix)
            # Search for cifs services which also belong to adtrust agents, these are our DCs
            res = self.admin_conn.get_entries(cifs_services,
                ldap.SCOPE_ONELEVEL,
                "(&(krbprincipalname=cifs/*@%s)(memberof=%s))" % (self.realm, str(self.smb_dn)))
            if len(res) > 1:
                # there are other CIFS services defined, we are not alone
                for entry in res:
                    managedBy = entry.single_value.get('managedBy')
                    if managedBy:
                        fqdn = DN(managedBy)['fqdn']
                        if fqdn != unicode(self.fqdn):
                            # this is CIFS service of a different host in our
                            # REALM, we need to remember it to announce via
                            # SRV records for _msdcs
                            self.cifs_hosts.append(fqdn.split(".")[0])

        except Exception as e:
            root_logger.critical("Checking replicas for cifs principals failed with error '%s'" % e)
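
The interesting DN trick here is the dict-style lookup: DN(managedBy)['fqdn'] pulls the value of the fqdn attribute out of the parsed DN, which is then compared and split to get the short host name. Judging from that call, a minimal sketch with a made-up managedBy value:

    from ipapython.dn import DN

    managed_by = 'fqdn=dc1.ipa.example,cn=computers,cn=accounts,dc=ipa,dc=example'
    fqdn = DN(managed_by)['fqdn']    # dict-style lookup of an attribute value
    print(fqdn)                      # dc1.ipa.example
    print(fqdn.split('.')[0])        # dc1 -- the short name kept for _msdcs SRV records
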
Example #8
File: aci.py Project: msrb/freeipa
    def execute(self, term, **kw):
        ldap = self.api.Backend.ldap2

        entry = ldap.get_entry(self.api.env.basedn, ["aci"])

        acis = _convert_strings_to_acis(entry.get("aci", []))
        results = []

        if term:
            term = term.lower()
            for a in acis:
                if a.name.lower().find(term) != -1 and a not in results:
                    results.append(a)
            acis = list(results)
        else:
            results = list(acis)

        if kw.get("aciname"):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if name != kw["aciname"]:
                    results.remove(a)
            acis = list(results)

        if kw.get("aciprefix"):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if prefix != kw["aciprefix"]:
                    results.remove(a)
            acis = list(results)

        if kw.get("attrs"):
            for a in acis:
                if not "targetattr" in a.target:
                    results.remove(a)
                    continue
                alist1 = sorted([t.lower() for t in a.target["targetattr"]["expression"]])
                alist2 = sorted([t.lower() for t in kw["attrs"]])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get("permission"):
            try:
                self.api.Command["permission_show"](kw["permission"])
            except errors.NotFound:
                pass
            else:
                for a in acis:
                    uri = "ldap:///%s" % entry.dn
                    if a.bindrule["expression"] != uri:
                        results.remove(a)
                acis = list(results)

        if kw.get("permissions"):
            for a in acis:
                alist1 = sorted(a.permissions)
                alist2 = sorted(kw["permissions"])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get("memberof"):
            try:
                dn = _group_from_memberof(kw["memberof"])
            except errors.NotFound:
                pass
            else:
                memberof_filter = "(memberOf=%s)" % dn
                for a in acis:
                    if "targetfilter" in a.target:
                        targetfilter = a.target["targetfilter"]["expression"]
                        if targetfilter != memberof_filter:
                            results.remove(a)
                    else:
                        results.remove(a)

        if kw.get("type"):
            for a in acis:
                if "target" in a.target:
                    target = a.target["target"]["expression"]
                else:
                    results.remove(a)
                    continue
                found = False
                for k in _type_map.keys():
                    if _type_map[k] == target and kw["type"] == k:
                        found = True
                        break
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get("selfaci", False) is True:
            for a in acis:
                if a.bindrule["expression"] != u"ldap:///self":
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get("group"):
            for a in acis:
                groupdn = a.bindrule["expression"]
                groupdn = DN(groupdn.replace("ldap:///", ""))
                try:
                    cn = groupdn[0]["cn"]
                except (IndexError, KeyError):
                    cn = None
                if cn is None or cn != kw["group"]:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get("targetgroup"):
            for a in acis:
                found = False
                if "target" in a.target:
                    target = a.target["target"]["expression"]
                    targetdn = DN(target.replace("ldap:///", ""))
                    group_container_dn = DN(api.env.container_group, api.env.basedn)
                    if targetdn.endswith(group_container_dn):
                        try:
                            cn = targetdn[0]["cn"]
                        except (IndexError, KeyError):
                            cn = None
                        if cn == kw["targetgroup"]:
                            found = True
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get("filter"):
            if not kw["filter"].startswith("("):
                kw["filter"] = unicode("(" + kw["filter"] + ")")
            for a in acis:
                if (
                    "targetfilter" not in a.target
                    or not a.target["targetfilter"]["expression"]
                    or a.target["targetfilter"]["expression"] != kw["filter"]
                ):
                    results.remove(a)

        if kw.get("subtree"):
            for a in acis:
                if "target" in a.target:
                    target = a.target["target"]["expression"]
                else:
                    results.remove(a)
                    continue
                if kw["subtree"].lower() != target.lower():
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        acis = []
        for result in results:
            if kw.get("raw", False):
                aci = dict(aci=unicode(result))
            else:
                aci = _aci_to_kw(ldap, result, pkey_only=kw.get("pkey_only", False))
            acis.append(aci)

        return dict(result=acis, count=len(acis), truncated=False)
Example #9
def get_user_dn(uid):
    return DN(('uid', uid), api.env.container_user, api.env.basedn)
Example #10
def promote_check(installer):
    options = installer
    installer._enrollment_performed = False
    installer._top_dir = tempfile.mkdtemp("ipa")

    # check selinux status, http and DS ports, NTP conflicting services
    common_check(options.no_ntp)

    client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    if not client_fstore.has_files():
        ensure_enrolled(installer)
    else:
        if (options.domain_name or options.server or options.realm_name or
                options.host_name or options.password or options.keytab):
            print("IPA client is already configured on this system, ignoring "
                  "the --domain, --server, --realm, --hostname, --password "
                  "and --keytab options.")

        # The NTP configuration can not be touched on pre-installed client:
        if options.no_ntp or options.ntp_servers or options.ntp_pool:
            raise ScriptError(
                "NTP configuration cannot be updated during promotion")

    sstore = sysrestore.StateFile(paths.SYSRESTORE)

    fstore = sysrestore.FileStore(paths.SYSRESTORE)

    env = Env()
    env._bootstrap(context='installer', confdir=paths.ETC_IPA, log=None)
    env._finalize_core(**dict(constants.DEFAULT_CONFIG))

    # pylint: disable=no-member
    xmlrpc_uri = 'https://{}/ipa/xml'.format(ipautil.format_netloc(env.host))
    api.bootstrap(in_server=True,
                  context='installer',
                  confdir=paths.ETC_IPA,
                  ldap_uri=installutils.realm_to_ldapi_uri(env.realm),
                  xmlrpc_uri=xmlrpc_uri)
    # pylint: enable=no-member
    api.finalize()

    config = ReplicaConfig()
    config.realm_name = api.env.realm
    config.host_name = api.env.host
    config.domain_name = api.env.domain
    config.master_host_name = api.env.server
    config.ca_host_name = api.env.ca_host
    config.kra_host_name = config.ca_host_name
    config.ca_ds_port = 389
    config.setup_ca = options.setup_ca
    config.setup_kra = options.setup_kra
    config.dir = installer._top_dir
    config.basedn = api.env.basedn

    http_pkcs12_file = None
    http_pkcs12_info = None
    http_ca_cert = None
    dirsrv_pkcs12_file = None
    dirsrv_pkcs12_info = None
    dirsrv_ca_cert = None
    pkinit_pkcs12_file = None
    pkinit_pkcs12_info = None
    pkinit_ca_cert = None

    if options.http_cert_files:
        if options.http_pin is None:
            options.http_pin = installutils.read_password(
                "Enter Apache Server private key unlock",
                confirm=False, validate=False, retry=False)
            if options.http_pin is None:
                raise ScriptError(
                    "Apache Server private key unlock password required")
        http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
            cert_files=options.http_cert_files,
            key_password=options.http_pin,
            key_nickname=options.http_cert_name,
            ca_cert_files=options.ca_cert_files,
            host_name=config.host_name)
        http_pkcs12_info = (http_pkcs12_file.name, http_pin)

    if options.dirsrv_cert_files:
        if options.dirsrv_pin is None:
            options.dirsrv_pin = installutils.read_password(
                "Enter Directory Server private key unlock",
                confirm=False, validate=False, retry=False)
            if options.dirsrv_pin is None:
                raise ScriptError(
                    "Directory Server private key unlock password required")
        dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
            cert_files=options.dirsrv_cert_files,
            key_password=options.dirsrv_pin,
            key_nickname=options.dirsrv_cert_name,
            ca_cert_files=options.ca_cert_files,
            host_name=config.host_name)
        dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)

    if options.pkinit_cert_files:
        if options.pkinit_pin is None:
            options.pkinit_pin = installutils.read_password(
                "Enter Kerberos KDC private key unlock",
                confirm=False, validate=False, retry=False)
            if options.pkinit_pin is None:
                raise ScriptError(
                    "Kerberos KDC private key unlock password required")
        pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
            cert_files=options.pkinit_cert_files,
            key_password=options.pkinit_pin,
            key_nickname=options.pkinit_cert_name,
            ca_cert_files=options.ca_cert_files,
            realm_name=config.realm_name)
        pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)

    if (options.http_cert_files and options.dirsrv_cert_files and
            http_ca_cert != dirsrv_ca_cert):
        raise RuntimeError("Apache Server SSL certificate and Directory "
                           "Server SSL certificate are not signed by the same"
                           " CA certificate")

    if (options.http_cert_files and
            options.pkinit_cert_files and
            http_ca_cert != pkinit_ca_cert):
        raise RuntimeError("Apache Server SSL certificate and PKINIT KDC "
                           "certificate are not signed by the same CA "
                           "certificate")

    installutils.verify_fqdn(config.host_name, options.no_host_dns)
    installutils.verify_fqdn(config.master_host_name, options.no_host_dns)

    ccache = os.environ['KRB5CCNAME']
    kinit_keytab('host/{env.host}@{env.realm}'.format(env=api.env),
                 paths.KRB5_KEYTAB,
                 ccache)

    cafile = paths.IPA_CA_CRT
    if not os.path.isfile(cafile):
        raise RuntimeError("CA cert file is not available! Please reinstall"
                           "the client and try again.")

    ldapuri = 'ldaps://%s' % ipautil.format_netloc(config.master_host_name)
    xmlrpc_uri = 'https://{}/ipa/xml'.format(
        ipautil.format_netloc(config.master_host_name))
    remote_api = create_api(mode=None)
    remote_api.bootstrap(in_server=True,
                         context='installer',
                         confdir=paths.ETC_IPA,
                         ldap_uri=ldapuri,
                         xmlrpc_uri=xmlrpc_uri)
    remote_api.finalize()
    installer._remote_api = remote_api

    with rpc_client(remote_api) as client:
        check_remote_version(client, parse_version(api.env.version))
        check_remote_fips_mode(client, api.env.fips_mode)

    conn = remote_api.Backend.ldap2
    replman = None
    try:
        # Try out authentication
        conn.connect(ccache=ccache)
        replman = ReplicationManager(config.realm_name,
                                     config.master_host_name, None)

        promotion_check_ipa_domain(conn, remote_api.env.basedn)

        # Make sure that domain fulfills minimal domain level
        # requirement
        domain_level = current_domain_level(remote_api)
        check_domain_level_is_supported(domain_level)
        if domain_level < constants.MIN_DOMAIN_LEVEL:
            raise RuntimeError(
                "Cannot promote this client to a replica. The domain level "
                "must be raised to {mindomainlevel} before the replica can be "
                "installed".format(
                    mindomainlevel=constants.MIN_DOMAIN_LEVEL
                ))

        # Check authorization
        result = remote_api.Command['hostgroup_find'](
            cn=u'ipaservers',
            host=[unicode(api.env.host)]
        )['result']
        add_to_ipaservers = not result

        if add_to_ipaservers:
            if options.password and not options.admin_password:
                raise errors.ACIError(info="Not authorized")

            if installer._ccache is None:
                del os.environ['KRB5CCNAME']
            else:
                os.environ['KRB5CCNAME'] = installer._ccache

            try:
                installutils.check_creds(options, config.realm_name)
                installer._ccache = os.environ.get('KRB5CCNAME')
            finally:
                os.environ['KRB5CCNAME'] = ccache

            conn.disconnect()
            conn.connect(ccache=installer._ccache)

            try:
                result = remote_api.Command['hostgroup_show'](
                    u'ipaservers',
                    all=True,
                    rights=True
                )['result']

                if 'w' not in result['attributelevelrights']['member']:
                    raise errors.ACIError(info="Not authorized")
            finally:
                conn.disconnect()
                conn.connect(ccache=ccache)


        # Check that we don't already have a replication agreement
        if replman.get_replication_agreement(config.host_name):
            msg = ("A replication agreement for this host already exists. "
                   "It needs to be removed.\n"
                   "Run this command:\n"
                   "    %% ipa-replica-manage del {host} --force"
                   .format(host=config.host_name))
            raise ScriptError(msg, rval=3)

        # Detect if the other master can handle replication managers
        # cn=replication managers,cn=sysaccounts,cn=etc,$SUFFIX
        dn = DN(('cn', 'replication managers'), ('cn', 'sysaccounts'),
                ('cn', 'etc'), ipautil.realm_to_suffix(config.realm_name))
        try:
            conn.get_entry(dn)
        except errors.NotFound:
            msg = ("The Replication Managers group is not available in "
                   "the domain. Replica promotion requires the use of "
                   "Replication Managers to be able to replicate data. "
                   "Upgrade the peer master or use the ipa-replica-prepare "
                   "command on the master and use a prep file to install "
                   "this replica.")
            logger.error("%s", msg)
            raise ScriptError(rval=3)

        dns_masters = remote_api.Object['dnsrecord'].get_dns_masters()
        if dns_masters:
            if not options.no_host_dns:
                logger.debug('Check forward/reverse DNS resolution')
                resolution_ok = (
                    check_dns_resolution(config.master_host_name,
                                         dns_masters) and
                    check_dns_resolution(config.host_name, dns_masters))
                if not resolution_ok and installer.interactive:
                    if not ipautil.user_input("Continue?", False):
                        raise ScriptError(rval=0)
        else:
            logger.debug('No IPA DNS servers, '
                         'skipping forward/reverse resolution check')

        entry_attrs = conn.get_ipa_config()
        subject_base = entry_attrs.get('ipacertificatesubjectbase', [None])[0]
        if subject_base is not None:
            config.subject_base = DN(subject_base)

        # Find if any server has a CA
        ca_host = service.find_providing_server(
                'CA', conn, config.ca_host_name)
        if ca_host is not None:
            config.ca_host_name = ca_host
            ca_enabled = True
            if options.dirsrv_cert_files:
                logger.error("Certificates could not be provided when "
                             "CA is present on some master.")
                raise ScriptError(rval=3)
        else:
            if options.setup_ca:
                logger.error("The remote master does not have a CA "
                             "installed, can't set up CA")
                raise ScriptError(rval=3)
            ca_enabled = False
            if not options.dirsrv_cert_files:
                logger.error("Cannot issue certificates: a CA is not "
                             "installed. Use the --http-cert-file, "
                             "--dirsrv-cert-file options to provide "
                             "custom certificates.")
                raise ScriptError(rval=3)

        kra_host = service.find_providing_server(
                'KRA', conn, config.kra_host_name)
        if kra_host is not None:
            config.kra_host_name = kra_host
            kra_enabled = True
        else:
            if options.setup_kra:
                logger.error("There is no KRA server in the domain, "
                             "can't setup a KRA clone")
                raise ScriptError(rval=3)
            kra_enabled = False

        if ca_enabled:
            options.realm_name = config.realm_name
            options.host_name = config.host_name
            ca.install_check(False, config, options)

        if kra_enabled:
            try:
                kra.install_check(remote_api, config, options)
            except RuntimeError as e:
                raise ScriptError(e)

        if options.setup_dns:
            dns.install_check(False, remote_api, True, options,
                              config.host_name)
            config.ips = dns.ip_addresses
        else:
            config.ips = installutils.get_server_ip_address(
                config.host_name, not installer.interactive,
                False, options.ip_addresses)

            # check addresses here, dns module is doing own check
            no_matching_interface_for_ip_address_warning(config.ips)

        if options.setup_adtrust:
            adtrust.install_check(False, options, remote_api)

    except errors.ACIError:
        logger.debug("%s", traceback.format_exc())
        raise ScriptError("\nInsufficient privileges to promote the server."
                          "\nPossible issues:"
                          "\n- A user has insufficient privileges"
                          "\n- This client has insufficient privileges "
                          "to become an IPA replica")
    except errors.LDAPError:
        logger.debug("%s", traceback.format_exc())
        raise ScriptError("\nUnable to connect to LDAP server %s" %
                          config.master_host_name)
    finally:
        if replman and replman.conn:
            replman.conn.unbind()
        if conn.isconnected():
            conn.disconnect()

    # check connection
    if not options.skip_conncheck:
        if add_to_ipaservers:
            # use user's credentials when the server host is not in ipaservers
            if installer._ccache is None:
                del os.environ['KRB5CCNAME']
            else:
                os.environ['KRB5CCNAME'] = installer._ccache

        try:
            replica_conn_check(
                config.master_host_name, config.host_name, config.realm_name,
                options.setup_ca, 389,
                options.admin_password, principal=options.principal,
                ca_cert_file=cafile)
        finally:
            if add_to_ipaservers:
                os.environ['KRB5CCNAME'] = ccache

    installer._ca_enabled = ca_enabled
    installer._kra_enabled = kra_enabled
    installer._ca_file = cafile
    installer._fstore = fstore
    installer._sstore = sstore
    installer._config = config
    installer._add_to_ipaservers = add_to_ipaservers
    installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
    installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
    installer._http_pkcs12_file = http_pkcs12_file
    installer._http_pkcs12_info = http_pkcs12_info
    installer._pkinit_pkcs12_file = pkinit_pkcs12_file
    installer._pkinit_pkcs12_info = pkinit_pkcs12_info
Example #11
class update_referint(Updater):
    """
    Update referential integrity configuration to new style
    http://directory.fedoraproject.org/docs/389ds/design/ri-plugin-configuration.html

    old attr              -> new attr
    nsslapd-pluginArg0    -> referint-update-delay
    nsslapd-pluginArg1    -> referint-logfile
    nsslapd-pluginArg2    -> referint-logchanges
    nsslapd-pluginArg3..N -> referint-membership-attr [3..N]

    Old and new style cannot be mixed; all nsslapd-pluginArg* attrs have to be removed
    """

    referint_dn = DN(('cn', 'referential integrity postoperation'),
                     ('cn', 'plugins'), ('cn', 'config'))

    def execute(self, **options):

        logger.debug("Upgrading referential integrity plugin configuration")
        ldap = self.api.Backend.ldap2
        try:
            entry = ldap.get_entry(self.referint_dn)
        except errors.NotFound:
            logger.error("Referential integrity configuration not found")
            return False, []

        referint_membership_attrs = []

        logger.debug("Initial value: %s", repr(entry))

        # nsslapd-pluginArg0    -> referint-update-delay
        update_delay = entry.get('nsslapd-pluginArg0')
        if update_delay:
            logger.debug("add: referint-update-delay: %s", update_delay)
            entry['referint-update-delay'] = update_delay
            entry['nsslapd-pluginArg0'] = None
        else:
            logger.debug("Plugin already uses new style, skipping")
            return False, []

        # nsslapd-pluginArg1    -> referint-logfile
        logfile = entry.get('nsslapd-pluginArg1')
        if logfile:
            logger.debug("add: referint-logfile: %s", logfile)
            entry['referint-logfile'] = logfile
            entry['nsslapd-pluginArg1'] = None

        # nsslapd-pluginArg2    -> referint-logchanges
        logchanges = entry.get('nsslapd-pluginArg2')
        if logchanges:
            logger.debug("add: referint-logchanges: %s", logchanges)
            entry['referint-logchanges'] = logchanges
            entry['nsslapd-pluginArg2'] = None

        # nsslapd-pluginArg3..N -> referint-membership-attr [3..N]
        for key in list(entry):
            if key.lower().startswith('nsslapd-pluginarg'):
                arg_val = entry.single_value[key]
                if arg_val:
                    referint_membership_attrs.append(arg_val)
                entry[key] = None

        if referint_membership_attrs:
            # entry['referint-membership-attr'] is None, plugin doesn't allow
            # mixing old and new style
            entry['referint-membership-attr'] = referint_membership_attrs

        logger.debug("Final value: %s", repr(entry))
        try:
            ldap.update_entry(entry)
        except errors.EmptyModlist:
            logger.debug("No modifications required")
            return False, []

        return False, []
Example #12
    def __setup_ssl(self):
        db = certs.CertDB(self.realm,
                          nssdir=paths.HTTPD_ALIAS_DIR,
                          subject_base=self.subject_base,
                          user="******",
                          group=constants.HTTPD_GROUP,
                          create=True)
        self.disable_system_trust()
        self.create_password_conf()
        if self.pkcs12_info:
            if self.ca_is_configured:
                trust_flags = 'CT,C,C'
            else:
                trust_flags = None
            db.init_from_pkcs12(self.pkcs12_info[0],
                                self.pkcs12_info[1],
                                ca_file=self.ca_file,
                                trust_flags=trust_flags)
            server_certs = db.find_server_certs()
            if len(server_certs) == 0:
                raise RuntimeError(
                    "Could not find a suitable server cert in import in %s" %
                    self.pkcs12_info[0])

            # We only handle one server cert
            nickname = server_certs[0][0]
            if nickname == 'ipaCert':
                nickname = server_certs[1][0]
            self.dercert = db.get_cert_from_db(nickname, pem=False)

            if self.ca_is_configured:
                db.track_server_cert(nickname, self.principal, db.passwd_fname,
                                     'restart_httpd')

            self.__set_mod_nss_nickname(nickname)
            self.add_cert_to_service()

        else:
            if not self.promote:
                ca_args = [
                    paths.CERTMONGER_DOGTAG_SUBMIT, '--ee-url',
                    'https://%s:8443/ca/ee/ca' % self.fqdn, '--certfile',
                    paths.RA_AGENT_PEM, '--keyfile', paths.RA_AGENT_KEY,
                    '--cafile', paths.IPA_CA_CRT, '--agent-submit'
                ]
                helper = " ".join(ca_args)
                prev_helper = certmonger.modify_ca_helper('IPA', helper)
            else:
                prev_helper = None
            try:
                certmonger.request_and_wait_for_cert(
                    certpath=db.secdir,
                    nickname=self.cert_nickname,
                    principal=self.principal,
                    passwd_fname=db.passwd_fname,
                    subject=str(DN(('CN', self.fqdn), self.subject_base)),
                    ca='IPA',
                    profile=dogtag.DEFAULT_PROFILE,
                    dns=[self.fqdn],
                    post_command='restart_httpd')
            finally:
                if prev_helper is not None:
                    certmonger.modify_ca_helper('IPA', prev_helper)

            self.dercert = db.get_cert_from_db(self.cert_nickname, pem=False)

            if prev_helper is not None:
                self.add_cert_to_service()

            # Verify we have a valid server cert
            server_certs = db.find_server_certs()
            if not server_certs:
                raise RuntimeError("Could not find a suitable server cert.")

        # store the CA cert nickname so that we can publish it later on
        self.cacert_nickname = db.cacert_name
Example #13
def get_group_dn(cn):
    return DN(('cn', cn), api.env.container_group, api.env.basedn)
Example #14
    def execute(self, ldapuri, bindpw, **options):
        ldap = self.api.Backend.ldap2
        self.normalize_options(options)
        config = ldap.get_ipa_config()

        ds_base_dn = options.get('basedn')
        if ds_base_dn is not None:
            assert isinstance(ds_base_dn, DN)

        # check if migration mode is enabled
        if config.get('ipamigrationenabled', ('FALSE', ))[0] == 'FALSE':
            return dict(result={}, failed={}, enabled=False, compat=True)

        # connect to DS
        cacert = None
        if options.get('cacertfile') is not None:
            # store CA cert into file
            tmp_ca_cert_f = write_tmp_file(options['cacertfile'])
            cacert = tmp_ca_cert_f.name

            # start TLS connection
            ds_ldap = LDAPClient(ldapuri, cacert=cacert)
            ds_ldap.simple_bind(options['binddn'], bindpw)

            tmp_ca_cert_f.close()
        else:
            ds_ldap = LDAPClient(ldapuri, cacert=cacert)
            ds_ldap.simple_bind(options['binddn'], bindpw)

        # check whether the compat plugin is enabled
        if not options.get('compat'):
            try:
                ldap.get_entry(DN(('cn', 'compat'), (api.env.basedn)))
                return dict(result={}, failed={}, enabled=True, compat=False)
            except errors.NotFound:
                pass

        if not ds_base_dn:
            # retrieve base DN from remote LDAP server
            entries, _truncated = ds_ldap.find_entries(
                '',
                ['namingcontexts', 'defaultnamingcontext'],
                DN(''),
                ds_ldap.SCOPE_BASE,
                size_limit=-1,
                time_limit=0,
            )
            if 'defaultnamingcontext' in entries[0]:
                ds_base_dn = DN(entries[0]['defaultnamingcontext'][0])
                assert isinstance(ds_base_dn, DN)
            else:
                try:
                    ds_base_dn = DN(entries[0]['namingcontexts'][0])
                    assert isinstance(ds_base_dn, DN)
                except (IndexError, KeyError) as e:
                    raise Exception(str(e))

        # migrate!
        (migrated, failed) = self.migrate(ldap, config, ds_ldap, ds_base_dn,
                                          options)

        return dict(result=migrated, failed=failed, enabled=True, compat=True)
Example #15
File: aci.py Project: zhoubh/freeipa
must include all existing attributes as well. When doing an aci-mod the
targetattr REPLACES the current attributes, it does not add to them.
""")

if six.PY3:
    unicode = str

logger = logging.getLogger(__name__)

register = Registry()

ACI_NAME_PREFIX_SEP = ":"

_type_map = {
    'user':
    'ldap:///' + str(DN(('uid', '*'), api.env.container_user, api.env.basedn)),
    'group':
    'ldap:///' + str(DN(('cn', '*'), api.env.container_group, api.env.basedn)),
    'host':
    'ldap:///' + str(DN(
        ('fqdn', '*'), api.env.container_host, api.env.basedn)),
    'hostgroup':
    'ldap:///' +
    str(DN(('cn', '*'), api.env.container_hostgroup, api.env.basedn)),
    'service':
    'ldap:///' + str(
        DN(('krbprincipalname', '*'), api.env.container_service,
           api.env.basedn)),
    'netgroup':
    'ldap:///' +
    str(DN(('ipauniqueid', '*'), api.env.container_netgroup, api.env.basedn)),
Example #16
class migrate_ds(Command):
    __doc__ = _('Migrate users and groups from DS to IPA.')

    migrate_objects = {
        # OBJECT_NAME: (search_filter, pre_callback, post_callback)
        #
        # OBJECT_NAME - is the name of an LDAPObject subclass
        # search_filter - is the filter to retrieve objects from DS
        # pre_callback - is called for each object just after it was
        #                retrieved from DS and before being added to IPA
        # post_callback - is called for each object after it was added to IPA
        # exc_callback - is called when adding entry to IPA raises an exception
        #
        # {pre, post}_callback parameters:
        #  ldap - ldap2 instance connected to IPA
        #  pkey - primary key value of the object (uid for users, etc.)
        #  dn - dn of the object as it (will be/is) stored in IPA
        #  entry_attrs - attributes of the object
        #  failed - a list of so-far failed objects
        #  config - IPA config entry attributes
        #  ctx - object context, used to pass data between callbacks
        #
        # If pre_callback return value evaluates to False, migration
        # of the current object is aborted.
        'user': {
            'filter_template': '(&(|%s)(uid=*))',
            'oc_option': 'userobjectclass',
            'oc_blacklist_option': 'userignoreobjectclass',
            'attr_blacklist_option': 'userignoreattribute',
            'pre_callback': _pre_migrate_user,
            'post_callback': _post_migrate_user,
            'exc_callback': None
        },
        'group': {
            'filter_template': '(&(|%s)(cn=*))',
            'oc_option': 'groupobjectclass',
            'oc_blacklist_option': 'groupignoreobjectclass',
            'attr_blacklist_option': 'groupignoreattribute',
            'pre_callback': _pre_migrate_group,
            'post_callback': None,
            'exc_callback': _group_exc_callback,
        },
    }
    migrate_order = ('user', 'group')

    takes_args = (
        Str(
            'ldapuri',
            validate_ldapuri,
            cli_name='ldap_uri',
            label=_('LDAP URI'),
            doc=_('LDAP URI of DS server to migrate from'),
        ),
        Password(
            'bindpw',
            cli_name='password',
            label=_('Password'),
            confirm=False,
            doc=_('bind password'),
        ),
    )

    takes_options = (
        DNParam('binddn?',
            cli_name='bind_dn',
            label=_('Bind DN'),
            default=DN(('cn', 'directory manager')),
            autofill=True,
        ),
        DNParam('usercontainer',
            cli_name='user_container',
            label=_('User container'),
            doc=_('DN of container for users in DS relative to base DN'),
            default=DN(('ou', 'people')),
            autofill=True,
        ),
        DNParam('groupcontainer',
            cli_name='group_container',
            label=_('Group container'),
            doc=_('DN of container for groups in DS relative to base DN'),
            default=DN(('ou', 'groups')),
            autofill=True,
        ),
        Str('userobjectclass+',
            cli_name='user_objectclass',
            label=_('User object class'),
            doc=_('Objectclasses used to search for user entries in DS'),
            default=(u'person',),
            autofill=True,
        ),
        Str('groupobjectclass+',
            cli_name='group_objectclass',
            label=_('Group object class'),
            doc=_('Objectclasses used to search for group entries in DS'),
            default=(u'groupOfUniqueNames', u'groupOfNames'),
            autofill=True,
        ),
        Str('userignoreobjectclass*',
            cli_name='user_ignore_objectclass',
            label=_('Ignore user object class'),
            doc=_('Objectclasses to be ignored for user entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('userignoreattribute*',
            cli_name='user_ignore_attribute',
            label=_('Ignore user attribute'),
            doc=_('Attributes to be ignored for user entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('groupignoreobjectclass*',
            cli_name='group_ignore_objectclass',
            label=_('Ignore group object class'),
            doc=_('Objectclasses to be ignored for group entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Str('groupignoreattribute*',
            cli_name='group_ignore_attribute',
            label=_('Ignore group attribute'),
            doc=_('Attributes to be ignored for group entries in DS'),
            default=tuple(),
            autofill=True,
        ),
        Flag('groupoverwritegid',
            cli_name='group_overwrite_gid',
            label=_('Overwrite GID'),
            doc=_('When migrating a group already existing in IPA domain overwrite the '\
                  'group GID and report as success'),
        ),
        StrEnum('schema?',
            cli_name='schema',
            label=_('LDAP schema'),
            doc=_('The schema used on the LDAP server. Supported values are RFC2307 and RFC2307bis. The default is RFC2307bis'),
            values=_supported_schemas,
            default=_supported_schemas[0],
            autofill=True,
        ),
        Flag('continue?',
            label=_('Continue'),
            doc=_('Continuous operation mode. Errors are reported but the process continues'),
            default=False,
        ),
        DNParam('basedn?',
            cli_name='base_dn',
            label=_('Base DN'),
            doc=_('Base DN on remote LDAP server'),
        ),
        Flag('compat?',
            cli_name='with_compat',
            label=_('Ignore compat plugin'),
            doc=_('Allows migration despite the usage of compat plugin'),
            default=False,
        ),
        Str('cacertfile?',
            cli_name='ca_cert_file',
            label=_('CA certificate'),
            doc=_('Load CA certificate of LDAP server from FILE'),
            default=None,
            noextrawhitespace=False,
            ),
        Bool('use_def_group?',
             cli_name='use_default_group',
             label=_('Add to default group'),
             doc=_('Add migrated users without a group to a default group '
                   '(default: true)'),
             default=True,
             autofill=True,
             ),
        StrEnum('scope',
                cli_name='scope',
                label=_('Search scope'),
                doc=_('LDAP search scope for users and groups: base, '
                      'onelevel, or subtree. Defaults to onelevel'),
                values=sorted(_supported_scopes),
                default=_default_scope,
                autofill=True,
                ),
    )

    has_output = (
        output.Output(
            'result',
            type=dict,
            doc=_('Lists of objects migrated; categorized by type.'),
        ),
        output.Output(
            'failed',
            type=dict,
            doc=_('Lists of objects that could not be migrated; '
                  'categorized by type.'),
        ),
        output.Output(
            'enabled',
            type=bool,
            doc=_('False if migration mode was disabled.'),
        ),
        output.Output(
            'compat',
            type=bool,
            doc=_('False if migration fails because the compatibility '
                  'plug-in is enabled.'),
        ),
    )

    exclude_doc = _('%s to exclude from migration')

    truncated_err_msg = _('''\
search results for objects to be migrated
have been truncated by the server;
migration process might be incomplete\n''')

    def get_options(self):
        """
        Call get_options of the baseclass and add "exclude" options
        for each type of object being migrated.
        """
        for option in super(migrate_ds, self).get_options():
            yield option
        for ldap_obj_name in self.migrate_objects:
            ldap_obj = self.api.Object[ldap_obj_name]
            name = 'exclude_%ss' % to_cli(ldap_obj_name)
            doc = self.exclude_doc % ldap_obj.object_name_plural
            yield Str('%s*' % name,
                      cli_name=name,
                      doc=doc,
                      default=tuple(),
                      autofill=True)

    def normalize_options(self, options):
        """
        Convert all "exclude" option values to lower-case.

        Also, empty List parameters are converted to None, but the migration
        plugin doesn't like that - convert back to empty lists.
        """
        names = [
            'userobjectclass', 'groupobjectclass', 'userignoreobjectclass',
            'userignoreattribute', 'groupignoreobjectclass',
            'groupignoreattribute'
        ]
        names.extend('exclude_%ss' % to_cli(n) for n in self.migrate_objects)
        for name in names:
            if options[name]:
                options[name] = tuple(v.lower() for v in options[name])
            else:
                options[name] = tuple()

    def _get_search_bases(self, options, ds_base_dn, migrate_order):
        search_bases = dict()
        for ldap_obj_name in migrate_order:
            container = options.get('%scontainer' % to_cli(ldap_obj_name))
            if container:
                # Don't append base dn if user already appended it in the container dn
                if container.endswith(ds_base_dn):
                    search_base = container
                else:
                    search_base = DN(container, ds_base_dn)
            else:
                search_base = ds_base_dn
            search_bases[ldap_obj_name] = search_base
        return search_bases

    def migrate(self, ldap, config, ds_ldap, ds_base_dn, options):
        """
        Migrate objects from DS to LDAP.
        """
        assert isinstance(ds_base_dn, DN)
        migrated = {}  # {'OBJ': ['PKEY1', 'PKEY2', ...], ...}
        failed = {}  # {'OBJ': {'PKEY1': 'Failed 'cos blabla', ...}, ...}
        search_bases = self._get_search_bases(options, ds_base_dn,
                                              self.migrate_order)
        migration_start = datetime.datetime.now()

        scope = _supported_scopes[options.get('scope')]

        for ldap_obj_name in self.migrate_order:
            ldap_obj = self.api.Object[ldap_obj_name]

            template = self.migrate_objects[ldap_obj_name]['filter_template']
            oc_list = options[to_cli(
                self.migrate_objects[ldap_obj_name]['oc_option'])]
            search_filter = construct_filter(template, oc_list)

            exclude = options['exclude_%ss' % to_cli(ldap_obj_name)]
            context = dict(ds_ldap=ds_ldap)

            migrated[ldap_obj_name] = []
            failed[ldap_obj_name] = {}

            try:
                entries, truncated = ds_ldap.find_entries(
                    search_filter, ['*'],
                    search_bases[ldap_obj_name],
                    scope,
                    time_limit=0,
                    size_limit=-1)
            except errors.NotFound:
                if not options.get('continue', False):
                    raise errors.NotFound(
                        reason=
                        _('%(container)s LDAP search did not return any result '
                          '(search base: %(search_base)s, '
                          'objectclass: %(objectclass)s)') % {
                              'container': ldap_obj_name,
                              'search_base': search_bases[ldap_obj_name],
                              'objectclass': ', '.join(oc_list)
                          })
                else:
                    truncated = False
                    entries = []
            if truncated:
                logger.error('%s: %s', ldap_obj.name, self.truncated_err_msg)

            blacklists = {}
            for blacklist in ('oc_blacklist', 'attr_blacklist'):
                blacklist_option = self.migrate_objects[ldap_obj_name][
                    blacklist + '_option']
                if blacklist_option is not None:
                    blacklists[blacklist] = options.get(
                        blacklist_option, tuple())
                else:
                    blacklists[blacklist] = tuple()

            # get default primary group for new users
            if 'def_group_dn' not in context and options.get('use_def_group'):
                def_group = config.get('ipadefaultprimarygroup')
                context['def_group_dn'] = api.Object.group.get_dn(def_group)
                try:
                    ldap.get_entry(context['def_group_dn'],
                                   ['gidnumber', 'cn'])
                except errors.NotFound:
                    error_msg = _('Default group for new users not found')
                    raise errors.NotFound(reason=error_msg)

            context['has_upg'] = ldap.has_upg()

            valid_gids = set()
            invalid_gids = set()
            migrate_cnt = 0
            context['migrate_cnt'] = 0
            for entry_attrs in entries:
                context['migrate_cnt'] = migrate_cnt
                s = datetime.datetime.now()

                ava = entry_attrs.dn[0][0]
                if ava.attr == ldap_obj.primary_key.name:
                    # In case the pkey attribute is in the migrated object DN
                    # and the original LDAP attribute is multivalued, make sure
                    # that we pick the correct value (the unique one stored in
                    # the DN)
                    pkey = ava.value.lower()
                else:
                    pkey = entry_attrs[ldap_obj.primary_key.name][0].lower()

                if pkey in exclude:
                    continue

                entry_attrs.dn = ldap_obj.get_dn(pkey)
                entry_attrs['objectclass'] = list(
                    set(
                        config.get(ldap_obj.object_class_config,
                                   ldap_obj.object_class) +
                        [o.lower() for o in entry_attrs['objectclass']]))
                entry_attrs[ldap_obj.primary_key.name][0] = entry_attrs[
                    ldap_obj.primary_key.name][0].lower()

                callback = self.migrate_objects[ldap_obj_name]['pre_callback']
                if callable(callback):
                    try:
                        entry_attrs.dn = callback(ldap,
                                                  pkey,
                                                  entry_attrs.dn,
                                                  entry_attrs,
                                                  failed[ldap_obj_name],
                                                  config,
                                                  context,
                                                  schema=options['schema'],
                                                  search_bases=search_bases,
                                                  valid_gids=valid_gids,
                                                  invalid_gids=invalid_gids,
                                                  **blacklists)
                        if not entry_attrs.dn:
                            continue
                    except errors.NotFound as e:
                        failed[ldap_obj_name][pkey] = unicode(e.reason)
                        continue

                try:
                    ldap.add_entry(entry_attrs)
                except errors.ExecutionError as e:
                    callback = self.migrate_objects[ldap_obj_name][
                        'exc_callback']
                    if callable(callback):
                        try:
                            callback(ldap, entry_attrs.dn, entry_attrs, e,
                                     options)
                        except errors.ExecutionError as e:
                            failed[ldap_obj_name][pkey] = unicode(e)
                            continue
                    else:
                        failed[ldap_obj_name][pkey] = unicode(e)
                        continue

                migrated[ldap_obj_name].append(pkey)

                callback = self.migrate_objects[ldap_obj_name]['post_callback']
                if callable(callback):
                    callback(ldap, pkey, entry_attrs.dn, entry_attrs,
                             failed[ldap_obj_name], config, context)
                e = datetime.datetime.now()
                d = e - s
                total_dur = e - migration_start
                migrate_cnt += 1
                if migrate_cnt > 0 and migrate_cnt % 100 == 0:
                    logger.info("%d %ss migrated. %s elapsed.", migrate_cnt,
                                ldap_obj_name, total_dur)
                logger.debug("%d %ss migrated, duration: %s (total %s)",
                             migrate_cnt, ldap_obj_name, d, total_dur)

        if 'def_group_dn' in context:
            _update_default_group(ldap, context, True)

        return (migrated, failed)

    def execute(self, ldapuri, bindpw, **options):
        ldap = self.api.Backend.ldap2
        self.normalize_options(options)
        config = ldap.get_ipa_config()

        ds_base_dn = options.get('basedn')
        if ds_base_dn is not None:
            assert isinstance(ds_base_dn, DN)

        # check if migration mode is enabled
        if config.get('ipamigrationenabled', ('FALSE', ))[0] == 'FALSE':
            return dict(result={}, failed={}, enabled=False, compat=True)

        # connect to DS
        cacert = None
        if options.get('cacertfile') is not None:
            # store CA cert into file
            tmp_ca_cert_f = write_tmp_file(options['cacertfile'])
            cacert = tmp_ca_cert_f.name

            # start TLS connection
            ds_ldap = LDAPClient(ldapuri, cacert=cacert)
            ds_ldap.simple_bind(options['binddn'], bindpw)

            tmp_ca_cert_f.close()
        else:
            ds_ldap = LDAPClient(ldapuri, cacert=cacert)
            ds_ldap.simple_bind(options['binddn'], bindpw)

        # check whether the compat plugin is enabled
        if not options.get('compat'):
            try:
                ldap.get_entry(DN(('cn', 'compat'), (api.env.basedn)))
                return dict(result={}, failed={}, enabled=True, compat=False)
            except errors.NotFound:
                pass

        if not ds_base_dn:
            # retrieve base DN from remote LDAP server
            entries, _truncated = ds_ldap.find_entries(
                '',
                ['namingcontexts', 'defaultnamingcontext'],
                DN(''),
                ds_ldap.SCOPE_BASE,
                size_limit=-1,
                time_limit=0,
            )
            if 'defaultnamingcontext' in entries[0]:
                ds_base_dn = DN(entries[0]['defaultnamingcontext'][0])
                assert isinstance(ds_base_dn, DN)
            else:
                try:
                    ds_base_dn = DN(entries[0]['namingcontexts'][0])
                    assert isinstance(ds_base_dn, DN)
                except (IndexError, KeyError) as e:
                    raise Exception(str(e))

        # migrate!
        (migrated, failed) = self.migrate(ldap, config, ds_ldap, ds_base_dn,
                                          options)

        return dict(result=migrated, failed=failed, enabled=True, compat=True)
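
A hypothetical invocation sketch of the command above (the remote server URI, credentials and options are invented; it assumes the plugin is registered as api.Command['migrate_ds'] and that the API context is already initialized and connected):

from ipalib import api
from ipapython.dn import DN

# made-up remote DS URI and Directory Manager credentials
result = api.Command['migrate_ds'](
    u'ldap://old-directory.example.test',
    u'Secret123',
    binddn=DN(('cn', 'directory manager')),
    compat=False,
)
if not result['enabled']:
    # mirrors the ipamigrationenabled check performed in execute() above
    print('Migration mode is disabled in the IPA config; enable it first.')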
Example #17
0
def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx,
                      **kwargs):
    assert isinstance(dn, DN)
    attr_blacklist = ['krbprincipalkey', 'memberofindirect', 'memberindirect']
    attr_blacklist.extend(kwargs.get('attr_blacklist', []))
    ds_ldap = ctx['ds_ldap']
    search_bases = kwargs.get('search_bases', None)
    valid_gids = kwargs['valid_gids']
    invalid_gids = kwargs['invalid_gids']

    if 'gidnumber' not in entry_attrs:
        raise errors.NotFound(reason=_('%(user)s is not a POSIX user') %
                              dict(user=pkey))
    else:
        # See if the gidNumber at least points to a valid group on the remote
        # server.
        if entry_attrs['gidnumber'][0] in invalid_gids:
            logger.warning(
                'GID number %s of migrated user %s does not point '
                'to a known group.', entry_attrs['gidnumber'][0], pkey)
        elif entry_attrs['gidnumber'][0] not in valid_gids:
            try:
                remote_entry = ds_ldap.find_entry_by_attr(
                    'gidnumber', entry_attrs['gidnumber'][0], 'posixgroup',
                    [''], search_bases['group'])
                valid_gids.add(entry_attrs['gidnumber'][0])
            except errors.NotFound:
                logger.warning(
                    'GID number %s of migrated user %s does not '
                    'point to a known group.', entry_attrs['gidnumber'][0],
                    pkey)
                invalid_gids.add(entry_attrs['gidnumber'][0])
            except errors.SingleMatchExpected as e:
                # GID number matched more groups, this should not happen
                logger.warning(
                    'GID number %s of migrated user %s should '
                    'match 1 group, but it matched %d groups',
                    entry_attrs['gidnumber'][0], pkey, e.found)
            except errors.LimitsExceeded as e:
                logger.warning('Search limit exceeded searching for GID %s',
                               entry_attrs['gidnumber'][0])

    # We don't want to create a UPG so set the magic value in description
    # to let the DS plugin know.
    entry_attrs.setdefault('description', [])
    entry_attrs['description'].append(NO_UPG_MAGIC)

    # fill in required attributes by IPA
    entry_attrs['ipauniqueid'] = 'autogenerate'
    if 'homedirectory' not in entry_attrs:
        homes_root = config.get('ipahomesrootdir', (paths.HOME_DIR, ))[0]
        home_dir = '%s/%s' % (homes_root, pkey)
        home_dir = home_dir.replace('//', '/').rstrip('/')
        entry_attrs['homedirectory'] = home_dir

    if 'loginshell' not in entry_attrs:
        default_shell = config.get('ipadefaultloginshell', [paths.SH])[0]
        entry_attrs.setdefault('loginshell', default_shell)

    # do not migrate all attributes
    for attr in attr_blacklist:
        entry_attrs.pop(attr, None)

    # do not migrate all object classes
    if 'objectclass' in entry_attrs:
        for object_class in kwargs.get('oc_blacklist', []):
            try:
                entry_attrs['objectclass'].remove(object_class)
            except ValueError:  # object class not present
                pass

    _create_kerberos_principals(ldap, pkey, entry_attrs, failed)

    # Fix any attributes with DN syntax that point to entries in the old
    # tree

    for attr in entry_attrs.keys():
        if ldap.has_dn_syntax(attr):
            for ind, value in enumerate(entry_attrs[attr]):
                if not isinstance(value, DN):
                    # value is not DN instance, the automatic encoding may have
                    # failed due to missing schema or the remote attribute type OID was
                    # not detected as DN type. Try to work this around
                    logger.debug(
                        '%s: value %s of type %s in attribute %s is '
                        'not a DN, convert it', pkey, value, type(value), attr)
                    try:
                        value = DN(value)
                    except ValueError as e:
                        logger.warning(
                            '%s: skipping normalization of value '
                            '%s of type %s in attribute %s which '
                            'could not be converted to DN: %s', pkey, value,
                            type(value), attr, e)
                        continue
                try:
                    remote_entry = ds_ldap.get_entry(value, [
                        api.Object.user.primary_key.name,
                        api.Object.group.primary_key.name
                    ])
                except errors.NotFound:
                    logger.warning(
                        '%s: attribute %s refers to non-existent '
                        'entry %s', pkey, attr, value)
                    continue
                if value.endswith(search_bases['user']):
                    primary_key = api.Object.user.primary_key.name
                    container = api.env.container_user
                elif value.endswith(search_bases['group']):
                    primary_key = api.Object.group.primary_key.name
                    container = api.env.container_group
                else:
                    logger.warning(
                        '%s: value %s in attribute %s does not '
                        'belong into any known container', pkey, value, attr)
                    continue

                if not remote_entry.get(primary_key):
                    logger.warning(
                        '%s: there is no primary key %s to migrate '
                        'for %s', pkey, primary_key, attr)
                    continue

                logger.debug('converting DN value %s for %s in %s', value,
                             attr, dn)
                rdnval = remote_entry[primary_key][0].lower()
                entry_attrs[attr][ind] = DN((primary_key, rdnval), container,
                                            api.env.basedn)

    return dn
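
A minimal sketch of the DN rewriting done in the loop above, using made-up DNs and only the ipapython.dn.DN operations already shown in this example:

from ipapython.dn import DN

# hypothetical remote entry DN and target IPA layout
old_value = DN('uid=jdoe,ou=people,dc=old,dc=example')
basedn = DN(('dc', 'ipa'), ('dc', 'example'))
container_user = DN(('cn', 'users'), ('cn', 'accounts'))

rdnval = old_value[0].value                      # 'jdoe'
new_value = DN(('uid', rdnval), container_user, basedn)
# -> uid=jdoe,cn=users,cn=accounts,dc=ipa,dc=example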
Example #18
0
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipaserver/plugins/role.py` module.
"""

from ipalib import api, errors
from ipatests.test_xmlrpc import objectclasses
from ipatests.test_xmlrpc.xmlrpc_test import Declarative, fuzzy_uuid
from ipapython.dn import DN
import pytest

search = u'test-role'

role1 = u'test-role-1'
role1_dn = DN(('cn', role1), api.env.container_rolegroup, api.env.basedn)
renamedrole1 = u'test-role'
invalidrole1 = u' whitespace '

role2 = u'test-role-2'
role2_dn = DN(('cn', role2), api.env.container_rolegroup, api.env.basedn)

group1 = u'testgroup1'
group1_dn = DN(('cn', group1), api.env.container_group, api.env.basedn)

privilege1 = u'r,w privilege 1'
privilege1_dn = DN(('cn', privilege1), DN(api.env.container_privilege),
                   api.env.basedn)


@pytest.mark.tier1
Example #19
0
    def test_cmp(self):
        # Equality
        dn1 = DN((self.attr1, self.value1))

        self.assertTrue(dn1 == self.dn1)
        self.assertFalse(dn1 != self.dn1)

        self.assertTrue(dn1 == self.str_dn1)
        self.assertFalse(dn1 != self.str_dn1)

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 0)

        # Make dn1's attr greater
        with self.assertRaises(AttributeError):
            dn1[0].attr = self.attr1 + "1"
        dn1 = DN((self.attr1 + "1", self.value1))

        self.assertFalse(dn1 == self.dn1)
        self.assertTrue(dn1 != self.dn1)

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 1)

        result = cmp(self.dn1, dn1)
        self.assertEqual(result, -1)

        # Reset dn1's attr, should be equal again
        with self.assertRaises(AttributeError):
            dn1[0].attr = self.attr1
        dn1 = DN((self.attr1, self.value1))

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 0)

        # Make dn1's value greater
        # attr will be equal, this tests secondary comparison component
        with self.assertRaises(AttributeError):
            dn1[0].value = self.value1 + "1"
        dn1 = DN((self.attr1, self.value1 + "1"))

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 1)

        result = cmp(self.dn1, dn1)
        self.assertEqual(result, -1)

        # Make sure dn's with more rdn's are greater
        result = cmp(self.dn1, self.dn3)
        self.assertEqual(result, -1)
        result = cmp(self.dn3, self.dn1)
        self.assertEqual(result, 1)


        # Test startswith, endswith
        container_dn = DN(self.container_dn)
        base_container_dn = DN(self.base_container_dn)

        self.assertTrue(base_container_dn.startswith(self.rdn1))
        self.assertTrue(base_container_dn.startswith(self.dn1))
        self.assertTrue(base_container_dn.startswith(self.dn1 + container_dn))
        self.assertFalse(base_container_dn.startswith(self.dn2))
        self.assertFalse(base_container_dn.startswith(self.rdn2))
        self.assertTrue(base_container_dn.startswith((self.dn1)))
        self.assertTrue(base_container_dn.startswith((self.rdn1)))
        self.assertFalse(base_container_dn.startswith((self.rdn2)))
        self.assertTrue(base_container_dn.startswith((self.rdn2, self.rdn1)))
        self.assertTrue(base_container_dn.startswith((self.dn1, self.dn2)))

        self.assertTrue(base_container_dn.endswith(self.base_dn))
        self.assertTrue(base_container_dn.endswith(container_dn + self.base_dn))
        self.assertFalse(base_container_dn.endswith(DN(self.base_rdn1)))
        self.assertTrue(base_container_dn.endswith(DN(self.base_rdn2)))
        self.assertTrue(base_container_dn.endswith((DN(self.base_rdn1), DN(self.base_rdn2))))

        # Test "in" membership
        self.assertTrue(self.container_rdn1 in container_dn)
        self.assertTrue(container_dn in container_dn)
        self.assertFalse(self.base_rdn1 in container_dn)

        self.assertTrue(self.container_rdn1 in base_container_dn)
        self.assertTrue(container_dn in base_container_dn)
        self.assertTrue(container_dn + self.base_dn in
                        base_container_dn)
        self.assertTrue(self.dn1 + container_dn + self.base_dn in
                        base_container_dn)
        self.assertTrue(self.dn1 + container_dn + self.base_dn ==
                        base_container_dn)

        self.assertFalse(self.container_rdn1 in self.base_dn)
Example #20
0
    def ask_for_options(self):
        options = self.options
        super(ReplicaPrepare, self).ask_for_options()
        http_ca_cert = None
        dirsrv_ca_cert = None

        # get the directory manager password
        self.dirman_password = options.password
        if not options.password:
            self.dirman_password = installutils.read_password(
                "Directory Manager (existing master)",
                confirm=False,
                validate=False)
            if self.dirman_password is None:
                raise admintool.ScriptError(
                    "Directory Manager password required")

        # Try out the password & get the subject base
        api.Backend.ldap2.disconnect()
        try:
            api.Backend.ldap2.connect(bind_pw=self.dirman_password)

            entry_attrs = api.Backend.ldap2.get_ipa_config()
            self.subject_base = entry_attrs.get('ipacertificatesubjectbase',
                                                [None])[0]

            ca_enabled = api.Command.ca_is_enabled()['result']
        except errors.ACIError:
            raise admintool.ScriptError("The password provided is incorrect "
                                        "for LDAP server %s" % api.env.host)
        except errors.LDAPError:
            raise admintool.ScriptError("Unable to connect to LDAP server %s" %
                                        api.env.host)
        except errors.DatabaseError as e:
            raise admintool.ScriptError(e.desc)

        if ca_enabled and not os.path.isfile(paths.CA_CS_CFG_PATH):
            raise admintool.ScriptError(
                "CA is not installed on this server. "
                "ipa-replica-prepare must be run on an IPA server with CA.")
        if not ca_enabled and not options.http_cert_files:
            raise admintool.ScriptError(
                "Cannot issue certificates: a CA is not installed. Use the "
                "--http-cert-file, --dirsrv-cert-file options to provide "
                "custom certificates.")

        if self.subject_base is not None:
            self.subject_base = DN(self.subject_base)

        # Validate more options using the password
        try:
            installutils.verify_fqdn(self.replica_fqdn, local_hostname=False)
        except installutils.BadHostError as e:
            if isinstance(e, installutils.HostLookupError):
                if not options.ip_addresses:
                    if dns_container_exists(api.env.basedn):
                        logger.info('You might use the --ip-address option '
                                    'to create a DNS entry if the DNS zone '
                                    'is managed by IPA.')
                    raise
                else:
                    # The host doesn't exist in DNS but we're adding it.
                    pass
            else:
                raise

        if options.ip_addresses:
            if not dns_container_exists(api.env.basedn):
                logger.error(
                    "It is not possible to add a DNS record automatically "
                    "because DNS is not managed by IPA. Please create DNS "
                    "record manually and then omit --ip-address option.")
                raise admintool.ScriptError("Cannot add DNS record")

            options.reverse_zones = bindinstance.check_reverse_zones(
                options.ip_addresses, options.reverse_zones, options, False,
                True)

            _host, zone = self.replica_fqdn.split('.', 1)
            if not bindinstance.dns_zone_exists(zone, api=api):
                logger.error(
                    "DNS zone %s does not exist in IPA managed DNS "
                    "server. Either create DNS zone or omit "
                    "--ip-address option.", zone)
                raise admintool.ScriptError("Cannot add DNS record")

        self.http_pin = self.dirsrv_pin = None

        if options.http_cert_files:
            if options.http_pin is None:
                options.http_pin = installutils.read_password(
                    "Enter Apache Server private key unlock",
                    confirm=False,
                    validate=False,
                    retry=False)
                if options.http_pin is None:
                    raise admintool.ScriptError(
                        "Apache Server private key unlock password required")
            http_pkcs12_file, http_pin, http_ca_cert = self.load_pkcs12(
                options.http_cert_files, options.http_pin,
                options.http_cert_name)
            self.http_pkcs12_file = http_pkcs12_file
            self.http_pin = http_pin

        if options.dirsrv_cert_files:
            if options.dirsrv_pin is None:
                options.dirsrv_pin = installutils.read_password(
                    "Enter Directory Server private key unlock",
                    confirm=False,
                    validate=False,
                    retry=False)
                if options.dirsrv_pin is None:
                    raise admintool.ScriptError(
                        "Directory Server private key unlock password required"
                    )
            dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = self.load_pkcs12(
                options.dirsrv_cert_files, options.dirsrv_pin,
                options.dirsrv_cert_name)
            self.dirsrv_pkcs12_file = dirsrv_pkcs12_file
            self.dirsrv_pin = dirsrv_pin

        if (options.http_cert_files and options.dirsrv_cert_files
                and http_ca_cert != dirsrv_ca_cert):
            raise admintool.ScriptError(
                "Apache Server SSL certificate and Directory Server SSL "
                "certificate are not signed by the same CA certificate")
Example #21
0
File: aci.py Project: zhoubh/freeipa
def _make_aci(ldap, current, aciname, kw):
    """
    Given a name and a set of keywords construct an ACI.
    """
    # Do some quick and dirty validation.
    checked_args = [
        'type', 'filter', 'subtree', 'targetgroup', 'attrs', 'memberof'
    ]
    valid = {}
    for arg in checked_args:
        if arg in kw:
            valid[arg] = kw[arg] is not None
        else:
            valid[arg] = False

    if valid['type'] + valid['filter'] + valid['subtree'] + valid[
            'targetgroup'] > 1:
        raise errors.ValidationError(
            name='target',
            error=_(
                'type, filter, subtree and targetgroup are mutually exclusive')
        )

    if 'aciprefix' not in kw:
        raise errors.ValidationError(name='aciprefix',
                                     error=_('ACI prefix is required'))

    if sum(valid.values()) == 0:
        raise errors.ValidationError(
            name='target',
            error=_('at least one of: type, filter, subtree, targetgroup, '
                    'attrs or memberof are required'))

    if valid['filter'] + valid['memberof'] > 1:
        raise errors.ValidationError(
            name='target',
            error=_('filter and memberof are mutually exclusive'))

    group = 'group' in kw
    permission = 'permission' in kw
    selfaci = 'selfaci' in kw and kw['selfaci'] == True
    if group + permission + selfaci > 1:
        raise errors.ValidationError(
            name='target',
            error=_('group, permission and self are mutually exclusive'))
    elif group + permission + selfaci == 0:
        raise errors.ValidationError(
            name='target',
            error=_('One of group, permission or self is required'))

    # Grab the dn of the group we're granting access to. This group may be a
    # permission or a user group.
    entry_attrs = []
    if permission:
        # This will raise NotFound if the permission doesn't exist
        try:
            entry_attrs = api.Command['permission_show'](
                kw['permission'])['result']
        except errors.NotFound as e:
            if 'test' in kw and not kw.get('test'):
                raise e
            else:
                entry_attrs = {
                    'dn':
                    DN(('cn', kw['permission']), api.env.container_permission,
                       api.env.basedn),
                }
    elif group:
        # Not so friendly with groups. This will raise
        try:
            group_dn = api.Object['group'].get_dn_if_exists(kw['group'])
            entry_attrs = {'dn': group_dn}
        except errors.NotFound:
            raise errors.NotFound(reason=_("Group '%s' does not exist") %
                                  kw['group'])

    try:
        a = ACI(current)
        a.name = _make_aci_name(kw['aciprefix'], aciname)
        a.permissions = kw['permissions']
        if 'selfaci' in kw and kw['selfaci']:
            a.set_bindrule('userdn = "ldap:///self"')
        else:
            dn = entry_attrs['dn']
            a.set_bindrule('groupdn = "ldap:///%s"' % dn)
        if valid['attrs']:
            a.set_target_attr(kw['attrs'])
        if valid['memberof']:
            try:
                api.Object['group'].get_dn_if_exists(kw['memberof'])
            except errors.NotFound:
                raise api.Object['group'].handle_not_found(kw['memberof'])
            groupdn = _group_from_memberof(kw['memberof'])
            a.set_target_filter('memberOf=%s' % groupdn)
        if valid['filter']:
            # Test the filter by performing a simple search on it. The
            # filter is considered valid if either it returns some entries
            # or it returns no entries, otherwise we let whatever exception
            # happened be raised.
            if kw['filter'] in ('', None, u''):
                raise errors.BadSearchFilter(info=_('empty filter'))
            try:
                ldap.find_entries(filter=kw['filter'])
            except errors.NotFound:
                pass
            a.set_target_filter(kw['filter'])
        if valid['type']:
            target = _type_map[kw['type']]
            a.set_target(target)
        if valid['targetgroup']:
            # Purposely no try here so we'll raise a NotFound
            group_dn = api.Object['group'].get_dn_if_exists(kw['targetgroup'])
            target = 'ldap:///%s' % group_dn
            a.set_target(target)
        if valid['subtree']:
            # See if the subtree is a full URI
            target = kw['subtree']
            if not target.startswith('ldap:///'):
                target = 'ldap:///%s' % target
            a.set_target(target)
    except SyntaxError as e:
        raise errors.ValidationError(name='target',
                                     error=_('Syntax Error: %(error)s') %
                                     dict(error=str(e)))

    return a
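
A hypothetical call sketch illustrating the keyword contract validated above (all values are invented; it assumes `ldap` is a connected backend handle and that the add path passes current=None to build a fresh ACI):

# exactly one of: type, filter, subtree, targetgroup
# and exactly one of: group, permission, selfaci
kw = {
    'aciprefix': u'permission',            # required
    'permission': u'Modify Users',         # grantee is an IPA permission
    'permissions': [u'write'],             # ACI rights
    'attrs': [u'description', u'carlicense'],
    'type': u'user',                       # target all user entries
}
aci = _make_aci(ldap, None, u'Modify Users', kw)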
Example #22
0
            except ValueError as e:
                raise admintool.ScriptError(
                    "Not a valid CA certificate: %s (visit "
                    "http://www.freeipa.org/page/Troubleshooting for "
                    "troubleshooting guide)" % e)

            trust_chain = tmpdb.get_trust_chain('IPA CA')[:-1]
            for nickname in trust_chain:
                try:
                    ca_cert = tmpdb.get_cert(nickname)
                except RuntimeError:
                    break
                certstore.put_ca_cert_nss(self.conn, api.env.basedn, ca_cert,
                                          nickname, ',,')

        dn = DN(('cn', self.cert_nickname), ('cn', 'ca_renewal'),
                ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
        try:
            entry = self.conn.get_entry(dn, ['usercertificate'])
            entry['usercertificate'] = [cert]
            self.conn.update_entry(entry)
        except errors.NotFound:
            entry = self.conn.make_entry(
                dn,
                objectclass=['top', 'pkiuser', 'nscontainer'],
                cn=[self.cert_nickname],
                usercertificate=[cert])
            self.conn.add_entry(entry)
        except errors.EmptyModlist:
            pass

        try:
Example #23
0
File: aci.py Project: guanwei/freeipa
def _aci_to_kw(ldap, a, test=False, pkey_only=False):
    """Convert an ACI into its equivalent keywords.

       This is used for the modify operation so we can merge the
       incoming kw and existing ACI and pass the result to
       _make_aci().
    """
    kw = {}
    kw['aciprefix'], kw['aciname'] = _parse_aci_name(a.name)
    if pkey_only:
        return kw
    kw['permissions'] = tuple(a.permissions)
    if 'targetattr' in a.target:
        kw['attrs'] = tuple(unicode(e)
                            for e in a.target['targetattr']['expression'])
    if 'targetfilter' in a.target:
        target = a.target['targetfilter']['expression']
        if target.startswith('(memberOf=') or target.startswith('memberOf='):
            (junk, memberof) = target.split('memberOf=', 1)
            memberof = DN(memberof)
            kw['memberof'] = memberof['cn']
        else:
            kw['filter'] = unicode(target)
    if 'target' in a.target:
        target = a.target['target']['expression']
        found = False
        for k in _type_map.keys():
            if _type_map[k] == target:
                kw['type'] = unicode(k)
                found = True
                break
        if not found:
            if target.startswith('('):
                kw['filter'] = unicode(target)
            else:
                # See if the target is a group. If so we set the
                # targetgroup attr, otherwise we consider it a subtree
                try:
                    targetdn = DN(target.replace('ldap:///',''))
                except ValueError as e:
                    raise errors.ValidationError(name='subtree', error=_("invalid DN (%s)") % e)
                if targetdn.endswith(DN(api.env.container_group, api.env.basedn)):
                    kw['targetgroup'] = targetdn[0]['cn']
                else:
                    kw['subtree'] = unicode(target)

    groupdn = a.bindrule['expression']
    groupdn = groupdn.replace('ldap:///','')
    if groupdn == 'self':
        kw['selfaci'] = True
    elif groupdn == 'anyone':
        pass
    else:
        groupdn = DN(groupdn)
        if len(groupdn) and groupdn[0].attr == 'cn':
            dn = DN()
            entry = ldap.make_entry(dn)
            try:
                entry = ldap.get_entry(groupdn, ['cn'])
            except errors.NotFound as e:
                # FIXME, use real name here
                if test:
                    dn = DN(('cn', 'test'), api.env.container_permission,
                            api.env.basedn)
                    entry = ldap.make_entry(dn, {'cn': [u'test']})
            if api.env.container_permission in entry.dn:
                kw['permission'] = entry['cn'][0]
            else:
                if 'cn' in entry:
                    kw['group'] = entry['cn'][0]

    return kw
Example #24
0
File: aci.py Project: zhoubh/freeipa
def _aci_to_kw(ldap, a, test=False, pkey_only=False):
    """Convert an ACI into its equivalent keywords.

       This is used for the modify operation so we can merge the
       incoming kw and existing ACI and pass the result to
       _make_aci().
    """
    kw = {}
    kw['aciprefix'], kw['aciname'] = _parse_aci_name(a.name)
    if pkey_only:
        return kw
    kw['permissions'] = tuple(a.permissions)
    if 'targetattr' in a.target:
        kw['attrs'] = tuple(
            unicode(e) for e in a.target['targetattr']['expression'])
    if 'targetfilter' in a.target:
        target = a.target['targetfilter']['expression']
        if target.startswith('(memberOf=') or target.startswith('memberOf='):
            _junk, memberof = target.split('memberOf=', 1)
            memberof = DN(memberof)
            kw['memberof'] = memberof['cn']
        else:
            kw['filter'] = unicode(target)
    if 'target' in a.target:
        target = a.target['target']['expression']
        found = False
        for k, value in _type_map.items():
            if value == target:
                kw['type'] = unicode(k)
                found = True
                break
        if not found:
            if target.startswith('('):
                kw['filter'] = unicode(target)
            else:
                # See if the target is a group. If so we set the
                # targetgroup attr, otherwise we consider it a subtree
                try:
                    targetdn = DN(target.replace('ldap:///', ''))
                except ValueError as e:
                    raise errors.ValidationError(name='subtree',
                                                 error=_("invalid DN (%s)") %
                                                 e)
                if targetdn.endswith(
                        DN(api.env.container_group, api.env.basedn)):
                    kw['targetgroup'] = targetdn[0]['cn']
                else:
                    kw['subtree'] = unicode(target)

    groupdn = a.bindrule['expression']
    groupdn = groupdn.replace('ldap:///', '')
    if groupdn == 'self':
        kw['selfaci'] = True
    elif groupdn == 'anyone':
        pass
    else:
        groupdn = DN(groupdn)
        if len(groupdn) and groupdn[0].attr == 'cn':
            dn = DN()
            entry = ldap.make_entry(dn)
            try:
                entry = ldap.get_entry(groupdn, ['cn'])
            except errors.NotFound as e:
                # FIXME, use real name here
                if test:
                    dn = DN(('cn', 'test'), api.env.container_permission,
                            api.env.basedn)
                    entry = ldap.make_entry(dn, {'cn': [u'test']})
            if api.env.container_permission in entry.dn:
                kw['permission'] = entry['cn'][0]
            else:
                if 'cn' in entry:
                    kw['group'] = entry['cn'][0]

    return kw
Example #25
0
    def import_files(self,
                     files,
                     import_keys=False,
                     key_password=None,
                     key_nickname=None):
        """
        Import certificates and a single private key from multiple files

        The files may be in PEM and DER certificate, PKCS#7 certificate chain,
        PKCS#8 and raw private key and PKCS#12 formats.

        :param files: Names of files to import
        :param import_keys: Whether to import private keys
        :param key_password: Password to decrypt private keys
        :param key_nickname: Nickname of the private key to import from PKCS#12
            files
        """
        key_file = None
        extracted_key = None
        extracted_certs = []

        for filename in files:
            try:
                with open(filename, 'rb') as f:
                    data = f.read()
            except IOError as e:
                raise RuntimeError("Failed to open %s: %s" %
                                   (filename, e.strerror))

            # Try to parse the file as PEM file
            matches = list(
                re.finditer(br'-----BEGIN (.+?)-----(.*?)-----END \1-----',
                            data, re.DOTALL))
            if matches:
                loaded = False
                for match in matches:
                    body = match.group()
                    label = match.group(1)
                    line = len(data[:match.start() + 1].splitlines())

                    if label in (b'CERTIFICATE', b'X509 CERTIFICATE',
                                 b'X.509 CERTIFICATE'):
                        try:
                            cert = x509.load_pem_x509_certificate(body)
                        except ValueError as e:
                            if label != b'CERTIFICATE':
                                logger.warning(
                                    "Skipping certificate in %s at line %s: "
                                    "%s", filename, line, e)
                                continue
                        else:
                            extracted_certs.append(cert)
                            loaded = True
                            continue

                    if label in (b'PKCS7', b'PKCS #7 SIGNED DATA',
                                 b'CERTIFICATE'):
                        try:
                            certs = x509.pkcs7_to_certs(body)
                        except ipautil.CalledProcessError as e:
                            if label == b'CERTIFICATE':
                                logger.warning(
                                    "Skipping certificate in %s at line %s: "
                                    "%s", filename, line, e)
                            else:
                                logger.warning(
                                    "Skipping PKCS#7 in %s at line %s: %s",
                                    filename, line, e)
                            continue
                        else:
                            extracted_certs.extend(certs)
                            loaded = True
                            continue

                    if label in (b'PRIVATE KEY', b'ENCRYPTED PRIVATE KEY',
                                 b'RSA PRIVATE KEY', b'DSA PRIVATE KEY',
                                 b'EC PRIVATE KEY'):
                        if not import_keys:
                            continue

                        if key_file:
                            raise RuntimeError(
                                "Can't load private key from both %s and %s" %
                                (key_file, filename))

                        # the args -v2 aes256 -v2prf hmacWithSHA256 are needed
                        # on OpenSSL 1.0.2 (fips mode). As soon as FreeIPA
                        # requires OpenSSL 1.1.0 we'll be able to drop them
                        args = [
                            paths.OPENSSL,
                            'pkcs8',
                            '-topk8',
                            '-v2',
                            'aes256',
                            '-v2prf',
                            'hmacWithSHA256',
                            '-passout',
                            'file:' + self.pwd_file,
                        ]
                        if ((label != b'PRIVATE KEY' and key_password)
                                or label == b'ENCRYPTED PRIVATE KEY'):
                            key_pwdfile = ipautil.write_tmp_file(key_password)
                            args += [
                                '-passin',
                                'file:' + key_pwdfile.name,
                            ]
                        try:
                            result = ipautil.run(args,
                                                 stdin=body,
                                                 capture_output=True)
                        except ipautil.CalledProcessError as e:
                            logger.warning(
                                "Skipping private key in %s at line %s: %s",
                                filename, line, e)
                            continue
                        else:
                            extracted_key = result.raw_output
                            key_file = filename
                            loaded = True
                            continue
                if loaded:
                    continue
                raise RuntimeError("Failed to load %s" % filename)

            # Try to load the file as DER certificate
            try:
                cert = x509.load_der_x509_certificate(data)
            except ValueError:
                pass
            else:
                extracted_certs.append(cert)
                continue

            # Try to import the file as PKCS#12 file
            if import_keys:
                try:
                    self.import_pkcs12(filename, key_password)
                except RuntimeError:
                    pass
                else:
                    if key_file:
                        raise RuntimeError(
                            "Can't load private key from both %s and %s" %
                            (key_file, filename))
                    key_file = filename

                    server_certs = self.find_server_certs()
                    if key_nickname:
                        for nickname, _trust_flags in server_certs:
                            if nickname == key_nickname:
                                break
                        else:
                            raise RuntimeError(
                                "Server certificate \"%s\" not found in %s" %
                                (key_nickname, filename))
                    else:
                        if len(server_certs) > 1:
                            raise RuntimeError(
                                "%s server certificates found in %s, "
                                "expecting only one" %
                                (len(server_certs), filename))

                    continue

            raise RuntimeError("Failed to load %s" % filename)

        if import_keys and not key_file:
            raise RuntimeError("No server certificates found in %s" %
                               (', '.join(files)))

        for cert in extracted_certs:
            nickname = str(DN(cert.subject))
            self.add_cert(cert, nickname, EMPTY_TRUST_FLAGS)

        if extracted_key:
            with tempfile.NamedTemporaryFile() as in_file, \
                    tempfile.NamedTemporaryFile() as out_file:
                for cert in extracted_certs:
                    in_file.write(cert.public_bytes(x509.Encoding.PEM))
                in_file.write(extracted_key)
                in_file.flush()
                out_password = ipautil.ipa_generate_password()
                out_pwdfile = ipautil.write_tmp_file(out_password)
                args = [
                    paths.OPENSSL,
                    'pkcs12',
                    '-export',
                    '-in',
                    in_file.name,
                    '-out',
                    out_file.name,
                    '-passin',
                    'file:' + self.pwd_file,
                    '-passout',
                    'file:' + out_pwdfile.name,
                ]
                try:
                    ipautil.run(args)
                except ipautil.CalledProcessError as e:
                    raise RuntimeError(
                        "No matching certificate found for private key from "
                        "%s" % key_file)

                self.import_pkcs12(out_file.name, out_password)
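
A usage sketch with invented file names; `db` stands for an instance of the certificate database helper class this method is defined on:

# import a server certificate, its CA chain and the matching private key
db.import_files(
    files=['/tmp/server.crt', '/tmp/chain.p7b', '/tmp/server.key'],
    import_keys=True,
    key_password=u'Secret123',
    key_nickname='Server-Cert',
)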
Example #26
0
File: aci.py Project: zhoubh/freeipa
    def execute(self, term=None, **kw):
        ldap = self.api.Backend.ldap2

        entry = ldap.get_entry(self.api.env.basedn, ['aci'])

        acis = _convert_strings_to_acis(entry.get('aci', []))
        results = []

        if term:
            term = term.lower()
            for a in acis:
                if a.name.lower().find(term) != -1 and a not in results:
                    results.append(a)
            acis = list(results)
        else:
            results = list(acis)

        if kw.get('aciname'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if name != kw['aciname']:
                    results.remove(a)
            acis = list(results)

        if kw.get('aciprefix'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if prefix != kw['aciprefix']:
                    results.remove(a)
            acis = list(results)

        if kw.get('attrs'):
            for a in acis:
                if 'targetattr' not in a.target:
                    results.remove(a)
                    continue
                alist1 = sorted(
                    [t.lower() for t in a.target['targetattr']['expression']])
                alist2 = sorted([t.lower() for t in kw['attrs']])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get('permission'):
            try:
                self.api.Command['permission_show'](kw['permission'])
            except errors.NotFound:
                pass
            else:
                for a in acis:
                    uri = 'ldap:///%s' % entry.dn
                    if a.bindrule['expression'] != uri:
                        results.remove(a)
                acis = list(results)

        if kw.get('permissions'):
            for a in acis:
                alist1 = sorted(a.permissions)
                alist2 = sorted(kw['permissions'])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get('memberof'):
            try:
                dn = _group_from_memberof(kw['memberof'])
            except errors.NotFound:
                pass
            else:
                memberof_filter = '(memberOf=%s)' % dn
                for a in acis:
                    if 'targetfilter' in a.target:
                        targetfilter = a.target['targetfilter']['expression']
                        if targetfilter != memberof_filter:
                            results.remove(a)
                    else:
                        results.remove(a)

        if kw.get('type'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                found = False
                for k, value in _type_map.items():
                    if value == target and kw['type'] == k:
                        found = True
                        break
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('selfaci', False) is True:
            for a in acis:
                if a.bindrule['expression'] != u'ldap:///self':
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('group'):
            for a in acis:
                groupdn = a.bindrule['expression']
                groupdn = DN(groupdn.replace('ldap:///', ''))
                try:
                    cn = groupdn[0]['cn']
                except (IndexError, KeyError):
                    cn = None
                if cn is None or cn != kw['group']:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('targetgroup'):
            for a in acis:
                found = False
                if 'target' in a.target:
                    target = a.target['target']['expression']
                    targetdn = DN(target.replace('ldap:///', ''))
                    group_container_dn = DN(api.env.container_group,
                                            api.env.basedn)
                    if targetdn.endswith(group_container_dn):
                        try:
                            cn = targetdn[0]['cn']
                        except (IndexError, KeyError):
                            cn = None
                        if cn == kw['targetgroup']:
                            found = True
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('filter'):
            if not kw['filter'].startswith('('):
                kw['filter'] = unicode('(' + kw['filter'] + ')')
            for a in acis:
                if ('targetfilter' not in a.target or
                        not a.target['targetfilter']['expression'] or
                        a.target['targetfilter']['expression'] != kw['filter']):
                    results.remove(a)

        if kw.get('subtree'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                if kw['subtree'].lower() != target.lower():
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        acis = []
        for result in results:
            if kw.get('raw', False):
                aci = dict(aci=unicode(result))
            else:
                aci = _aci_to_kw(ldap,
                                 result,
                                 pkey_only=kw.get('pkey_only', False))
            acis.append(aci)

        return dict(
            result=acis,
            count=len(acis),
            truncated=False,
        )
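
A hypothetical invocation sketch of the search above, assuming it is exposed as api.Command['aci_find'] (the filtering options used here are invented for illustration):

from ipalib import api

result = api.Command['aci_find'](aciprefix=u'permission', attrs=[u'member'])
print('%d matching ACIs' % result['count'])
for aci in result['result']:
    print(aci)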
Example #27
0
 def update_pki_admin_password(self):
     dn = DN('uid=admin', 'ou=people', 'o=ipaca')
     api.Backend.ldap2.modify_password(dn, self.dirman_password)
Example #28
0
"""
Test adding/removing external members (trusted domain objects) to IPA groups.
These tests are skipped if trust is not established.
"""

import nose
from ipalib import api
from ipapython.dn import DN
from ipatests.test_xmlrpc import objectclasses
from ipatests.test_xmlrpc.xmlrpc_test import (Declarative, fuzzy_uuid,
                                              fuzzy_user_or_group_sid)
import pytest

group_name = u'external_group'
group_desc = u'Test external group'
group_dn = DN(('cn', group_name), api.env.container_group, api.env.basedn)


def get_trusted_group_name():
    trusts = api.Command['trust_find']()
    if trusts['count'] == 0:
        return None

    ad_netbios = trusts['result'][0]['ipantflatname']
    return u'%s\\Domain Admins' % ad_netbios


@pytest.mark.tier1
class test_external_members(Declarative):
    @classmethod
    def setup_class(cls):
示例#29
0
    def ipacheckldap(self, thost, trealm, ca_cert_path=None):
        """
        Given a host and kerberos realm verify that it is an IPA LDAP
        server hosting the realm.

        Returns a list [errno, host, realm] or an empty list on error.
        Errno is an error number:
            0 means all ok
            negative number means something went wrong
        """
        if ipaldap is None:
            return [PYTHON_LDAP_NOT_INSTALLED]

        lrealms = []

        # now verify the server is really an IPA server
        try:
            ldap_uri = ipaldap.get_ldap_uri(thost)
            start_tls = False
            if ca_cert_path:
                start_tls = True
            logger.debug("Init LDAP connection to: %s", ldap_uri)
            lh = ipaldap.LDAPClient(ldap_uri,
                                    cacert=ca_cert_path,
                                    start_tls=start_tls,
                                    no_schema=True,
                                    decode_attrs=False)
            try:
                lh.simple_bind(DN(), '')

                # get IPA base DN
                logger.debug("Search LDAP server for IPA base DN")
                basedn = get_ipa_basedn(lh)
            except errors.ACIError:
                logger.debug("LDAP Error: Anonymous access not allowed")
                return [NO_ACCESS_TO_LDAP]
            except errors.DatabaseError as err:
                logger.error("Error checking LDAP: %s", err.strerror)
                # We should only get UNWILLING_TO_PERFORM if the remote LDAP
                # server has minssf > 0 and we have attempted a non-TLS conn.
                if ca_cert_path is None:
                    logger.debug(
                        "Cannot connect to LDAP server. Check that minssf is "
                        "not enabled")
                    return [NO_TLS_LDAP]
                else:
                    return [UNKNOWN_ERROR]

            if basedn is None:
                logger.debug("The server is not an IPA server")
                return [NOT_IPA_SERVER]

            self.basedn = basedn
            self.basedn_source = 'From IPA server %s' % lh.ldap_uri

            # search and return known realms
            logger.debug(
                "Search for (objectClass=krbRealmContainer) in %s (sub)",
                self.basedn)
            try:
                lret = lh.get_entries(DN(('cn', 'kerberos'),
                                         self.basedn), lh.SCOPE_SUBTREE,
                                      "(objectClass=krbRealmContainer)")
            except errors.NotFound:
                # something very wrong
                return [REALM_NOT_FOUND]

            for lres in lret:
                logger.debug("Found: %s", lres.dn)
                [cn] = lres.raw['cn']
                if six.PY3:
                    cn = cn.decode('utf-8')
                lrealms.append(cn)

            if trealm:
                for r in lrealms:
                    if trealm == r:
                        return [SUCCESS, thost, trealm]
                # must match or something is very wrong
                logger.debug(
                    "Realm %s does not match any realm in LDAP "
                    "database", trealm)
                return [REALM_NOT_FOUND]
            else:
                if len(lrealms) != 1:
                    # which one? we can't attach to a multi-realm server
                    # without DNS working
                    logger.debug(
                        "Multiple realms found, cannot decide which realm "
                        "is the correct realm without working DNS")
                    return [REALM_NOT_FOUND]
                else:
                    return [SUCCESS, thost, lrealms[0]]

            # we shouldn't get here
            assert False, "Unknown error in ipadiscovery"

        except errors.DatabaseTimeout:
            logger.debug("LDAP Error: timeout")
            return [NO_LDAP_SERVER]
        except errors.NetworkError as err:
            logger.debug("LDAP Error: %s", err.strerror)
            return [NO_LDAP_SERVER]
        except errors.ACIError:
            logger.debug("LDAP Error: Anonymous access not allowed")
            return [NO_ACCESS_TO_LDAP]
        except errors.DatabaseError as err:
            logger.debug("Error checking LDAP: %s", err.strerror)
            return [UNKNOWN_ERROR]
        except Exception as err:
            logger.debug("Error checking LDAP: %s", err)

            return [UNKNOWN_ERROR]
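
A usage sketch with made-up host and realm values; `disc` is assumed to be an instance of the discovery class that defines ipacheckldap():

ret = disc.ipacheckldap('ipa.example.test', 'EXAMPLE.TEST',
                        ca_cert_path='/etc/ipa/ca.crt')
if ret and ret[0] == SUCCESS:
    _errno, host, realm = ret
    print('IPA LDAP server %s serves realm %s' % (host, realm))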
Example #30
0
 def get_realm_suffix(self):
     return DN(('cn', self.realm), ('cn', 'kerberos'), self.suffix)
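
For illustration, with a hypothetical realm and suffix the returned value would be:

from ipapython.dn import DN

realm = 'EXAMPLE.TEST'                            # hypothetical
suffix = DN(('dc', 'example'), ('dc', 'test'))    # hypothetical
DN(('cn', realm), ('cn', 'kerberos'), suffix)
# -> DN('cn=EXAMPLE.TEST,cn=kerberos,dc=example,dc=test')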
Example #31
0
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        # Fill in a default UUID when not specified.
        if entry_attrs.get('ipatokenuniqueid', None) is None:
            entry_attrs['ipatokenuniqueid'] = str(uuid.uuid4())
            dn = DN("ipatokenuniqueid=%s" % entry_attrs['ipatokenuniqueid'], dn)

        if not _check_interval(options.get('ipatokennotbefore', None),
                               options.get('ipatokennotafter', None)):
            raise ValidationError(name='not_after',
                                  error='is before the validity start')

        # Set the object class and defaults for specific token types
        options['type'] = options['type'].lower()
        entry_attrs['objectclass'] = otptoken.object_class + ['ipatoken' + options['type']]
        for ttype, tattrs in TOKEN_TYPES.items():
            if ttype != options['type']:
                for tattr in tattrs:
                    if tattr in entry_attrs:
                        del entry_attrs[tattr]

        # If owner was not specified, default to the person adding this token.
        # If managedby was not specified, attempt a sensible default.
        if 'ipatokenowner' not in entry_attrs or 'managedby' not in entry_attrs:
            cur_dn = DN(self.api.Backend.ldap2.conn.whoami_s()[4:])
            if cur_dn:
                cur_uid = cur_dn[0].value
                prev_uid = entry_attrs.setdefault('ipatokenowner', cur_uid)
                if cur_uid == prev_uid:
                    entry_attrs.setdefault('managedby', cur_dn.ldap_text())

        # Resolve the owner's dn
        _normalize_owner(self.api.Object.user, entry_attrs)

        # Get the issuer for the URI
        owner = entry_attrs.get('ipatokenowner', None)
        issuer = api.env.realm
        if owner is not None:
            try:
                issuer = ldap.get_entry(owner, ['krbprincipalname'])['krbprincipalname'][0]
            except (NotFound, IndexError):
                pass

        # Check if key is not empty
        if entry_attrs['ipatokenotpkey'] is None:
            raise ValidationError(name='key', error=_(u'cannot be empty'))

        # Build the URI parameters
        args = {}
        args['issuer'] = issuer
        args['secret'] = base64.b32encode(entry_attrs['ipatokenotpkey'])
        args['digits'] = entry_attrs['ipatokenotpdigits']
        args['algorithm'] = entry_attrs['ipatokenotpalgorithm'].upper()
        if options['type'] == 'totp':
            args['period'] = entry_attrs['ipatokentotptimestep']
        elif options['type'] == 'hotp':
            args['counter'] = entry_attrs['ipatokenhotpcounter']

        # Build the URI
        label = urllib.parse.quote(entry_attrs['ipatokenuniqueid'])
        parameters = urllib.parse.urlencode(args)
        uri = u'otpauth://%s/%s:%s?%s' % (options['type'], issuer, label, parameters)
        setattr(context, 'uri', uri)

        attrs_list.append("objectclass")
        return dn
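
For reference, the otpauth URI assembled at the end of the callback has roughly this shape (all values are hypothetical and the query parameter order depends on the dict passed to urlencode):

# hypothetical result for a TOTP token whose owner could not be resolved,
# so the realm is used as the issuer
uri = ('otpauth://totp/EXAMPLE.TEST:7a6d2c8e-8f4b-4f6a-9e3c-0d1b2a3c4d5e'
       '?issuer=EXAMPLE.TEST&secret=GEZDGNBVGY3TQOJQ&digits=6'
       '&algorithm=SHA1&period=30')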
Example #32
0
# This is a tuple instead of a dict so that it is immutable.
# To create a dict with this config, just "d = dict(DEFAULT_CONFIG)".
DEFAULT_CONFIG = (
    ('api_version', API_VERSION),
    ('version', VERSION),

    # Domain, realm, basedn:
    # Following values do not have any reasonable default.
    # Do not initialize them so the code which depends on them blows up early
    # and does not do crazy stuff with default values instead of real ones.
    # ('domain', 'example.com'),
    # ('realm', 'EXAMPLE.COM'),
    # ('basedn', DN(('dc', 'example'), ('dc', 'com'))),

    # LDAP containers:
    ('container_accounts', DN(('cn', 'accounts'))),
    ('container_user', DN(('cn', 'users'), ('cn', 'accounts'))),
    ('container_deleteuser',
     DN(('cn', 'deleted users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
    ('container_stageuser',
     DN(('cn', 'staged users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
    ('container_group', DN(('cn', 'groups'), ('cn', 'accounts'))),
    ('container_service', DN(('cn', 'services'), ('cn', 'accounts'))),
    ('container_host', DN(('cn', 'computers'), ('cn', 'accounts'))),
    ('container_hostgroup', DN(('cn', 'hostgroups'), ('cn', 'accounts'))),
    ('container_rolegroup', DN(('cn', 'roles'), ('cn', 'accounts'))),
    ('container_permission', DN(('cn', 'permissions'), ('cn', 'pbac'))),
    ('container_privilege', DN(('cn', 'privileges'), ('cn', 'pbac'))),
    ('container_automount', DN(('cn', 'automount'))),
    ('container_policies', DN(('cn', 'policies'))),
    ('container_configs', DN(('cn', 'configs'), ('cn', 'policies'))),
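
A short sketch with a hypothetical suffix showing how these relative container DNs are combined with the basedn into absolute entry locations elsewhere in the code:

from ipapython.dn import DN

basedn = DN(('dc', 'example'), ('dc', 'com'))          # hypothetical suffix
container_user = DN(('cn', 'users'), ('cn', 'accounts'))
users_base = DN(container_user, basedn)
# -> DN('cn=users,cn=accounts,dc=example,dc=com')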
Example #33
0
class TestDN(unittest.TestCase):
    def setUp(self):
        # ava1 must sort before ava2
        self.attr1    = 'cn'
        self.value1   = u'Bob'
        self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
        self.ava1     = AVA(self.attr1, self.value1)

        self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
        self.rdn1     = RDN((self.attr1, self.value1))

        self.attr2    = 'ou'
        self.value2   = u'people'
        self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
        self.ava2     = AVA(self.attr2, self.value2)

        self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
        self.rdn2     = RDN((self.attr2, self.value2))

        self.str_dn1 = self.str_rdn1
        self.dn1 = DN(self.rdn1)

        self.str_dn2 = self.str_rdn2
        self.dn2 = DN(self.rdn2)

        self.str_dn3 = '%s,%s' % (self.str_rdn1, self.str_rdn2)
        self.dn3 = DN(self.rdn1, self.rdn2)

        self.base_rdn1 = RDN(('dc', 'redhat'))
        self.base_rdn2 = RDN(('dc', 'com'))
        self.base_dn = DN(self.base_rdn1, self.base_rdn2)

        self.container_rdn1 = RDN(('cn', 'sudorules'))
        self.container_rdn2 = RDN(('cn', 'sudo'))
        self.container_dn = DN(self.container_rdn1, self.container_rdn2)

        self.base_container_dn = DN((self.attr1, self.value1),
                                    self.container_dn, self.base_dn)

        self.x500name = x509.Name([
            x509.NameAttribute(
                x509.NameOID.ORGANIZATIONAL_UNIT_NAME, self.value2),
            x509.NameAttribute(x509.NameOID.COMMON_NAME, self.value1),
        ])

    def assertExpectedClass(self, klass, obj, component):
        self.assertIs(obj.__class__, expected_class(klass, component))

    def test_create(self):
        # Create with single attr,value pair
        dn1 = DN((self.attr1, self.value1))
        self.assertEqual(len(dn1), 1)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
        self.assertIsInstance(dn1[0].attr, unicode)
        self.assertIsInstance(dn1[0].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)

        # Create with single attr,value pair passed as a tuple
        dn1 = DN((self.attr1, self.value1))
        self.assertEqual(len(dn1), 1)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)

        # Creation with multiple attr,value string pairs should fail
        with self.assertRaises(ValueError):
            dn1 = DN(self.attr1, self.value1, self.attr2, self.value2)

        # Create with multiple attr,value pairs passed as tuples & lists
        dn1 = DN((self.attr1, self.value1), [self.attr2, self.value2])
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[1], self.rdn2)

        # Create with multiple attr,value pairs passed as tuple and RDN
        dn1 = DN((self.attr1, self.value1), RDN((self.attr2, self.value2)))
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[1], self.rdn2)

        # Create with multiple attr,value pairs but reverse
        # constructor parameter ordering. RDN ordering should also be
        # reversed because DN's are a ordered sequence of RDN's
        dn1 = DN((self.attr2, self.value2), (self.attr1, self.value1))
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn2)
        self.assertEqual(dn1[1], self.rdn1)

        # Create with single RDN object
        dn1 = DN(self.rdn1)
        self.assertEqual(len(dn1), 1)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)

        # Create with multiple RDN objects, assure ordering is preserved.
        dn1 = DN(self.rdn1, self.rdn2)
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[1], self.rdn2)

        # Create with multiple RDN objects in different order, assure
        # ordering is preserved.
        dn1 = DN(self.rdn2, self.rdn1)
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn2)
        self.assertEqual(dn1[1], self.rdn1)

        # Create with single string with 1 RDN
        dn1 = DN(self.str_rdn1)
        self.assertEqual(len(dn1), 1)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)

        # Create with single string with 2 RDN's
        dn1 = DN(self.str_dn3)
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[1], self.rdn2)

        # Create with a python-cryptography 'Name'
        dn1 = DN(self.x500name)
        self.assertEqual(len(dn1), 2)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')
            self.assertIsInstance(dn1[i].attr, unicode)
            self.assertIsInstance(dn1[i].value, unicode)
        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[1], self.rdn2)

        # Create with RDN, and 2 DN's (e.g. attr + container + base)
        dn1 = DN((self.attr1, self.value1), self.container_dn, self.base_dn)
        self.assertEqual(len(dn1), 5)
        dn_str = ','.join([str(self.rdn1),
                            str(self.container_rdn1), str(self.container_rdn2),
                            str(self.base_rdn1), str(self.base_rdn2)])
        self.assertEqual(str(dn1), dn_str)

    def test_str(self):
        dn1 = DN(self.dn1)
        dn2 = DN(self.dn2)
        dn3 = DN(self.dn3)

        self.assertEqual(str(dn1), self.str_dn1)
        self.assertIsInstance(str(dn1), str)

        self.assertEqual(str(dn2), self.str_dn2)
        self.assertIsInstance(str(dn2), str)

        self.assertEqual(str(dn3), self.str_dn3)
        self.assertIsInstance(str(dn3), str)

    def test_cmp(self):
        # Equality
        dn1 = DN((self.attr1, self.value1))

        self.assertTrue(dn1 == self.dn1)
        self.assertFalse(dn1 != self.dn1)

        self.assertTrue(dn1 == self.str_dn1)
        self.assertFalse(dn1 != self.str_dn1)

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 0)

        # Make dn1's attr greater
        with self.assertRaises(AttributeError):
            dn1[0].attr = self.attr1 + "1"
        dn1 = DN((self.attr1 + "1", self.value1))

        self.assertFalse(dn1 == self.dn1)
        self.assertTrue(dn1 != self.dn1)

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 1)

        result = cmp(self.dn1, dn1)
        self.assertEqual(result, -1)

        # Reset dn1's attr, should be equal again
        with self.assertRaises(AttributeError):
            dn1[0].attr = self.attr1
        dn1 = DN((self.attr1, self.value1))

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 0)

        # Make dn1's value greater
        # attr will be equal; this tests the secondary comparison component
        with self.assertRaises(AttributeError):
            dn1[0].value = self.value1 + "1"
        dn1 = DN((self.attr1, self.value1 + "1"))

        result = cmp(dn1, self.dn1)
        self.assertEqual(result, 1)

        result = cmp(self.dn1, dn1)
        self.assertEqual(result, -1)

        # Make sure DNs with more RDNs compare greater
        result = cmp(self.dn1, self.dn3)
        self.assertEqual(result, -1)
        result = cmp(self.dn3, self.dn1)
        self.assertEqual(result, 1)


        # Test startswith, endswith
        container_dn = DN(self.container_dn)
        base_container_dn = DN(self.base_container_dn)

        self.assertTrue(base_container_dn.startswith(self.rdn1))
        self.assertTrue(base_container_dn.startswith(self.dn1))
        self.assertTrue(base_container_dn.startswith(self.dn1 + container_dn))
        self.assertFalse(base_container_dn.startswith(self.dn2))
        self.assertFalse(base_container_dn.startswith(self.rdn2))
        self.assertTrue(base_container_dn.startswith((self.dn1)))
        self.assertTrue(base_container_dn.startswith((self.rdn1)))
        self.assertFalse(base_container_dn.startswith((self.rdn2)))
        self.assertTrue(base_container_dn.startswith((self.rdn2, self.rdn1)))
        self.assertTrue(base_container_dn.startswith((self.dn1, self.dn2)))

        self.assertTrue(base_container_dn.endswith(self.base_dn))
        self.assertTrue(base_container_dn.endswith(container_dn + self.base_dn))
        self.assertFalse(base_container_dn.endswith(DN(self.base_rdn1)))
        self.assertTrue(base_container_dn.endswith(DN(self.base_rdn2)))
        self.assertTrue(base_container_dn.endswith((DN(self.base_rdn1), DN(self.base_rdn2))))

        # Test "in" membership
        self.assertTrue(self.container_rdn1 in container_dn)
        self.assertTrue(container_dn in container_dn)
        self.assertFalse(self.base_rdn1 in container_dn)

        self.assertTrue(self.container_rdn1 in base_container_dn)
        self.assertTrue(container_dn in base_container_dn)
        self.assertTrue(container_dn + self.base_dn in
                        base_container_dn)
        self.assertTrue(self.dn1 + container_dn + self.base_dn in
                        base_container_dn)
        self.assertTrue(self.dn1 + container_dn + self.base_dn ==
                        base_container_dn)

        self.assertFalse(self.container_rdn1 in self.base_dn)

    def test_indexing(self):
        dn1 = DN(self.dn1)
        dn2 = DN(self.dn2)
        dn3 = DN(self.dn3)

        self.assertEqual(dn1[0], self.rdn1)
        self.assertEqual(dn1[self.rdn1.attr], self.rdn1.value)
        with self.assertRaises(KeyError):
            dn1['foo']  # pylint: disable=pointless-statement

        self.assertEqual(dn2[0], self.rdn2)
        self.assertEqual(dn2[self.rdn2.attr], self.rdn2.value)
        with self.assertRaises(KeyError):
            dn2['foo']  # pylint: disable=pointless-statement

        self.assertEqual(dn3[0], self.rdn1)
        self.assertEqual(dn3[self.rdn1.attr], self.rdn1.value)
        self.assertEqual(dn3[1], self.rdn2)
        self.assertEqual(dn3[self.rdn2.attr], self.rdn2.value)
        with self.assertRaises(KeyError):
            dn3['foo']  # pylint: disable=pointless-statement

        with self.assertRaises(TypeError):
            dn3[1.0]  # pylint: disable=pointless-statement

    def test_assignments(self):
        dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
        with self.assertRaises(TypeError):
            # pylint: disable=unsupported-assignment-operation
            dn[0] = RDN('t=a')
        with self.assertRaises(TypeError):
            # pylint: disable=unsupported-assignment-operation
            dn[0:1] = [RDN('t=a'), RDN('t=b')]

    def test_iter(self):
        dn1 = DN(self.dn1)
        dn2 = DN(self.dn2)
        dn3 = DN(self.dn3)

        self.assertEqual(len(dn1), 1)
        self.assertEqual(dn1[:], self.rdn1)
        for i, ava in enumerate(dn1):
            if i == 0:
                self.assertEqual(ava, self.rdn1)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn1)))

        self.assertEqual(len(dn2), 1)
        self.assertEqual(dn2[:], self.rdn2)
        for i, ava in enumerate(dn2):
            if i == 0:
                self.assertEqual(ava, self.rdn2)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn2)))

        self.assertEqual(len(dn3), 2)
        self.assertEqual(dn3[:], DN(self.rdn1, self.rdn2))
        for i, ava in enumerate(dn3):
            if i == 0:
                self.assertEqual(ava, self.rdn1)
            elif i == 1:
                self.assertEqual(ava, self.rdn2)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(dn3)))

    def test_concat(self):
        dn1 = DN((self.attr1, self.value1))
        dn2 = DN([self.attr2, self.value2])

        # in-place addition

        dn1 += dn2
        self.assertEqual(dn1, self.dn3)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')


        dn1 = DN((self.attr1, self.value1))
        dn1 += self.rdn2
        self.assertEqual(dn1, self.dn3)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')


        dn1 = DN((self.attr1, self.value1))
        dn1 += self.dn2
        self.assertEqual(dn1, self.dn3)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')


        dn1 = DN((self.attr1, self.value1))
        dn1 += self.str_dn2
        self.assertEqual(dn1, self.dn3)
        self.assertExpectedClass(DN, dn1, 'self')
        for i in range(0, len(dn1)):
            self.assertExpectedClass(DN, dn1[i], 'RDN')
            for j in range(0, len(dn1[i])):
                self.assertExpectedClass(DN, dn1[i][j], 'AVA')


        # concatenation
        dn1 = DN((self.attr1, self.value1))
        dn3 = dn1 + dn2
        self.assertEqual(dn3, self.dn3)
        self.assertExpectedClass(DN, dn3, 'self')
        for i in range(0, len(dn3)):
            self.assertExpectedClass(DN, dn3[i], 'RDN')
            for j in range(0, len(dn3[i])):
                self.assertExpectedClass(DN, dn3[i][j], 'AVA')


        dn1 = DN((self.attr1, self.value1))
        dn3 = dn1 + self.rdn2
        self.assertEqual(dn3, self.dn3)
        self.assertExpectedClass(DN, dn3, 'self')
        for i in range(0, len(dn3)):
            self.assertExpectedClass(DN, dn3[i], 'RDN')
            for j in range(0, len(dn3[i])):
                self.assertExpectedClass(DN, dn3[i][j], 'AVA')

        dn3 = dn1 + self.str_rdn2
        self.assertEqual(dn3, self.dn3)
        self.assertExpectedClass(DN, dn3, 'self')
        for i in range(0, len(dn3)):
            self.assertExpectedClass(DN, dn3[i], 'RDN')
            self.assertExpectedClass(DN, dn3[i][0], 'AVA')

        dn3 = dn1 + self.str_dn2
        self.assertEqual(dn3, self.dn3)
        self.assertExpectedClass(DN, dn3, 'self')
        self.assertExpectedClass(DN, dn3, 'self')
        for i in range(0, len(dn3)):
            self.assertExpectedClass(DN, dn3[i], 'RDN')
            for j in range(0, len(dn3[i])):
                self.assertExpectedClass(DN, dn3[i][j], 'AVA')

        dn3 = dn1 + self.dn2
        self.assertEqual(dn3, self.dn3)
        self.assertExpectedClass(DN, dn3, 'self')
        self.assertExpectedClass(DN, dn3, 'self')
        for i in range(0, len(dn3)):
            self.assertExpectedClass(DN, dn3[i], 'RDN')
            for j in range(0, len(dn3[i])):
                self.assertExpectedClass(DN, dn3[i][j], 'AVA')

    def test_find(self):
        #        -10 -9  -8     -7  -6  -5  -4     -3  -2  -1
        dn = DN('t=0,t=1,cn=bob,t=3,t=4,t=5,cn=bob,t=7,t=8,t=9')
        pat = DN('cn=bob')

        # forward
        self.assertEqual(dn.find(pat),          2)
        self.assertEqual(dn.find(pat,  1),      2)
        self.assertEqual(dn.find(pat,  1,  3),  2)
        self.assertEqual(dn.find(pat,  2,  3),  2)
        self.assertEqual(dn.find(pat,  6),      6)

        self.assertEqual(dn.find(pat,  7),     -1)
        self.assertEqual(dn.find(pat,  1,  2), -1)

        with self.assertRaises(ValueError):
            self.assertEqual(dn.index(pat,  7),     -1)
        with self.assertRaises(ValueError):
            self.assertEqual(dn.index(pat,  1,  2), -1)

        # reverse
        self.assertEqual(dn.rfind(pat),          6)
        self.assertEqual(dn.rfind(pat, -4),      6)
        self.assertEqual(dn.rfind(pat,  6),      6)
        self.assertEqual(dn.rfind(pat,  6,  8),  6)
        self.assertEqual(dn.rfind(pat,  6,  8),  6)
        self.assertEqual(dn.rfind(pat, -8),      6)
        self.assertEqual(dn.rfind(pat, -8, -4),  6)
        self.assertEqual(dn.rfind(pat, -8, -5),  2)

        self.assertEqual(dn.rfind(pat,  7),     -1)
        self.assertEqual(dn.rfind(pat, -3),     -1)

        with self.assertRaises(ValueError):
            self.assertEqual(dn.rindex(pat,  7),     -1)
        with self.assertRaises(ValueError):
            self.assertEqual(dn.rindex(pat, -3),     -1)

    def test_replace(self):
        # pylint: disable=no-member
        dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
        with self.assertRaises(AttributeError):
            dn.replace  # pylint: disable=pointless-statement

    def test_hashing(self):
        # create DNs that are equal but differ in case
        dn1 = DN((self.attr1.lower(), self.value1.upper()))
        dn2 = DN((self.attr1.upper(), self.value1.lower()))

        # DNs that are equal should hash to the same value.
        self.assertEqual(dn1, dn2)

        # Good, everyone's equal, now verify their hash values

        self.assertEqual(hash(dn1), hash(dn2))

        # Different DN objects with the same value should
        # map to 1 common key and 1 member in a set. The key and
        # member are based on the object's value.

        dn1_a = DN(self.dn1)
        dn1_b = DN(self.dn1)

        dn2_a = DN(self.dn2)
        dn2_b = DN(self.dn2)

        dn3_a = DN(self.dn3)
        dn3_b = DN(self.dn3)

        self.assertEqual(dn1_a, dn1_b)
        self.assertEqual(dn2_a, dn2_b)
        self.assertEqual(dn3_a, dn3_b)

        d = dict()
        s = set()

        d[dn1_a] = str(dn1_a)
        d[dn1_b] = str(dn1_b)
        d[dn2_a] = str(dn2_a)
        d[dn2_b] = str(dn2_b)

        s.add(dn1_a)
        s.add(dn1_b)
        s.add(dn2_a)
        s.add(dn2_b)

        self.assertEqual(len(d), 2)
        self.assertEqual(len(s), 2)
        self.assertEqual(sorted(d), sorted([dn1_a, dn2_a]))
        self.assertEqual(sorted(s), sorted([dn1_a, dn2_a]))

        self.assertTrue(dn1_a in d)
        self.assertTrue(dn1_b in d)
        self.assertTrue(dn2_a in d)
        self.assertTrue(dn2_b in d)
        self.assertFalse(dn3_a in d)
        self.assertFalse(dn3_b in d)

        self.assertTrue(dn1_a in s)
        self.assertTrue(dn1_b in s)
        self.assertTrue(dn2_a in s)
        self.assertTrue(dn2_b in s)
        self.assertFalse(dn3_a in s)
        self.assertFalse(dn3_b in s)

    def test_x500_text(self):
        # null DN x500 ordering and LDAP ordering are the same
        nulldn = DN()
        self.assertEqual(nulldn.ldap_text(), nulldn.x500_text())

        # reverse a DN with a single RDN
        self.assertEqual(self.dn1.ldap_text(), self.dn1.x500_text())

        # reverse a DN with 2 RDNs
        dn3_x500 = self.dn3.x500_text()
        dn3_rev = DN(self.rdn2, self.rdn1)
        self.assertEqual(dn3_rev.ldap_text(), dn3_x500)

        # reverse a longer DN
        longdn_x500 = self.base_container_dn.x500_text()
        longdn_rev = DN(longdn_x500)
        l = len(self.base_container_dn)
        for i in range(l):
            self.assertEqual(longdn_rev[i], self.base_container_dn[l-1-i])
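
The behaviour these tests pin down reduces to a handful of everyday calls; a brief sketch outside the test harness, assuming the usual ipapython.dn import path:

from ipapython.dn import DN, RDN

dn = DN(('cn', 'Bob'), ('ou', 'people'), ('dc', 'example'), ('dc', 'com'))

str(dn)          # 'cn=Bob,ou=people,dc=example,dc=com' -- LDAP order, most specific RDN first
dn.x500_text()   # the same RDNs in reversed (X.500) order
dn['cn']         # 'Bob' -- indexing by attribute name returns the first matching value
dn[0]            # the first RDN, equal to RDN('cn=Bob')
dn.endswith(DN(('dc', 'example'), ('dc', 'com')))   # True
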
Example #34
0
File: tasks.py  Project: zpytela/freeipa
    def write_ca_certificates_dir(self, directory, ca_certs):
        # pylint: disable=ipa-forbidden-import
        from ipalib import x509  # FixMe: break import cycle
        # pylint: enable=ipa-forbidden-import

        path = Path(directory)
        try:
            path.mkdir(mode=0o755, exist_ok=True)
        except Exception:
            logger.error("Could not create %s", path)
            raise

        for cert, nickname, trusted, _ext_key_usage in ca_certs:
            if not trusted:
                continue

            # I'm not handling errors here because they have already
            # been checked by the time we get here
            subject = DN(cert.subject)
            issuer = DN(cert.issuer)

            # Construct the certificate filename using the Subject DN so that
            # the user can see which CA a particular file is for, and include
            # the serial number to disambiguate clashes where a subordinate CA
            # had a new certificate issued.
            #
            # Strictly speaking, certificates are uniquely identified by
            # (Issuer DN, Serial Number). Do we care about the possibility of
            # a clash where a subordinate CA had two certificates issued by
            # different CAs that happened to use the same serial number?
            filename = f'{subject.ldap_text()} {cert.serial_number}.crt'

            # pylint: disable=old-division
            cert_path = path / filename
            # pylint: enable=old-division
            try:
                f = open(cert_path, 'w')
            except Exception:
                logger.error("Could not create %s", cert_path)
                raise

            with f:
                try:
                    os.fchmod(f.fileno(), 0o644)
                except Exception:
                    logger.error("Could not set mode of %s", cert_path)
                    raise

                try:
                    f.write(f"""\
This file was created by IPA. Do not edit.

Description: {nickname}
Subject: {subject.ldap_text()}
Issuer: {issuer.ldap_text()}
Serial Number (dec): {cert.serial_number}
Serial Number (hex): {cert.serial_number:#x}

""")
                    pem = cert.public_bytes(x509.Encoding.PEM).decode('ascii')
                    f.write(pem)
                except Exception:
                    logger.error("Could not write to %s", cert_path)
                    raise

        return True
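
To make the naming scheme above concrete, a small sketch of the filename the loop would produce for a hypothetical CA certificate (subject and serial number are invented):

from ipapython.dn import DN

subject = DN(('cn', 'Certificate Authority'), ('o', 'EXAMPLE.COM'))
serial_number = 268435457

filename = f'{subject.ldap_text()} {serial_number}.crt'
# -> 'cn=Certificate Authority,o=EXAMPLE.COM 268435457.crt'
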
Example #35
0
    def setup_admin(self):
        self.admin_user = "******" % self.fqdn
        self.admin_password = ipautil.ipa_generate_password()
        self.admin_dn = DN(('uid', self.admin_user), self.ipaca_people)
        # remove user if left-over exists
        try:
            api.Backend.ldap2.delete_entry(self.admin_dn)
        except errors.NotFound:
            pass

        # add user
        entry = api.Backend.ldap2.make_entry(
            self.admin_dn,
            objectclass=[
                "top", "person", "organizationalPerson", "inetOrgPerson",
                "cmsuser"
            ],
            uid=[self.admin_user],
            cn=[self.admin_user],
            sn=[self.admin_user],
            usertype=['adminType'],
            mail=['root@localhost'],
            userPassword=[self.admin_password],
            userstate=['1'])
        api.Backend.ldap2.add_entry(entry)

        wait_groups = []
        for group in self.admin_groups:
            group_dn = DN(('cn', group), self.ipaca_groups)
            mod = [(ldap.MOD_ADD, 'uniqueMember', [self.admin_dn])]
            try:
                api.Backend.ldap2.modify_s(group_dn, mod)
            except ldap.TYPE_OR_VALUE_EXISTS:
                # already there
                return None
            else:
                wait_groups.append(group_dn)

        # Now wait until this data has been replicated to the other server
        master_conn = ipaldap.LDAPClient.from_hostname_secure(self.master_host)
        logger.debug("Waiting %s seconds for %s to appear on %s",
                     api.env.replication_wait_timeout, self.admin_dn,
                     master_conn)
        deadline = time.time() + api.env.replication_wait_timeout
        while time.time() < deadline:
            time.sleep(1)
            try:
                master_conn.simple_bind(self.admin_dn, self.admin_password)
            except errors.ACIError:
                # user not replicated yet
                pass
            else:
                logger.debug("Successfully logged in as %s", self.admin_dn)
                break
        else:
            logger.error("Unable to log in as %s on %s", self.admin_dn,
                         master_conn)
            logger.info("[hint] tune with replication_wait_timeout")
            raise errors.NotFound(reason="{} did not replicate to {}".format(
                self.admin_dn, master_conn))

        # wait for group membership
        for group_dn in wait_groups:
            replication.wait_for_entry(
                master_conn,
                group_dn,
                timeout=api.env.replication_wait_timeout,
                attr='uniqueMember',
                attrvalue=self.admin_dn)
Example #36
0
class DogtagInstance(service.Service):
    """
    This is the base class for a Dogtag 10+ instance, which uses a
    shared tomcat instance and DS to host the relevant subsystems.

    It contains functions that will be common to installations of the
    CA, KRA, and eventually TKS and TPS.
    """

    # Mapping of nicknames for tracking requests, and the profile to
    # use for that certificate.  'configure_renewal()' reads this
    # dict.  The profile MUST be specified.
    tracking_reqs = dict()

    # HSM state is shared between CA and KRA
    hsm_sstore = 'pki_hsm'

    # override token for specific nicknames
    token_names = dict()

    def get_token_name(self, nickname):
        """Look up token name for nickname."""
        return self.token_names.get(nickname, self.token_name)

    ipaca_groups = DN(('ou', 'groups'), ('o', 'ipaca'))
    ipaca_people = DN(('ou', 'people'), ('o', 'ipaca'))
    groups_aci = (
        b'(targetfilter="(objectClass=groupOfUniqueNames)")'
        b'(targetattr="cn || description || objectclass || uniquemember")'
        b'(version 3.0; acl "Allow users from o=ipaca to read groups"; '
        b'allow (read, search, compare) '
        b'userdn="ldap:///uid=*,ou=people,o=ipaca";)')

    def __init__(self,
                 realm,
                 subsystem,
                 service_desc,
                 host_name=None,
                 nss_db=paths.PKI_TOMCAT_ALIAS_DIR,
                 service_prefix=None,
                 config=None):
        """Initializer"""

        super(DogtagInstance, self).__init__('pki-tomcatd',
                                             service_desc=service_desc,
                                             realm_name=realm,
                                             service_user=constants.PKI_USER,
                                             service_prefix=service_prefix)

        self.admin_password = None
        self.fqdn = host_name
        self.pkcs12_info = None
        self.clone = False

        self.basedn = None
        self.admin_user = "******"
        self.admin_dn = DN(('uid', self.admin_user), self.ipaca_people)
        self.admin_groups = None
        self.tmp_agent_db = None
        self.subsystem = subsystem
        # replication parameters
        self.master_host = None
        self.master_replication_port = 389
        self.nss_db = nss_db
        self.config = config  # Path to CS.cfg

        # filled out by configure_instance
        self.pki_config_override = None
        self.ca_subject = None
        self.subject_base = None
        self.ajp_secret = None

    def is_installed(self):
        """
        Determine if subsystem instance has been installed.

        Returns True/False
        """
        return os.path.exists(
            os.path.join(paths.VAR_LIB_PKI_TOMCAT_DIR, self.subsystem.lower()))

    def spawn_instance(self, cfg_file, nolog_list=()):
        """
        Create and configure a new Dogtag instance using pkispawn.
        Passes in a configuration file with IPA-specific
        parameters.
        """
        subsystem = self.subsystem
        args = [paths.PKISPAWN, "-s", subsystem, "-f", cfg_file]

        with open(cfg_file) as f:
            logger.debug('Contents of pkispawn configuration file (%s):\n%s',
                         cfg_file, ipautil.nolog_replace(f.read(), nolog_list))

        try:
            ipautil.run(args, nolog=nolog_list)
        except ipautil.CalledProcessError as e:
            self.handle_setup_error(e)

    def clean_pkispawn_files(self):
        if self.tmp_agent_db is not None:
            logger.debug("Removing %s", self.tmp_agent_db)
            shutil.rmtree(self.tmp_agent_db, ignore_errors=True)

        client_dir = os.path.join('/root/.dogtag/pki-tomcat/',
                                  self.subsystem.lower())
        logger.debug("Removing %s", client_dir)
        shutil.rmtree(client_dir, ignore_errors=True)

    def restart_instance(self):
        self.restart('pki-tomcat')

    def start_instance(self):
        self.start('pki-tomcat')

    def stop_instance(self):
        try:
            self.stop('pki-tomcat')
        except Exception:
            logger.debug("%s", traceback.format_exc())
            logger.critical("Failed to stop the Dogtag instance."
                            "See the installation log for details.")

    def enable_client_auth_to_db(self):
        """
        Enable client auth connection to the internal db.
        """
        sub_system_nickname = "subsystemCert cert-pki-ca"
        if self.token_name != INTERNAL_TOKEN:
            # TODO: Dogtag 10.6.9 does not like "internal" prefix.
            sub_system_nickname = '{}:{}'.format(self.token_name,
                                                 sub_system_nickname)

        with stopped_service('pki-tomcatd', 'pki-tomcat'):
            directivesetter.set_directive(
                self.config,
                'authz.instance.DirAclAuthz.ldap.ldapauth.authtype',
                'SslClientAuth',
                quotes=False,
                separator='=')
            directivesetter.set_directive(
                self.config,
                'authz.instance.DirAclAuthz.ldap.ldapauth.clientCertNickname',
                sub_system_nickname,
                quotes=False,
                separator='=')
            directivesetter.set_directive(
                self.config,
                'authz.instance.DirAclAuthz.ldap.ldapconn.port',
                '636',
                quotes=False,
                separator='=')
            directivesetter.set_directive(
                self.config,
                'authz.instance.DirAclAuthz.ldap.ldapconn.secureConn',
                'true',
                quotes=False,
                separator='=')

            directivesetter.set_directive(self.config,
                                          'internaldb.ldapauth.authtype',
                                          'SslClientAuth',
                                          quotes=False,
                                          separator='=')

            directivesetter.set_directive(
                self.config,
                'internaldb.ldapauth.clientCertNickname',
                sub_system_nickname,
                quotes=False,
                separator='=')
            directivesetter.set_directive(self.config,
                                          'internaldb.ldapconn.port',
                                          '636',
                                          quotes=False,
                                          separator='=')
            directivesetter.set_directive(self.config,
                                          'internaldb.ldapconn.secureConn',
                                          'true',
                                          quotes=False,
                                          separator='=')
            # Remove the internaldb password as it is not needed anymore
            directivesetter.set_directive(paths.PKI_TOMCAT_PASSWORD_CONF,
                                          'internaldb',
                                          None,
                                          separator='=')

    def uninstall(self):
        if self.is_installed():
            self.print_msg("Unconfiguring %s" % self.subsystem)

        try:
            ipautil.run(
                [paths.PKIDESTROY, "-i", 'pki-tomcat', "-s", self.subsystem])
        except ipautil.CalledProcessError as e:
            logger.critical("failed to uninstall %s instance %s",
                            self.subsystem, e)

    def __is_newer_tomcat_version(self, default=None):
        try:
            result = ipautil.run([paths.BIN_TOMCAT, "version"],
                                 capture_output=True)
            sn = re.search(
                r'Server number:\s+([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)',
                result.output)
            if sn is None:
                logger.info(
                    "tomcat version cannot be parsed, "
                    "default to pre-%s", default)
                return False
            v = tasks.parse_ipa_version(sn.group(1))
            if v >= tasks.parse_ipa_version(default):
                return True
        except ipautil.CalledProcessError as e:
            logger.info(
                "failed to discover tomcat version, "
                "default to pre-%s, error: %s", default, str(e))
        return False

    def secure_ajp_connector(self):
        """ Update AJP connector to use a password protection  """

        server_xml = lxml.etree.parse(paths.PKI_TOMCAT_SERVER_XML)
        doc = server_xml.getroot()

        # no AJP connector means no need to update anything
        connectors = doc.xpath('//Connector[@port="8009"]')
        if len(connectors) == 0:
            return

        # The AJP connector listens on port 8009; take the first match
        connector = connectors[0]

        # Detect tomcat version and choose the right option name
        # pre-9.0.31.0 uses 'requiredSecret'
        # 9.0.31.0 or later uses 'secret'
        secretattr = 'requiredSecret'
        oldattr = 'requiredSecret'
        if self.__is_newer_tomcat_version('9.0.31.0'):
            secretattr = 'secret'

        rewrite = True
        if secretattr in connector.attrib:
            # secret is already in place
            # Perhaps, we need to synchronize it with Apache configuration
            self.ajp_secret = connector.attrib[secretattr]
            rewrite = False
        else:
            if oldattr in connector.attrib:
                self.ajp_secret = connector.attrib[oldattr]
                connector.attrib[secretattr] = self.ajp_secret
                del connector.attrib[oldattr]
            else:
                # Generate a password; avoid special chars so the XML is not broken
                self.ajp_secret = ipautil.ipa_generate_password(special=None)
                connector.attrib[secretattr] = self.ajp_secret

        if rewrite:
            pent = pwd.getpwnam(constants.PKI_USER)
            with open(paths.PKI_TOMCAT_SERVER_XML, "wb") as fd:
                server_xml.write(fd, pretty_print=True, encoding="utf-8")
                os.fchmod(fd.fileno(), 0o660)
                os.fchown(fd.fileno(), pent.pw_uid, pent.pw_gid)

    def http_proxy(self):
        """ Update the http proxy file  """
        template_filename = (os.path.join(paths.USR_SHARE_IPA_DIR,
                                          "ipa-pki-proxy.conf.template"))
        sub_dict = dict(
            DOGTAG_PORT=8009,
            CLONE='' if self.clone else '#',
            FQDN=self.fqdn,
            DOGTAG_AJP_SECRET='',
        )
        if self.ajp_secret:
            sub_dict['DOGTAG_AJP_SECRET'] = "secret={}".format(self.ajp_secret)
        template = ipautil.template_file(template_filename, sub_dict)
        with open(paths.HTTPD_IPA_PKI_PROXY_CONF, "w") as fd:
            fd.write(template)
            os.fchmod(fd.fileno(), 0o640)

    def configure_certmonger_renewal_helpers(self):
        """
        Create a new CA type for certmonger that will retrieve updated
        certificates from the dogtag master server.
        """
        cmonger = services.knownservices.certmonger
        cmonger.enable()
        if not services.knownservices.dbus.is_running():
            # some platforms protect dbus with RefuseManualStart=True
            services.knownservices.dbus.start()
        cmonger.start()

        bus = dbus.SystemBus()
        obj = bus.get_object('org.fedorahosted.certmonger',
                             '/org/fedorahosted/certmonger')
        iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
        for suffix, args in [
            ('', ''),
            ('-reuse', ' --reuse-existing'),
            ('-selfsigned', ' --force-self-signed'),
        ]:
            name = RENEWAL_CA_NAME + suffix
            path = iface.find_ca_by_nickname(name)
            if not path:
                command = paths.DOGTAG_IPA_CA_RENEW_AGENT_SUBMIT + args
                iface.add_known_ca(
                    name,
                    command,
                    dbus.Array([], dbus.Signature('s')),
                    # Give dogtag extra time to generate cert
                    timeout=CA_DBUS_TIMEOUT)

    def __get_pin(self, token_name=INTERNAL_TOKEN):
        try:
            return certmonger.get_pin(token_name)
        except IOError as e:
            logger.debug('Unable to determine PIN for the Dogtag instance: %s',
                         e)
            raise RuntimeError(e)

    def configure_renewal(self):
        """ Configure certmonger to renew system certs """

        for nickname in self.tracking_reqs:
            token_name = self.get_token_name(nickname)
            pin = self.__get_pin(token_name)
            try:
                certmonger.start_tracking(
                    certpath=self.nss_db,
                    ca=RENEWAL_CA_NAME,
                    nickname=nickname,
                    token_name=token_name,
                    pin=pin,
                    pre_command='stop_pkicad',
                    post_command='renew_ca_cert "%s"' % nickname,
                    profile=self.tracking_reqs[nickname],
                )
            except RuntimeError as e:
                logger.error(
                    "certmonger failed to start tracking certificate: %s", e)

    def stop_tracking_certificates(self, stop_certmonger=True):
        """
        Stop tracking our certificates. Called on uninstall.  Also called
        during upgrade to fix discrepancies.

        """
        logger.debug(
            "Configuring certmonger to stop tracking system certificates "
            "for %s", self.subsystem)

        cmonger = services.knownservices.certmonger
        if not services.knownservices.dbus.is_running():
            # some platforms protect dbus with RefuseManualStart=True
            services.knownservices.dbus.start()
        cmonger.start()

        for nickname in self.tracking_reqs:
            try:
                certmonger.stop_tracking(self.nss_db, nickname=nickname)
            except RuntimeError as e:
                logger.error(
                    "certmonger failed to stop tracking certificate: %s", e)

        if stop_certmonger:
            cmonger.stop()

    def update_cert_cs_cfg(self, directive, cert):
        """
        When renewing a Dogtag subsystem certificate the configuration file
        needs to get the new certificate as well.

        ``directive`` is the directive to update in CS.cfg.
        ``cert`` is an IPACertificate.
        The path to CS.cfg is taken from ``self.config``.
        """

        with stopped_service('pki-tomcatd', 'pki-tomcat'):
            directivesetter.set_directive(
                self.config,
                directive,
                # the cert must be only the base64 string without headers
                (base64.b64encode(cert.public_bytes(
                    x509.Encoding.DER)).decode('ascii')),
                quotes=False,
                separator='=')

    def get_admin_cert(self):
        """
        Get the certificate for the admin user by checking the ldap entry
        for the user.  There should be only one certificate per user.
        """
        logger.debug('Trying to find the certificate for the admin user')
        conn = None

        try:
            conn = ipaldap.LDAPClient.from_realm(self.realm)
            conn.external_bind()

            entry_attrs = conn.get_entry(self.admin_dn, ['usercertificate'])
            admin_cert = entry_attrs.get('usercertificate')[0]

            # TODO(edewata) Add check to warn if there is more than one cert.
        finally:
            if conn is not None:
                conn.unbind()

        return admin_cert

    def handle_setup_error(self, e):
        logger.critical("Failed to configure %s instance: %s", self.subsystem,
                        e)
        logger.critical("See the installation logs and the following "
                        "files/directories for more information:")
        logger.critical("  %s", paths.TOMCAT_TOPLEVEL_DIR)

        raise RuntimeError("%s configuration failed." % self.subsystem)

    def add_ipaca_aci(self):
        """Add ACI to allow ipaca users to read their own group information

        Dogtag users aren't allowed to enumerate their own groups. The
        setup_admin() method needs this permission in order to wait until
        all group information has been replicated.
        """
        dn = self.ipaca_groups
        mod = [(ldap.MOD_ADD, 'aci', [self.groups_aci])]
        try:
            api.Backend.ldap2.modify_s(dn, mod)
        except ldap.TYPE_OR_VALUE_EXISTS:
            logger.debug("%s already has ACI to read group information", dn)
        else:
            logger.debug("Added ACI to read groups to %s", dn)

    def setup_admin(self):
        self.admin_user = "******" % self.fqdn
        self.admin_password = ipautil.ipa_generate_password()
        self.admin_dn = DN(('uid', self.admin_user), self.ipaca_people)
        # remove user if left-over exists
        try:
            api.Backend.ldap2.delete_entry(self.admin_dn)
        except errors.NotFound:
            pass

        # add user
        entry = api.Backend.ldap2.make_entry(
            self.admin_dn,
            objectclass=[
                "top", "person", "organizationalPerson", "inetOrgPerson",
                "cmsuser"
            ],
            uid=[self.admin_user],
            cn=[self.admin_user],
            sn=[self.admin_user],
            usertype=['adminType'],
            mail=['root@localhost'],
            userPassword=[self.admin_password],
            userstate=['1'])
        api.Backend.ldap2.add_entry(entry)

        wait_groups = []
        for group in self.admin_groups:
            group_dn = DN(('cn', group), self.ipaca_groups)
            mod = [(ldap.MOD_ADD, 'uniqueMember', [self.admin_dn])]
            try:
                api.Backend.ldap2.modify_s(group_dn, mod)
            except ldap.TYPE_OR_VALUE_EXISTS:
                # already there
                return None
            else:
                wait_groups.append(group_dn)

        # Now wait until this data has been replicated to the other server
        master_conn = ipaldap.LDAPClient.from_hostname_secure(self.master_host)
        logger.debug("Waiting %s seconds for %s to appear on %s",
                     api.env.replication_wait_timeout, self.admin_dn,
                     master_conn)
        deadline = time.time() + api.env.replication_wait_timeout
        while time.time() < deadline:
            time.sleep(1)
            try:
                master_conn.simple_bind(self.admin_dn, self.admin_password)
            except errors.ACIError:
                # user not replicated yet
                pass
            else:
                logger.debug("Successfully logged in as %s", self.admin_dn)
                break
        else:
            logger.error("Unable to log in as %s on %s", self.admin_dn,
                         master_conn)
            logger.info("[hint] tune with replication_wait_timeout")
            raise errors.NotFound(reason="{} did not replicate to {}".format(
                self.admin_dn, master_conn))

        # wait for group membership
        for group_dn in wait_groups:
            replication.wait_for_entry(
                master_conn,
                group_dn,
                timeout=api.env.replication_wait_timeout,
                attr='uniqueMember',
                attrvalue=self.admin_dn)

    def __remove_admin_from_group(self, group):
        dn = DN(('cn', group), self.ipaca_groups)
        mod = [(ldap.MOD_DELETE, 'uniqueMember', self.admin_dn)]
        try:
            api.Backend.ldap2.modify_s(dn, mod)
        except ldap.NO_SUCH_ATTRIBUTE:
            # already removed
            pass

    def teardown_admin(self):
        for group in self.admin_groups:
            self.__remove_admin_from_group(group)
        api.Backend.ldap2.delete_entry(self.admin_dn)

    def backup_config(self):
        """
        Create a backup copy of CS.cfg
        """
        config = self.config
        bak = config + '.ipabkp'
        if services.knownservices['pki_tomcatd'].is_running('pki-tomcat'):
            raise RuntimeError(
                "Dogtag must be stopped when creating backup of %s" % config)
        shutil.copy(config, bak)
        # shutil.copy() doesn't copy owner
        s = os.stat(config)
        os.chown(bak, s.st_uid, s.st_gid)

    def reindex_task(self, force=False):
        """Reindex ipaca entries

        pkispawn sometimes does not run its indextasks. This leads to slow
        unindexed filters on attributes such as description, which is used
        to log in with a certificate. Explicitly reindex the attributes that
        should have been indexed by the CA's indextasks.ldif.

        See https://pagure.io/dogtagpki/issue/3083
        """
        state_name = 'reindex_task'
        if not force and sysupgrade.get_upgrade_state('dogtag', state_name):
            return

        cn = "indextask_ipaca_{}".format(int(time.time()))
        dn = DN(('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config'))
        entry = api.Backend.ldap2.make_entry(
            dn,
            objectClass=['top', 'extensibleObject'],
            cn=[cn],
            nsInstance=['ipaca'],  # Dogtag PKI database
            nsIndexAttribute=[
                # from pki/base/ca/shared/conf/indextasks.ldif
                'archivedBy',
                'certstatus',
                'clientId',
                'dataType',
                'dateOfCreate',
                'description',
                'duration',
                'extension',
                'issuedby',
                'issuername',
                'metaInfo',
                'notafter',
                'notbefore',
                'ownername',
                'publicKeyData',
                'requestid',
                'requestowner',
                'requestsourceid',
                'requeststate',
                'requesttype',
                'revInfo',
                'revokedOn',
                'revokedby',
                'serialno',
                'status',
                'subjectname',
            ],
            ttl=[10],
        )
        logger.debug('Creating ipaca reindex task %s', dn)
        api.Backend.ldap2.add_entry(entry)
        logger.debug('Waiting for task...')
        exitcode = replication.wait_for_task(api.Backend.ldap2, dn)
        logger.debug('Task %s has finished with exit code %i', dn, exitcode)
        sysupgrade.set_upgrade_state('dogtag', state_name, True)

    def set_hsm_state(self, config):
        section_name = self.subsystem.upper()
        assert section_name == 'CA'
        if config.getboolean(section_name, 'pki_hsm_enable', fallback=False):
            enable = True
            token_name = config.get(section_name, 'pki_token_name')
        else:
            enable = False
            token_name = INTERNAL_TOKEN
        self.sstore.backup_state(self.hsm_sstore, "enabled", enable)
        self.sstore.backup_state(self.hsm_sstore, "token_name", token_name)

    def restore_hsm_state(self):
        return (
            self.sstore.restore_state(self.hsm_sstore, "enabled"),
            self.sstore.restore_state(self.hsm_sstore, "token_name"),
        )

    @property
    def hsm_enabled(self):
        """Is HSM support enabled?"""
        return self.sstore.get_state(self.hsm_sstore, "enabled")

    @property
    def token_name(self):
        """HSM token name"""
        return self.sstore.get_state(self.hsm_sstore, "token_name")

    def _configure_clone(self, subsystem_config, security_domain_hostname,
                         clone_pkcs12_path):
        subsystem_config.update(
            # Security domain registration
            pki_security_domain_hostname=security_domain_hostname,
            pki_security_domain_https_port=443,
            pki_security_domain_user=self.admin_user,
            pki_security_domain_password=self.admin_password,
            # Clone
            pki_clone=True,
            pki_clone_pkcs12_path=clone_pkcs12_path,
            pki_clone_pkcs12_password=self.dm_password,
            pki_clone_replication_security="TLS",
            pki_clone_replication_master_port=self.master_replication_port,
            pki_clone_replication_clone_port=389,
            pki_clone_replicate_schema=False,
            pki_clone_uri="https://%s" %
            ipautil.format_netloc(self.master_host, 443),
        )

    def _create_spawn_config(self, subsystem_config):
        loader = PKIIniLoader(subsystem=self.subsystem,
                              fqdn=self.fqdn,
                              domain=api.env.domain,
                              subject_base=self.subject_base,
                              ca_subject=self.ca_subject,
                              admin_user=self.admin_user,
                              admin_password=self.admin_password,
                              dm_password=self.dm_password,
                              pki_config_override=self.pki_config_override)
        return loader.create_spawn_config(subsystem_config)
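
As an aside on secure_ajp_connector() above: the attribute rename it performs on server.xml can be reproduced in isolation. A standalone sketch using a made-up connector fragment (not the real pki-tomcat server.xml):

import lxml.etree

doc = lxml.etree.fromstring(
    '<Server><Service>'
    '<Connector port="8009" protocol="AJP/1.3" requiredSecret="s3cr3t"/>'
    '</Service></Server>'
)
connector = doc.xpath('//Connector[@port="8009"]')[0]

# tomcat 9.0.31.0+ expects 'secret'; carry the old 'requiredSecret' value over.
connector.attrib['secret'] = connector.attrib.pop('requiredSecret')
print(lxml.etree.tostring(doc, pretty_print=True).decode())
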
Example #37
0
File: aci.py  Project: msrb/freeipa
def _aci_to_kw(ldap, a, test=False, pkey_only=False):
    """Convert an ACI into its equivalent keywords.

       This is used for the modify operation so we can merge the
       incoming kw and existing ACI and pass the result to
       _make_aci().
    """
    kw = {}
    kw["aciprefix"], kw["aciname"] = _parse_aci_name(a.name)
    if pkey_only:
        return kw
    kw["permissions"] = tuple(a.permissions)
    if "targetattr" in a.target:
        kw["attrs"] = tuple(unicode(e) for e in a.target["targetattr"]["expression"])
    if "targetfilter" in a.target:
        target = a.target["targetfilter"]["expression"]
        if target.startswith("(memberOf=") or target.startswith("memberOf="):
            (junk, memberof) = target.split("memberOf=", 1)
            memberof = DN(memberof)
            kw["memberof"] = memberof["cn"]
        else:
            kw["filter"] = unicode(target)
    if "target" in a.target:
        target = a.target["target"]["expression"]
        found = False
        for k in _type_map.keys():
            if _type_map[k] == target:
                kw["type"] = unicode(k)
                found = True
                break
        if not found:
            if target.startswith("("):
                kw["filter"] = unicode(target)
            else:
                # See if the target is a group. If so we set the
                # targetgroup attr, otherwise we consider it a subtree
                try:
                    targetdn = DN(target.replace("ldap:///", ""))
                except ValueError as e:
                    raise errors.ValidationError(name="subtree", error=_("invalid DN (%s)") % e.message)
                if targetdn.endswith(DN(api.env.container_group, api.env.basedn)):
                    kw["targetgroup"] = targetdn[0]["cn"]
                else:
                    kw["subtree"] = unicode(target)

    groupdn = a.bindrule["expression"]
    groupdn = groupdn.replace("ldap:///", "")
    if groupdn == "self":
        kw["selfaci"] = True
    elif groupdn == "anyone":
        pass
    else:
        groupdn = DN(groupdn)
        if len(groupdn) and groupdn[0].attr == "cn":
            dn = DN()
            entry = ldap.make_entry(dn)
            try:
                entry = ldap.get_entry(groupdn, ["cn"])
            except errors.NotFound as e:
                # FIXME, use real name here
                if test:
                    dn = DN(("cn", "test"), api.env.container_permission, api.env.basedn)
                    entry = ldap.make_entry(dn, {"cn": [u"test"]})
            if api.env.container_permission in entry.dn:
                kw["permission"] = entry["cn"][0]
            else:
                if "cn" in entry:
                    kw["group"] = entry["cn"][0]

    return kw
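
A minimal sketch of the memberOf branch above, using a hypothetical targetfilter value (outer parentheses already stripped for clarity):

from ipapython.dn import DN

target = 'memberOf=cn=editors,cn=groups,cn=accounts,dc=example,dc=com'
_junk, memberof = target.split('memberOf=', 1)
group_cn = DN(memberof)['cn']   # -> 'editors', the cn of the first RDN
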
Example #38
0
class LDAPUpdate:
    action_keywords = {
        "default", "add", "remove", "only", "onlyifexist", "deleteentry",
        "replace", "addifnew", "addifexist"
    }
    index_suffix = DN(('cn', 'index'), ('cn', 'userRoot'),
                      ('cn', 'ldbm database'), ('cn', 'plugins'),
                      ('cn', 'config'))

    def __init__(self,
                 dm_password=_sentinel,
                 sub_dict=None,
                 online=_sentinel,
                 ldapi=_sentinel,
                 api=api):
        '''
        :parameters:
            dm_password
                deprecated and no longer used
            sub_dict
                substitution dictionary
            online
                deprecated and no longer used
            ldapi
                deprecated and no longer used
            api
                bootstrapped API object (for configuration)

        Data Structure Example:
        -----------------------

        dn_by_rdn_count = {
            3: 'cn=config,dc=example,dc=com',
            4: 'cn=bob,ou=people,dc=example,dc=com',
        }

        all_updates = [
            {
                'dn': 'cn=config,dc=example,dc=com',
                'default': [
                    dict(attr='attr1', value='default1'),
                ],
                'updates': [
                    dict(action='action', attr='attr1', value='value1'),
                    dict(action='replace', attr='attr2', value=['old', 'new']),
                ]
            },
            {
                'dn': 'cn=bob,ou=people,dc=example,dc=com',
                'default': [
                    dict(attr='attr3', value='default3'),
                ],
                'updates': [
                    dict(action='action', attr='attr3', value='value3'),
                    dict(action='action', attr='attr4', value='value4'),
                ]
            }
        ]

        Note that the replace action requires two values in its list.

        The default and update lists are "dispositions".

        Plugins:

        Plugins have to be specified in the update file with the 'plugin'
        directive in order to be executed.

        Example:
        plugin: update_uniqueness_plugins_to_new_syntax

        Each plugin returns two values:

        1. restart: dirsrv will be restarted AFTER this update is
                     applied.
        2. updates: A list of updates to be applied.

        The value of an update is a dictionary with the following possible
        values:
          - dn: DN, equal to the dn attribute
          - updates: list of updates against the dn
          - default: list of the default entry to be added if it doesn't
                     exist
          - deleteentry: list of dn's to be deleted (typically single dn)

        For example, this update file:

          dn: cn=global_policy,cn=$REALM,cn=kerberos,$SUFFIX
          replace:krbPwdLockoutDuration:10::600
          replace: krbPwdMaxFailure:3::6

        Generates this list, which contains the update dictionary:

        [
          {
            'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
            'updates': [
              dict(action='replace', attr='krbPwdLockoutDuration',
                   value=['10','600']),
              dict(action='replace', attr='krbPwdMaxFailure',
                   value=['3','6']),
            ]
          }
        ]

        Here is another example showing how a default entry is configured:

          dn: cn=Managed Entries,cn=etc,$SUFFIX
          default: objectClass: nsContainer
          default: objectClass: top
          default: cn: Managed Entries

        This generates:

        [
          {
            'dn': 'cn=Managed Entries,cn=etc,dc=example,dc=com',
            'default': [
              dict(attr='objectClass', value='nsContainer'),
              dict(attr='objectClass', value='top'),
              dict(attr='cn', value='Managed Entries'),
            ]
          }
        ]

        Note that the variable substitution in both examples has been completed.

        A plugin may make changes directly in LDAP or return updates in
        update format.

        '''
        if any(arg is not _sentinel for arg in (dm_password, online, ldapi)):
            warnings.warn(
                "dm_password, online, and ldapi arguments are deprecated",
                DeprecationWarning,
                stacklevel=2)
        self.sub_dict = sub_dict if sub_dict is not None else {}
        self.conn = None
        self.modified = False
        self.ldapuri = ipaldap.realm_to_ldapi_uri(api.env.realm)

        default_sub = dict(
            REALM=api.env.realm,
            DOMAIN=api.env.domain,
            SUFFIX=api.env.basedn,
            ESCAPED_SUFFIX=str(api.env.basedn),
            FQDN=api.env.host,
            LIBARCH=paths.LIBARCH,
            TIME=int(time.time()),
            MIN_DOMAIN_LEVEL=str(constants.MIN_DOMAIN_LEVEL),
            MAX_DOMAIN_LEVEL=str(constants.MAX_DOMAIN_LEVEL),
            STRIP_ATTRS=" ".join(constants.REPL_AGMT_STRIP_ATTRS),
            EXCLUDES="(objectclass=*) $ EXCLUDE %s" %
            (" ".join(constants.REPL_AGMT_EXCLUDES)),
            TOTAL_EXCLUDES="(objectclass=*) $ EXCLUDE %s" %
            (" ".join(constants.REPL_AGMT_TOTAL_EXCLUDES)),
            SELINUX_USERMAP_DEFAULT=platformconstants.SELINUX_USERMAP_DEFAULT,
            SELINUX_USERMAP_ORDER=platformconstants.SELINUX_USERMAP_ORDER,
            FIPS="#" if tasks.is_fips_enabled() else "",
        )
        for k, v in default_sub.items():
            self.sub_dict.setdefault(k, v)

        self.api = create_api(mode=None)
        self.api.bootstrap(in_server=True,
                           context='updates',
                           confdir=paths.ETC_IPA,
                           ldap_uri=self.ldapuri)
        self.api.finalize()

    def _template_str(self, s):
        try:
            return ipautil.template_str(s, self.sub_dict)
        except KeyError as e:
            raise BadSyntax("Unknown template keyword %s" % e)

    def read_file(self, filename):
        if filename == '-':
            fd = sys.stdin
        else:
            fd = open(filename)
        text = fd.readlines()
        if fd != sys.stdin: fd.close()
        return text

    def parse_update_file(self, data_source_name, source_data, all_updates):
        """Parse the update file into a dictonary of lists and apply the update
           for each DN in the file."""
        update = {}
        logical_line = ""
        dn = None
        lcount = 0

        def emit_item(logical_line):
            '''
            Given a logical line containing an item to process, perform the following:

            * Strip leading & trailing whitespace
            * Substitute any variables
            * Strip again and skip empty/commented lines after substitution
            * Get the action, attribute, and value
            * Each update has one list per disposition, append to specified disposition list
            '''

            logical_line = logical_line.strip()
            if logical_line == '':
                return

            # Perform variable substitution on the constructed line
            logical_line = self._template_str(logical_line)

            # skip line if substitution has added a comment. FIPS mode
            # disables some lines that way.
            logical_line = logical_line.strip()
            if not logical_line or logical_line.startswith('#'):
                return

            items = logical_line.split(':', 2)

            if len(items) == 0:
                raise BadSyntax("Bad formatting on line %s:%d: %s" %
                                (data_source_name, lcount, logical_line))

            action = items[0].strip().lower()

            if action not in self.action_keywords:
                raise BadSyntax("Unknown update action '%s', data source=%s" %
                                (action, data_source_name))

            if action == 'deleteentry':
                new_value = None
                disposition = "deleteentry"
            else:
                if len(items) != 3:
                    raise BadSyntax("Bad formatting on line %s:%d: %s" %
                                    (data_source_name, lcount, logical_line))

                attr = items[1].strip()
                # do not strip here, we need to detect '::' for base64 encoded
                # values; stripping may lead to false detection
                value = items[2]

                # detect base64 encoding
                # values which start with ':' are base64 encoded
                # decode them as binary values
                if value.startswith(':'):
                    value = value[1:]
                    binary = True
                else:
                    binary = False
                value = value.strip()

                if action == 'replace':
                    try:
                        value = value.split('::', 1)
                    except ValueError:
                        raise BadSyntax(
                            "Bad syntax in replace on line %s:%d: %s, needs to "
                            "be in the format old::new in %s" %
                            (data_source_name, lcount, logical_line, value))
                else:
                    value = [value]

                if binary:
                    for i, v in enumerate(value):
                        try:
                            value[i] = base64.b64decode(v)
                        except (TypeError, ValueError) as e:
                            raise BadSyntax(
                                "Base64 encoded value %s on line %s:%d: %s is "
                                "incorrect (%s)" %
                                (v, data_source_name, lcount, logical_line, e))
                else:
                    for i, v in enumerate(value):
                        if isinstance(v, unicode):
                            value[i] = v.encode('utf-8')

                if action != 'replace':
                    value = value[0]

                if action == "default":
                    new_value = {'attr': attr, 'value': value}
                    disposition = "default"
                else:
                    new_value = {
                        'action': action,
                        "attr": attr,
                        'value': value
                    }
                    disposition = "updates"

            disposition_list = update.setdefault(disposition, [])
            disposition_list.append(new_value)

        def emit_update(update):
            '''
            When processing of a dn is completed, emit the update by appending
            it to the list of all updates
            '''
            dn = update.get('dn')
            assert isinstance(dn, DN)
            all_updates.append(update)

        def emit_plugin_update(update):
            '''
            When processing of a plugin is complete, emit the plugin update by
            appending it to the list of all updates
            '''
            all_updates.append(update)

        # Iterate over source input lines
        for source_line in source_data:
            lcount += 1

            # strip trailing whitespace and newline
            source_line = source_line.rstrip()

            # skip comments and empty lines
            if source_line.startswith('#') or source_line == '':
                continue

            state = None
            emit_previous_dn = False

            # parse special keywords
            if source_line.lower().startswith('dn:'):
                state = 'dn'
                emit_previous_dn = True
            elif source_line.lower().startswith('plugin:'):
                state = 'plugin'
                emit_previous_dn = True

            if emit_previous_dn and dn is not None:
                # Emit previous dn
                emit_item(logical_line)
                logical_line = ''
                emit_update(update)
                update = {}
                dn = None

            if state == 'dn':
                # Starting new dn
                dn = source_line[3:].strip()
                dn = DN(self._template_str(dn))
                update['dn'] = dn
            elif state == 'plugin':
                # plugin specification is online only
                plugin_name = source_line[7:].strip()
                if not plugin_name:
                    raise BadSyntax("plugin name is not defined")
                update['plugin'] = plugin_name
                emit_plugin_update(update)
                update = {}
            else:
                # Process items belonging to dn
                if dn is None:
                    raise BadSyntax(
                        "dn is not defined in the update, data source=%s" %
                        (data_source_name))

                # If continuation line, append to existing logical line & continue,
                # otherwise flush the previous item.
                if source_line.startswith(' '):
                    logical_line += source_line[1:]
                    continue
                emit_item(logical_line)
                logical_line = source_line

        if dn is not None:
            emit_item(logical_line)
            logical_line = ''
            emit_update(update)
            update = {}

        return all_updates

    def create_index_task(self, *attributes):
        """Create a task to update an index for attributes"""

        # Sleep a bit to ensure previous operations are complete
        time.sleep(5)

        cn_uuid = uuid.uuid1()
        # cn_uuid.time is in nanoseconds, but other users of LDAPUpdate expect
        # seconds in 'TIME' so scale the value down
        self.sub_dict['TIME'] = int(cn_uuid.time / 1e9)
        cn = "indextask_%s_%s" % (cn_uuid.time, cn_uuid.clock_seq)
        dn = DN(('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config'))

        e = self.conn.make_entry(
            dn,
            objectClass=['top', 'extensibleObject'],
            cn=[cn],
            nsInstance=['userRoot'],
            nsIndexAttribute=list(attributes),
        )

        logger.debug("Creating task %s to index attributes: %s", dn,
                     ', '.join(attributes))

        self.conn.add_entry(e)

        return dn

    def monitor_index_task(self, dn):
        """Give a task DN monitor it and wait until it has completed (or failed)
        """

        assert isinstance(dn, DN)

        # Pause for a moment to give the task time to be created
        time.sleep(1)

        attrlist = ['nstaskstatus', 'nstaskexitcode']
        entry = None

        while True:
            try:
                entry = self.conn.get_entry(dn, attrlist)
            except errors.NotFound as e:
                logger.error("Task not found: %s", dn)
                return
            except errors.DatabaseError as e:
                logger.error("Task lookup failure %s", e)
                return

            status = entry.single_value.get('nstaskstatus')
            if status is None:
                # task doesn't have a status yet
                time.sleep(1)
                continue

            if "finished" in status.lower():
                logger.debug("Indexing finished")
                break

            logger.debug("Indexing in progress")
            time.sleep(1)

        return

    def _create_default_entry(self, dn, default):
        """Create the default entry from the values provided.

           The return type is ipaldap.LDAPEntry
        """
        assert isinstance(dn, DN)
        entry = self.conn.make_entry(dn)

        if not default:
            # This means that the entire entry needs to be created with add
            return entry

        for item in default:
            # We already do syntax-parsing so this is safe
            attr = item['attr']
            value = item['value']

            e = entry.raw.get(attr)
            if e:
                # multi-valued attribute
                e = list(e)
                e.append(value)
            else:
                e = [value]

            entry.raw[attr] = e
        entry.reset_modlist()

        return entry

    def _get_entry(self, dn):
        """Retrieve an object from LDAP.

           The return type is ipaldap.LDAPEntry
        """
        assert isinstance(dn, DN)
        searchfilter = "objectclass=*"
        sattrs = ["*", "aci", "attributeTypes", "objectClasses"]
        scope = self.conn.SCOPE_BASE

        return self.conn.get_entries(dn, scope, searchfilter, sattrs)

    def _apply_update_disposition(self, updates, entry):
        """
        updates is a list of changes to apply
        entry is the thing to apply them to

        Returns the modified entry
        """
        if not updates:
            return entry

        only = {}
        for update in updates:
            # We already do syntax-parsing so this is safe
            action = update['action']
            attr = update['attr']
            update_value = update['value']

            # do not mix comparison of bytes and unicode, everything in this
            # function should be compared as bytes
            if isinstance(update_value, (list, tuple)):
                update_value = [
                    v.encode('utf-8') if isinstance(v, unicode) else v
                    for v in update_value
                ]
            elif isinstance(update_value, unicode):
                update_value = update_value.encode('utf-8')

            entry_values = entry.raw.get(attr, [])
            if action == 'remove':
                logger.debug("remove: '%s' from %s, current value %s",
                             safe_output(attr, update_value), attr,
                             safe_output(attr, entry_values))
                try:
                    entry_values.remove(update_value)
                except ValueError:
                    logger.debug("remove: '%s' not in %s",
                                 safe_output(attr, update_value), attr)
                else:
                    entry.raw[attr] = entry_values
                    logger.debug('remove: updated value %s',
                                 safe_output(attr, entry_values))
            elif action == 'add':
                logger.debug("add: '%s' to %s, current value %s",
                             safe_output(attr, update_value), attr,
                             safe_output(attr, entry_values))
                # Remove it, ignoring errors so we can blindly add it later
                try:
                    entry_values.remove(update_value)
                except ValueError:
                    pass
                entry_values.append(update_value)
                logger.debug('add: updated value %s',
                             safe_output(attr, entry_values))
                entry.raw[attr] = entry_values
            elif action == 'addifnew':
                logger.debug("addifnew: '%s' to %s, current value %s",
                             safe_output(attr, update_value), attr,
                             safe_output(attr, entry_values))
                # Only add the attribute if it doesn't exist. Only works
                # with single-value attributes. Entry must exist.
                if entry.get('objectclass') and len(entry_values) == 0:
                    entry_values.append(update_value)
                    logger.debug('addifnew: set %s to %s', attr,
                                 safe_output(attr, entry_values))
                    entry.raw[attr] = entry_values
            elif action == 'addifexist':
                logger.debug("addifexist: '%s' to %s, current value %s",
                             safe_output(attr, update_value), attr,
                             safe_output(attr, entry_values))
                # Only add the attribute if the entry exists. We
                # determine this based on whether it has an objectclass
                if entry.get('objectclass'):
                    entry_values.append(update_value)
                    logger.debug('addifexist: set %s to %s', attr,
                                 safe_output(attr, entry_values))
                    entry.raw[attr] = entry_values
            elif action == 'only':
                logger.debug("only: set %s to '%s', current value %s", attr,
                             safe_output(attr, update_value),
                             safe_output(attr, entry_values))
                if only.get(attr):
                    entry_values.append(update_value)
                else:
                    entry_values = [update_value]
                    only[attr] = True
                entry.raw[attr] = entry_values
                logger.debug('only: updated value %s',
                             safe_output(attr, entry_values))
            elif action == 'onlyifexist':
                logger.debug("onlyifexist: '%s' to %s, current value %s",
                             safe_output(attr, update_value), attr,
                             safe_output(attr, entry_values))
                # Only set the attribute if the entry exists. We
                # determine this based on whether it has an objectclass
                if entry.get('objectclass'):
                    if only.get(attr):
                        entry_values.append(update_value)
                    else:
                        entry_values = [update_value]
                        only[attr] = True
                    logger.debug('onlyifexist: set %s to %s', attr,
                                 safe_output(attr, entry_values))
                    entry.raw[attr] = entry_values
            elif action == 'deleteentry':
                # skip this update type, it occurs in __delete_entries()
                return None
            elif action == 'replace':
                # replace values were stored as a list
                old, new = update_value

                try:
                    entry_values.remove(old)
                except ValueError:
                    logger.debug('replace: %s not found, skipping',
                                 safe_output(attr, old))
                else:
                    entry_values.append(new)
                    logger.debug('replace: updated value %s',
                                 safe_output(attr, entry_values))
                    entry.raw[attr] = entry_values

        return entry

    def print_entity(self, e, message=None):
        """The entity object currently lacks a str() method"""
        logger.debug("---------------------------------------------")
        if message:
            logger.debug("%s", message)
        logger.debug("dn: %s", e.dn)
        for a, value in e.raw.items():
            logger.debug('%s:', a)
            for l in value:
                logger.debug("\t%s", safe_output(a, l))

    def _update_record(self, update):
        found = False

        new_entry = self._create_default_entry(update.get('dn'),
                                               update.get('default'))

        try:
            e = self._get_entry(new_entry.dn)
            if len(e) > 1:
                # we should only ever get back one entry
                raise BadSyntax(
                    "More than 1 entry returned on a dn search!? %s" %
                    new_entry.dn)
            entry = e[0]
            found = True
            logger.debug("Updating existing entry: %s", entry.dn)
        except errors.NotFound:
            # Doesn't exist, start with the default entry
            entry = new_entry
            logger.debug("New entry: %s", entry.dn)
        except errors.DatabaseError:
            # Doesn't exist, start with the default entry
            entry = new_entry
            logger.debug("New entry, using default value: %s", entry.dn)

        self.print_entity(entry, "Initial value")

        # Bring this entry up to date
        entry = self._apply_update_disposition(update.get('updates'), entry)
        if entry is None:
            # It might be None if it is just deleting an entry
            return None, False

        self.print_entity(entry, "Final value after applying updates")

        added = False
        updated = False
        if not found:
            try:
                if len(entry):
                    # addifexist may result in an entry with only a
                    # dn defined. In that case there is nothing to do.
                    # It means the entry doesn't exist, so skip it.
                    try:
                        self.conn.add_entry(entry)
                    except errors.NotFound:
                        # parent entry of the added entry does not exist
                        # this may not be an error (e.g. entries in NIS container)
                        logger.error(
                            "Parent DN of %s may not exist, cannot "
                            "create the entry", entry.dn)
                        return entry, False
                added = True
                self.modified = True
            except Exception as e:
                logger.error("Add failure %s", e)
        else:
            # Update LDAP
            try:
                changes = entry.generate_modlist()
                if len(changes) >= 1:
                    updated = True
                safe_changes = []
                for (type, attr, values) in changes:
                    safe_changes.append((type, attr, safe_output(attr,
                                                                 values)))
                logger.debug("%s", safe_changes)
                logger.debug("Updated %d", updated)
                if updated:
                    self.conn.update_entry(entry)
                logger.debug("Done")
            except errors.EmptyModlist:
                logger.debug("Entry already up-to-date")
                updated = False
            except errors.DatabaseError as e:
                logger.error("Update failed: %s", e)
                updated = False
            except errors.DuplicateEntry as e:
                logger.debug("Update already exists, skip it: %s", e)
                updated = False
            except errors.ACIError as e:
                logger.error("Update failed: %s", e)
                updated = False

            if updated:
                self.modified = True

        return entry, added or updated

    def _delete_record(self, updates):
        """
        Delete record
        """

        dn = updates['dn']
        try:
            logger.debug("Deleting entry %s", dn)
            self.conn.delete_entry(dn)
            self.modified = True
        except errors.NotFound as e:
            logger.debug("%s did not exist:%s", dn, e)
            self.modified = True
        except errors.DatabaseError as e:
            logger.error("Delete failed: %s", e)

    def get_all_files(self, root, recursive=False):
        """Get all update files"""
        f = []
        for path, _subdirs, files in os.walk(root):
            for name in files:
                if fnmatch.fnmatch(name, "*.update"):
                    f.append(os.path.join(path, name))
            if not recursive:
                break
        f.sort()
        return f

    def _run_update_plugin(self, plugin_name):
        logger.debug("Executing upgrade plugin: %s", plugin_name)
        restart_ds, updates = self.api.Updater[plugin_name]()
        if updates:
            self._run_updates(updates)
        # restart may be required even if no updates were returned
        # from plugin, plugin may change LDAP data directly
        if restart_ds:
            self.close_connection()
            self.restart_ds()
            self.create_connection()

    def create_connection(self):
        if self.conn is None:
            self.api.Backend.ldap2.connect(time_limit=UPDATE_SEARCH_TIME_LIMIT,
                                           size_limit=0)
            self.conn = self.api.Backend.ldap2

    def _run_updates(self, all_updates):
        index_attributes = set()
        for update in all_updates:
            if 'deleteentry' in update:
                self._delete_record(update)
            elif 'plugin' in update:
                self._run_update_plugin(update['plugin'])
            else:
                entry, modified = self._update_record(update)
                if modified and entry.dn.endswith(self.index_suffix):
                    index_attributes.add(entry.single_value['cn'])

        if index_attributes:
            # The LDAPUpdate framework now keeps record of all changed/added
            # indices and batches all changed attribute in a single index
            # task. This makes updates much faster when multiple indices are
            # added or modified.
            task_dn = self.create_index_task(*sorted(index_attributes))
            self.monitor_index_task(task_dn)

    def update(self, files, ordered=True):
        """Execute the update. files is a list of the update files to use.
        :param ordered: Update files are executed in alphabetical order

        returns True if anything was changed, otherwise False
        """
        self.modified = False
        try:
            self.create_connection()

            upgrade_files = files
            if ordered:
                upgrade_files = sorted(files)

            for f in upgrade_files:
                start = time.time()
                try:
                    logger.debug("Parsing update file '%s'", f)
                    data = self.read_file(f)
                except Exception as e:
                    logger.error("error reading update file '%s'", f)
                    raise RuntimeError(e)

                all_updates = []
                self.parse_update_file(f, data, all_updates)
                self._run_updates(all_updates)
                dur = time.time() - start
                logger.debug("LDAP update duration: %s %.03f sec",
                             f,
                             dur,
                             extra={'timing': ('ldapupdate', f, None, dur)})
        finally:
            self.close_connection()

        return self.modified

    def close_connection(self):
        """Close ldap connection"""
        if self.conn:
            self.api.Backend.ldap2.disconnect()
            self.conn = None

    def restart_ds(self):
        logger.debug('Restarting directory server to apply updates')
        installutils.restart_dirsrv()
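
Taken as a whole, the class above is driven through get_all_files() and update(). A minimal usage sketch, assuming a configured IPA server; the updates directory path is an assumption made for illustration:

ld = LDAPUpdate()                                    # defaults fill in REALM, SUFFIX, FQDN, ...
files = ld.get_all_files('/usr/share/ipa/updates')   # assumed location of the *.update files
if ld.update(files):                                 # parses each file and applies the changes
    print('LDAP data was modified')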
Example #39
    def test_find(self):
        #        -10 -9  -8     -7  -6  -5  -4     -3  -2  -1
        dn = DN('t=0,t=1,cn=bob,t=3,t=4,t=5,cn=bob,t=7,t=8,t=9')
        pat = DN('cn=bob')

        # forward
        self.assertEqual(dn.find(pat),          2)
        self.assertEqual(dn.find(pat,  1),      2)
        self.assertEqual(dn.find(pat,  1,  3),  2)
        self.assertEqual(dn.find(pat,  2,  3),  2)
        self.assertEqual(dn.find(pat,  6),      6)

        self.assertEqual(dn.find(pat,  7),     -1)
        self.assertEqual(dn.find(pat,  1,  2), -1)

        with self.assertRaises(ValueError):
            self.assertEqual(dn.index(pat,  7),     -1)
        with self.assertRaises(ValueError):
            self.assertEqual(dn.index(pat,  1,  2), -1)

        # reverse
        self.assertEqual(dn.rfind(pat),          6)
        self.assertEqual(dn.rfind(pat, -4),      6)
        self.assertEqual(dn.rfind(pat,  6),      6)
        self.assertEqual(dn.rfind(pat,  6,  8),  6)
        self.assertEqual(dn.rfind(pat,  6,  8),  6)
        self.assertEqual(dn.rfind(pat, -8),      6)
        self.assertEqual(dn.rfind(pat, -8, -4),  6)
        self.assertEqual(dn.rfind(pat, -8, -5),  2)

        self.assertEqual(dn.rfind(pat,  7),     -1)
        self.assertEqual(dn.rfind(pat, -3),     -1)

        with self.assertRaises(ValueError):
            self.assertEqual(dn.rindex(pat,  7),     -1)
        with self.assertRaises(ValueError):
            self.assertEqual(dn.rindex(pat, -3),     -1)
Example #41
 for attr in entry_attrs.keys():
     if ldap.has_dn_syntax(attr):
         for ind, value in enumerate(entry_attrs[attr]):
             if not isinstance(value, DN):
                 # value is not DN instance, the automatic encoding may have
                 # failed due to missing schema or the remote attribute type OID was
                 # not detected as DN type. Try to work this around
                 api.log.debug(
                     "%s: value %s of type %s in attribute %s is not a DN" ", convert it",
                     pkey,
                     value,
                     type(value),
                     attr,
                 )
                 try:
                     value = DN(value)
                 except ValueError as e:
                     api.log.warn(
                         "%s: skipping normalization of value %s of type %s "
                         "in attribute %s which could not be converted to DN: %s",
                         pkey,
                         value,
                         type(value),
                         attr,
                         e,
                     )
                     continue
             try:
                 remote_entry = ds_ldap.get_entry(
                     value, [api.Object.user.primary_key.name, api.Object.group.primary_key.name]
                 )
Example #42
 def get_dn(self, *keys, **kwargs):
     if keys[-1] is not None:
         return self.api.Object.user.get_dn(*keys, **kwargs)
     return DN(self.container_dn, api.env.basedn)
Example #43
        failed[pkey] = unicode(_krb_err_msg % principal)

    # Fix any attributes with DN syntax that point to entries in the old
    # tree

    for attr in entry_attrs.keys():
        if ldap.has_dn_syntax(attr):
            for ind, value in enumerate(entry_attrs[attr]):
                if not isinstance(value, DN):
                    # value is not DN instance, the automatic encoding may have
                    # failed due to missing schema or the remote attribute type OID was
                    # not detected as DN type. Try to work this around
                    api.log.debug('%s: value %s of type %s in attribute %s is not a DN'
                        ', convert it', pkey, value, type(value), attr)
                    try:
                        value = DN(value)
                    except ValueError as e:
                        api.log.warn('%s: skipping normalization of value %s of type %s '
                            'in attribute %s which could not be converted to DN: %s',
                                pkey, value, type(value), attr, e)
                        continue
                try:
                    remote_entry = ds_ldap.get_entry(value, [api.Object.user.primary_key.name, api.Object.group.primary_key.name])
                except errors.NotFound:
                    api.log.warn('%s: attribute %s refers to non-existent entry %s' % (pkey, attr, value))
                    continue
                if value.endswith(search_bases['user']):
                    primary_key = api.Object.user.primary_key.name
                    container = api.env.container_user
                elif value.endswith(search_bases['group']):
                    primary_key = api.Object.group.primary_key.name
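
The fragment above breaks off just before the DN is actually rewritten. That last step re-roots the remote entry's primary key under the matching IPA container; here is a standalone sketch with stand-in values for the environment settings (a later example shows the full function):

from ipapython.dn import DN

basedn = DN(('dc', 'example'), ('dc', 'com'))              # stand-in for api.env.basedn
container_user = DN(('cn', 'users'), ('cn', 'accounts'))   # stand-in for api.env.container_user

primary_key = 'uid'    # api.Object.user.primary_key.name for the user branch
rdnval = 'alice'       # remote_entry[primary_key][0].lower()

new_value = DN((primary_key, rdnval), container_user, basedn)
# -> uid=alice,cn=users,cn=accounts,dc=example,dc=com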
Example #44
class krbtpolicy(baseldap.LDAPObject):
    """
    Kerberos Ticket Policy object
    """
    container_dn = DN(('cn', api.env.realm), ('cn', 'kerberos'))
    object_name = _('kerberos ticket policy settings')
    default_attributes = ['krbmaxticketlife', 'krbmaxrenewableage']
    limit_object_classes = ['krbticketpolicyaux']
    # permission_filter_objectclasses is deliberately missing,
    # so it is not possible to create a permission of `--type krbtpolicy`.
    # This is because we need two permissions to cover both global and per-user
    # policies.
    managed_permissions = {
        'System: Read Default Kerberos Ticket Policy': {
            'non_object': True,
            'replaces_global_anonymous_aci': True,
            'ipapermtargetfilter': ['(objectclass=krbticketpolicyaux)'],
            'ipapermlocation': DN(container_dn, api.env.basedn),
            'ipapermright': {'read', 'search', 'compare'},
            'ipapermdefaultattr': {
                'krbdefaultencsalttypes', 'krbmaxrenewableage',
                'krbmaxticketlife', 'krbsupportedencsalttypes',
                'objectclass',
            },
            'default_privileges': {
                'Kerberos Ticket Policy Readers',
            },
        },
        'System: Read User Kerberos Ticket Policy': {
            'non_object': True,
            'replaces_global_anonymous_aci': True,
            'ipapermlocation': DN(api.env.container_user, api.env.basedn),
            'ipapermtargetfilter': ['(objectclass=krbticketpolicyaux)'],
            'ipapermright': {'read', 'search', 'compare'},
            'ipapermdefaultattr': {
                'krbmaxrenewableage', 'krbmaxticketlife',
            },
            'default_privileges': {
                'Kerberos Ticket Policy Readers',
            },
        },
    }

    label = _('Kerberos Ticket Policy')
    label_singular = _('Kerberos Ticket Policy')

    takes_params = (
        Str('uid?',
            cli_name='user',
            label=_('User name'),
            doc=_('Manage ticket policy for specific user'),
            primary_key=True,
        ),
        Int('krbmaxticketlife?',
            cli_name='maxlife',
            label=_('Max life'),
            doc=_('Maximum ticket life (seconds)'),
            minvalue=1,
        ),
        Int('krbmaxrenewableage?',
            cli_name='maxrenew',
            label=_('Max renew'),
            doc=_('Maximum renewable age (seconds)'),
            minvalue=1,
        ),
    )

    def get_dn(self, *keys, **kwargs):
        if keys[-1] is not None:
            return self.api.Object.user.get_dn(*keys, **kwargs)
        return DN(self.container_dn, api.env.basedn)
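
The two branches of get_dn above resolve to two different kinds of entries. A hedged illustration of the resulting DNs, built with stand-in values (the container and suffix values are assumptions, not read from a server):

from ipapython.dn import DN

basedn = DN(('dc', 'example'), ('dc', 'com'))   # stand-in for api.env.basedn
realm = 'EXAMPLE.COM'                           # stand-in for api.env.realm

# keys[-1] is None: the single global policy entry under the kerberos container
global_policy_dn = DN(('cn', realm), ('cn', 'kerberos'), basedn)

# keys[-1] is a user name: the policy attributes live on the user entry itself
per_user_dn = DN(('uid', 'alice'), ('cn', 'users'), ('cn', 'accounts'), basedn)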
Example #45
def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs):
    assert isinstance(dn, DN)
    attr_blacklist = ['krbprincipalkey','memberofindirect','memberindirect']
    attr_blacklist.extend(kwargs.get('attr_blacklist', []))
    ds_ldap = ctx['ds_ldap']
    search_bases = kwargs.get('search_bases', None)
    valid_gids = kwargs['valid_gids']
    invalid_gids = kwargs['invalid_gids']

    if 'gidnumber' not in entry_attrs:
        raise errors.NotFound(reason=_('%(user)s is not a POSIX user') % dict(user=pkey))
    else:
        # See if the gidNumber at least points to a valid group on the remote
        # server.
        if entry_attrs['gidnumber'][0] in invalid_gids:
            api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
                         % (entry_attrs['gidnumber'][0], pkey))
        elif entry_attrs['gidnumber'][0] not in valid_gids:
            try:
                remote_entry = ds_ldap.find_entry_by_attr(
                    'gidnumber', entry_attrs['gidnumber'][0], 'posixgroup',
                    [''], search_bases['group']
                )
                valid_gids.add(entry_attrs['gidnumber'][0])
            except errors.NotFound:
                api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
                             % (entry_attrs['gidnumber'][0], pkey))
                invalid_gids.add(entry_attrs['gidnumber'][0])
            except errors.SingleMatchExpected as e:
                # GID number matched more groups, this should not happen
                api.log.warning('GID number %s of migrated user %s should match 1 group, but it matched %d groups' \
                             % (entry_attrs['gidnumber'][0], pkey, e.found))
            except errors.LimitsExceeded as e:
                api.log.warning('Search limit exceeded searching for GID %s' % entry_attrs['gidnumber'][0])

    # We don't want to create a UPG so set the magic value in description
    # to let the DS plugin know.
    entry_attrs.setdefault('description', [])
    entry_attrs['description'].append(NO_UPG_MAGIC)

    # fill in required attributes by IPA
    entry_attrs['ipauniqueid'] = 'autogenerate'
    if 'homedirectory' not in entry_attrs:
        homes_root = config.get('ipahomesrootdir', (paths.HOME_DIR, ))[0]
        home_dir = '%s/%s' % (homes_root, pkey)
        home_dir = home_dir.replace('//', '/').rstrip('/')
        entry_attrs['homedirectory'] = home_dir

    if 'loginshell' not in entry_attrs:
        default_shell = config.get('ipadefaultloginshell', [paths.SH])[0]
        entry_attrs.setdefault('loginshell', default_shell)

    # do not migrate all attributes
    for attr in attr_blacklist:
        entry_attrs.pop(attr, None)

    # do not migrate all object classes
    if 'objectclass' in entry_attrs:
        for object_class in kwargs.get('oc_blacklist', []):
            try:
                entry_attrs['objectclass'].remove(object_class)
            except ValueError:  # object class not present
                pass

    _create_kerberos_principals(ldap, pkey, entry_attrs, failed)

    # Fix any attributes with DN syntax that point to entries in the old
    # tree

    for attr in entry_attrs.keys():
        if ldap.has_dn_syntax(attr):
            for ind, value in enumerate(entry_attrs[attr]):
                if not isinstance(value, DN):
                    # value is not DN instance, the automatic encoding may have
                    # failed due to missing schema or the remote attribute type OID was
                    # not detected as DN type. Try to work this around
                    api.log.debug('%s: value %s of type %s in attribute %s is not a DN'
                        ', convert it', pkey, value, type(value), attr)
                    try:
                        value = DN(value)
                    except ValueError as e:
                        api.log.warning('%s: skipping normalization of value %s of type %s '
                            'in attribute %s which could not be converted to DN: %s',
                                pkey, value, type(value), attr, e)
                        continue
                try:
                    remote_entry = ds_ldap.get_entry(value, [api.Object.user.primary_key.name, api.Object.group.primary_key.name])
                except errors.NotFound:
                    api.log.warning('%s: attribute %s refers to non-existent entry %s' % (pkey, attr, value))
                    continue
                if value.endswith(search_bases['user']):
                    primary_key = api.Object.user.primary_key.name
                    container = api.env.container_user
                elif value.endswith(search_bases['group']):
                    primary_key = api.Object.group.primary_key.name
                    container = api.env.container_group
                else:
                    api.log.warning('%s: value %s in attribute %s does not belong into any known container' % (pkey, value, attr))
                    continue

                if not remote_entry.get(primary_key):
                    api.log.warning('%s: there is no primary key %s to migrate for %s' % (pkey, primary_key, attr))
                    continue

                api.log.debug('converting DN value %s for %s in %s' % (value, attr, dn))
                rdnval = remote_entry[primary_key][0].lower()
                entry_attrs[attr][ind] = DN((primary_key, rdnval), container, api.env.basedn)

    return dn
Example #46
    def _finalize_core(self, **defaults):
        """
        Complete initialization of standard IPA environment.

        This method will perform the following steps:

            1. Call `Env._bootstrap()` if it hasn't already been called.

            2. Merge-in variables from the configuration file ``self.conf``
               (if it exists) by calling `Env._merge_from_file()`.

            3. Merge-in variables from the defaults configuration file
               ``self.conf_default`` (if it exists) by calling
               `Env._merge_from_file()`.

            4. Intelligently fill in the *in_server*, *logdir*, *log*, and
               *jsonrpc_uri* variables if they haven't already been set.

            5. Merge-in the variables in ``defaults`` by calling `Env._merge()`.
               In normal circumstances ``defaults`` will simply be those
               specified in `constants.DEFAULT_CONFIG`.

        After this method is called, all the environment variables used by all
        the built-in plugins will be available.  As such, this method should be
        called *before* any plugins are loaded.

        After this method has finished, the `Env` instance is still writable
        so that 3rd-party plugins can set variables they may require as the
        plugins are registered.

        Also see `Env._finalize()`, the final method in the bootstrap sequence.

        :param defaults: Internal defaults for all built-in variables.
        """
        self.__doing('_finalize_core')
        self.__do_if_not_done('_bootstrap')

        # Merge in context config file and then default config file:
        if self.__d.get('mode', None) != 'dummy':
            self._merge_from_file(self.conf)
            self._merge_from_file(self.conf_default)

        # Determine if in_server:
        if 'in_server' not in self:
            self.in_server = (self.context == 'server')

        # Set logdir:
        if 'logdir' not in self:
            if self.in_tree or not self.in_server:
                self.logdir = self._join('dot_ipa', 'log')
            else:
                self.logdir = path.join('/', 'var', 'log', 'ipa')

        # Set log file:
        if 'log' not in self:
            self.log = self._join('logdir', '%s.log' % self.context)

        # Workaround for ipa-server-install --uninstall. When no config file
        # is available, we set realm, domain, and basedn to RFC 2606 reserved
        # suffix to suppress attribute errors during uninstallation.
        if (self.in_server and self.context == 'installer' and
                not getattr(self, 'config_loaded', False)):
            if 'realm' not in self:
                self.realm = 'UNCONFIGURED.INVALID'
            if 'domain' not in self:
                self.domain = self.realm.lower()

        if 'basedn' not in self and 'domain' in self:
            self.basedn = DN(*(('dc', dc) for dc in self.domain.split('.')))

        # Derive xmlrpc_uri from server
        # (Note that this is done before deriving jsonrpc_uri from xmlrpc_uri
        # and server from jsonrpc_uri so that when only server or xmlrpc_uri
        # is specified, all 3 keys have a value.)
        if 'xmlrpc_uri' not in self and 'server' in self:
            self.xmlrpc_uri = 'https://{}/ipa/xml'.format(self.server)

        # Derive ldap_uri from server
        if 'ldap_uri' not in self and 'server' in self:
            self.ldap_uri = 'ldap://{}'.format(self.server)

        # Derive jsonrpc_uri from xmlrpc_uri
        if 'jsonrpc_uri' not in self:
            if 'xmlrpc_uri' in self:
                xmlrpc_uri = self.xmlrpc_uri
            else:
                xmlrpc_uri = defaults.get('xmlrpc_uri')
            if xmlrpc_uri:
                (scheme, netloc, uripath, params, query, fragment
                        ) = urlparse(xmlrpc_uri)
                uripath = uripath.replace('/xml', '/json', 1)
                self.jsonrpc_uri = urlunparse((
                        scheme, netloc, uripath, params, query, fragment))

        if 'server' not in self:
            if 'jsonrpc_uri' in self:
                jsonrpc_uri = self.jsonrpc_uri
            else:
                jsonrpc_uri = defaults.get('jsonrpc_uri')
            if jsonrpc_uri:
                parsed = urlparse(jsonrpc_uri)
                self.server = parsed.netloc

        self._merge(**defaults)

        # set the best known TLS version if min/max versions are not set
        if 'tls_version_min' not in self:
            self.tls_version_min = TLS_VERSIONS[-1]
        elif self.tls_version_min not in TLS_VERSIONS:
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_min."
                .format(ver=self.tls_version_min))

        if 'tls_version_max' not in self:
            self.tls_version_max = TLS_VERSIONS[-1]
        elif self.tls_version_max not in TLS_VERSIONS:
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_max."
                .format(ver=self.tls_version_max))

        if self.tls_version_max < self.tls_version_min:
            raise errors.EnvironmentError(
                "tls_version_min is set to a higher TLS version than "
                "tls_version_max.")
Example #47
File: aci.py  Project: guanwei/freeipa
    def execute(self, term=None, **kw):
        ldap = self.api.Backend.ldap2

        entry = ldap.get_entry(self.api.env.basedn, ['aci'])

        acis = _convert_strings_to_acis(entry.get('aci', []))
        results = []

        if term:
            term = term.lower()
            for a in acis:
                if a.name.lower().find(term) != -1 and a not in results:
                    results.append(a)
            acis = list(results)
        else:
            results = list(acis)

        if kw.get('aciname'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if name != kw['aciname']:
                    results.remove(a)
            acis = list(results)

        if kw.get('aciprefix'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if prefix != kw['aciprefix']:
                    results.remove(a)
            acis = list(results)

        if kw.get('attrs'):
            for a in acis:
                if not 'targetattr' in a.target:
                    results.remove(a)
                    continue
                alist1 = sorted(
                    [t.lower() for t in a.target['targetattr']['expression']]
                )
                alist2 = sorted([t.lower() for t in kw['attrs']])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get('permission'):
            try:
                self.api.Command['permission_show'](
                    kw['permission']
                )
            except errors.NotFound:
                pass
            else:
                for a in acis:
                    uri = 'ldap:///%s' % entry.dn
                    if a.bindrule['expression'] != uri:
                        results.remove(a)
                acis = list(results)

        if kw.get('permissions'):
            for a in acis:
                alist1 = sorted(a.permissions)
                alist2 = sorted(kw['permissions'])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)

        if kw.get('memberof'):
            try:
                dn = _group_from_memberof(kw['memberof'])
            except errors.NotFound:
                pass
            else:
                memberof_filter = '(memberOf=%s)' % dn
                for a in acis:
                    if 'targetfilter' in a.target:
                        targetfilter = a.target['targetfilter']['expression']
                        if targetfilter != memberof_filter:
                            results.remove(a)
                    else:
                        results.remove(a)

        if kw.get('type'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                found = False
                for k, t in _type_map.items():
                    if t == target and kw['type'] == k:
                        found = True
                        break
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('selfaci', False) is True:
            for a in acis:
                if a.bindrule['expression'] != u'ldap:///self':
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('group'):
            for a in acis:
                groupdn = a.bindrule['expression']
                groupdn = DN(groupdn.replace('ldap:///', ''))
                try:
                    cn = groupdn[0]['cn']
                except (IndexError, KeyError):
                    cn = None
                if cn is None or cn != kw['group']:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('targetgroup'):
            for a in acis:
                found = False
                if 'target' in a.target:
                    target = a.target['target']['expression']
                    targetdn = DN(target.replace('ldap:///', ''))
                    group_container_dn = DN(api.env.container_group, api.env.basedn)
                    if targetdn.endswith(group_container_dn):
                        try:
                            cn = targetdn[0]['cn']
                        except (IndexError, KeyError):
                            cn = None
                        if cn == kw['targetgroup']:
                            found = True
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        if kw.get('filter'):
            if not kw['filter'].startswith('('):
                kw['filter'] = unicode('(' + kw['filter'] + ')')
            for a in acis:
                if ('targetfilter' not in a.target or
                        not a.target['targetfilter']['expression'] or
                        a.target['targetfilter']['expression'] != kw['filter']):
                    results.remove(a)

        if kw.get('subtree'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                if kw['subtree'].lower() != target.lower():
                    try:
                        results.remove(a)
                    except ValueError:
                        pass

        acis = []
        for result in results:
            if kw.get('raw', False):
                aci = dict(aci=unicode(result))
            else:
                aci = _aci_to_kw(ldap, result,
                        pkey_only=kw.get('pkey_only', False))
            acis.append(aci)

        return dict(
            result=acis,
            count=len(acis),
            truncated=False,
        )
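
Assuming this is the ``execute`` method of FreeIPA's ``aci_find`` command, a hedged usage sketch of driving it through the plugin framework could look like the following; the option names mirror the ``kw`` keys checked above, while the search values themselves are made up:

    from ipalib import api

    # assumes api has already been bootstrapped, finalized and connected,
    # e.g. inside a server-side plugin or an interactive console session
    result = api.Command['aci_find'](u'manage', aciprefix=u'permission',
                                     permissions=[u'write'])
    print('%d ACIs matched' % result['count'])
    for aci_kw in result['result']:
        # each entry is the keyword form produced by _aci_to_kw()
        # (or a raw string when raw=True is passed)
        print(aci_kw)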
Example #48
0
def assert_deepequal(expected, got, doc='', stack=tuple()):
    """
    Recursively check for type and equality.

    If a value in expected is callable, it will be used as a callback to
    test the corresponding got value: the callback is passed the got
    value and returns True if they are considered equal, False otherwise.

    If the test fails, an ``AssertionError`` is raised with detailed
    information, including the path to the offending value.  For example:

    >>> expected = [u'Hello', dict(world=1)]
    >>> got = [u'Hello', dict(world=1.0)]
    >>> expected == got
    True
    >>> assert_deepequal(
    ...    expected, got, doc='Testing my nested data')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    AssertionError: assert_deepequal: type(expected) is not type(got).
      Testing my nested data
      type(expected) = <... 'int'>
      type(got) = <... 'float'>
      expected = 1
      got = 1.0
      path = (..., 'world')

    Note that lists and tuples are considered equivalent, and the order of
    their elements does not matter.
    """
    try:
        pretty_print = pytest.config.getoption("pretty_print")
    except (AttributeError, ValueError):
        pretty_print = False

    if pretty_print:
        expected_str = struct_to_string(expected, EXPECTED_LEN)
        got_str = struct_to_string(got, GOT_LEN)
    else:
        expected_str = repr(expected)
        got_str = repr(got)

    if isinstance(expected, tuple):
        expected = list(expected)
    if isinstance(got, tuple):
        got = list(got)
    if isinstance(expected, DN):
        if isinstance(got, str):
            got = DN(got)
    if (not (isinstance(expected, Fuzzy) or callable(expected)
             or type(expected) is type(got))):
        raise AssertionError(
            TYPE % (doc, type(expected), type(got), expected, got, stack))
    if isinstance(expected, (list, tuple)):
        if len(expected) != len(got):
            raise AssertionError(
                LEN %
                (doc, len(expected), len(got), expected_str, got_str, stack))
        # Sort list elements, unless they are dictionaries
        if expected and isinstance(expected[0], dict):
            s_got = got
            s_expected = expected
        else:
            try:
                s_got = sorted(got)
            except TypeError:
                s_got = got
            try:
                s_expected = sorted(expected)
            except TypeError:
                s_expected = expected
        for (i, e_sub) in enumerate(s_expected):
            g_sub = s_got[i]
            assert_deepequal(e_sub, g_sub, doc, stack + (i, ))
    elif isinstance(expected, dict):
        missing = set(expected).difference(got)
        extra = set(got).difference(expected)
        if missing or extra:
            raise AssertionError(KEYS % (doc, sorted(missing), sorted(extra),
                                         expected_str, got_str, stack))
        for key in sorted(expected):
            e_sub = expected[key]
            g_sub = got[key]
            assert_deepequal(e_sub, g_sub, doc, stack + (key, ))
    elif callable(expected):
        if not expected(got):
            raise AssertionError(VALUE % (doc, expected, got, stack))
    elif expected != got:
        raise AssertionError(VALUE % (doc, expected, got, stack))
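
A short, hedged usage sketch of the function above, exercising the callback and DN branches; the import locations are assumptions (in FreeIPA these helpers typically come from ipatests.util and ipapython.dn), and the entry data is invented:

    # hypothetical imports; adjust to wherever these live in your tree
    from ipapython.dn import DN
    from ipatests.util import assert_deepequal

    expected = {
        'uid': [u'admin'],
        # a str on the 'got' side is coerced to DN before comparison
        'dn': DN('uid=admin,cn=users,cn=accounts,dc=example,dc=test'),
        # a callable acts as a per-value predicate instead of an equality check
        'krbextradata': lambda got: isinstance(got, bytes),
    }
    got = {
        'uid': [u'admin'],
        'dn': 'uid=admin,cn=users,cn=accounts,dc=example,dc=test',
        'krbextradata': b'\x00\x01',
    }
    assert_deepequal(expected, got, doc='admin entry')  # passes

    # A mismatch anywhere in the structure raises AssertionError with the
    # offending path: swapping [u'admin'] for (u'admin',) still passes
    # (lists and tuples are treated as equivalent), but a text value on one
    # side and a bytes value on the other fails the type check.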