Example #1
def test_nss_ssca_users(topo):
    """Validate that we can submit user certs to the ds ca for signing.

    :id: a47e47ed-2056-440b-8797-d13fa89098f6
    :steps:
        1. Find the ssca path.
        2. Assert it exists
        3. Create user certificates from the ssca
    :expectedresults:
        1. It works.
        2. It works.
        3. It works.
    """

    ssca = NssSsl(dbpath=topo.standalone.get_ssca_dir())

    if not ssca._db_exists():
        ssca.reinit()
    # Make sure the CA exists even when the database was already present,
    # otherwise the assertion below can fail.
    if not ssca._rsa_ca_exists():
        ssca.create_rsa_ca()

    # It better exist now!
    assert(ssca._rsa_ca_exists() is True)

    # Check making users certs. They should never conflict
    for user in ('william', 'noriko', 'mark'):
        # Create the user cert
        assert(ssca.create_rsa_user(user) is not None)
        # Assert it exists now
        assert(ssca._rsa_user_exists(user) is True)
    assert(ssca._rsa_user_exists('non_existent') is False)
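
The test above only exercises user certificates. For context, here is a minimal sketch of the matching server-certificate flow built from the same NssSsl calls the installer examples below use (create_rsa_key_and_csr, rsa_ca_sign_csr, import_rsa_crt); the helper name and the literal SAN hostname are illustrative assumptions, not part of the upstream test suite.

from lib389.nss_ssl import NssSsl

def sign_server_cert_with_ssca(topo):
    # Sketch only: sign a server CSR with the self-signed CA shown above.
    ssca = NssSsl(dbpath=topo.standalone.get_ssca_dir())
    tlsdb = NssSsl(dbpath=topo.standalone.get_cert_dir())
    if not tlsdb._db_exists():
        tlsdb.reinit()
    # 'localhost.localdomain' is an assumed subject alt name, purely for illustration.
    csr = tlsdb.create_rsa_key_and_csr(alt_names=['localhost.localdomain'])
    (ca, crt) = ssca.rsa_ca_sign_csr(csr)
    tlsdb.import_rsa_crt(ca, crt)
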
Example #2
def cacert_add(inst, basedn, log, args):
    """Add CA certificate
    """
    # Verify the certificate file actually exists before touching the NSS DB
    if not os.path.isfile(args.file):
        raise ValueError("The certificate file ({}) does not exist".format(args.file))
    tlsdb = NssSsl(dirsrv=inst)
    if not tlsdb._db_exists(even_partial=True):  # we want to be very careful
        log.info('Security database does not exist. Creating a new one in {}.'.
                 format(inst.get_cert_dir()))
        tlsdb.reinit()

    # get_cert_details() raises ValueError when the nickname is unknown, so a
    # successful lookup means a certificate with this name is already present.
    cert_exists = True
    try:
        tlsdb.get_cert_details(args.name)
    except ValueError:
        cert_exists = False
    if cert_exists:
        raise ValueError("Certificate already exists with the same name")

    # Add the cert
    tlsdb.add_cert(args.name, args.file, ca=True)
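
For reference, a hypothetical direct invocation of cacert_add outside the dsconf argument parser might look like the sketch below; the nickname, file path, and the already-connected inst object are assumptions for illustration only.

import argparse
import logging

log = logging.getLogger("cacert_add_example")
# 'inst' is assumed to be an existing, connected lib389 DirSrv object.
args = argparse.Namespace(name="Example CA", file="/etc/pki/example-ca.pem")
cacert_add(inst, None, log, args)
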
Example #3
def cert_add(inst, basedn, log, args):
    """Add server certificate
    """
    # Verify the certificate file actually exists before touching the NSS DB
    if not os.path.isfile(args.file):
        raise ValueError("The certificate file ({}) does not exist".format(args.file))
    tlsdb = NssSsl(dirsrv=inst)
    if not tlsdb._db_exists(even_partial=True):  # we want to be very careful
        log.info('Security database does not exist. Creating a new one in {}.'.
                 format(inst.get_cert_dir()))
        tlsdb.reinit()

    # get_cert_details() raises ValueError when the nickname is unknown, so a
    # successful lookup means a certificate with this name is already present.
    cert_exists = True
    try:
        tlsdb.get_cert_details(args.name)
    except ValueError:
        cert_exists = False
    if cert_exists:
        raise ValueError("Certificate already exists with the same name")

    if args.primary_cert:
        # This is the server's primary certificate, update RSA entry
        RSA(inst).set('nsSSLPersonalitySSL', args.name)

    # Add the cert
    tlsdb.add_cert(args.name, args.file)
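
If args.primary_cert was set, the server's nsSSLPersonalitySSL attribute should now reference the new nickname. A small hedged read-back check is sketched below; get_attr_val_utf8 is the generic lib389 attribute getter, and treating it as available here is an assumption of this sketch.

# Hypothetical sanity check after cert_add(inst, basedn, log, args) with args.primary_cert set.
assert RSA(inst).get_attr_val_utf8('nsSSLPersonalitySSL') == args.name
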
Example #4
    def _install_ds(self, general, slapd, backends):
        """
        Actually install the Ds from the dicts provided.

        You should never call this directly, as it bypasses assertions.
        """
        ######################## WARNING #############################
        # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
        # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
        # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
        #
        # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

        ### This first section is about creating the *minimal* required paths and config to get
        # the directory server to start. After this point, all configuration is performed as
        # online changes.

        # Create dse.ldif with a temporary root password.
        # This is done first, because instances are found for removal and listing by detecting
        # the presence of their dse.ldif!
        # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
        # Variables are done with %KEY%.
        self.log.debug("ACTION: Creating dse.ldif")
        try:
            os.umask(
                0o007
            )  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd['config_dir'], mode=0o770)
        except OSError:
            pass

        # Get suffix for some plugin defaults (if possible).
        # Annoyingly, for legacy compat the backend takes TWO key types,
        # and we now have to deal with that ....
        #
        # Create ds_suffix here, else it won't be in scope ....
        ds_suffix = ''
        if len(backends) > 0:
            ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

        dse = ""
        with open(
                os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                             'template-dse.ldif')) as template_dse:
            for line in template_dse.readlines():
                dse += line.replace('%', '{', 1).replace('%', '}', 1)

        # Check if we are in a container; if so, don't use /dev/shm for the db home dir,
        # as containers typically don't allocate enough space for /dev/shm and we don't
        # want to unexpectedly break the server after an upgrade.
        #
        # If we already know we are in a container, we don't need to re-detect via systemd.
        # It turns out that calling systemd-detect-virt pulls in systemd, which then breaks
        # container startup, because instance.start believes it COULD check the ds status.
        # The times we need to check for systemd are mainly in other environments that use
        # systemd natively in their containers.
        container_result = 1
        if not self.containerised:
            container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                              stdout=subprocess.PIPE)
        if self.containerised or container_result.returncode == 0:
            # In a container, set the db_home_dir to the db path
            self.log.debug(
                "Container detected setting db home directory to db directory."
            )
            slapd['db_home_dir'] = slapd['db_dir']

        with open(os.path.join(slapd['config_dir'], 'dse.ldif'),
                  'w') as file_dse:
            dse_fmt = dse.format(
                schema_dir=slapd['schema_dir'],
                lock_dir=slapd['lock_dir'],
                tmp_dir=slapd['tmp_dir'],
                cert_dir=slapd['cert_dir'],
                ldif_dir=slapd['ldif_dir'],
                bak_dir=slapd['backup_dir'],
                run_dir=slapd['run_dir'],
                inst_dir=slapd['inst_dir'],
                log_dir=slapd['log_dir'],
                fqdn=general['full_machine_name'],
                ds_port=slapd['port'],
                ds_user=slapd['user'],
                rootdn=slapd['root_dn'],
                instance_name=slapd['instance_name'],
                ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
                # This is because we never know the user's input root password, as they can validly
                # give us a *hashed* input.
                ds_suffix=ds_suffix,
                config_dir=slapd['config_dir'],
                db_dir=slapd['db_dir'],
                db_home_dir=slapd['db_home_dir'],
                db_lib=slapd['db_lib'],
                ldapi_enabled="on",
                ldapi=slapd['ldapi'],
                ldapi_autobind="on",
            )
            file_dse.write(dse_fmt)

        self.log.info("Create file system structures ...")
        # Create all the needed paths
        # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
        for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir',
                     'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
            self.log.debug("ACTION: creating %s", slapd[path])
            try:
                os.umask(
                    0o007
                )  # For parent dirs that get created -> sets 770 for perms
                os.makedirs(slapd[path], mode=0o770)
            except OSError:
                pass
            os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

        # /var/lock/dirsrv needs special attention...
        parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
        os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

        ### Warning! We need to chown the directory above the db dir too for .restore to work.
        # During a restore, the db dir is deleted and recreated, which is why we need
        # to own it for a restore.
        #
        # However, in a container, we can't always guarantee this due to how the volumes
        # work and are mounted. Specifically, if we have an anonymous volume we will
        # NEVER be able to own it, but in a true deployment it is reasonable to expect
        # we DO own it. That is why we skip it in this specific context.
        if not self.containerised:
            db_parent = os.path.join(slapd['db_dir'], '..')
            os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

        # Copy correct data to the paths.
        # Copy in the schema
        #  This is a little fragile, make it better.
        # It won't matter when we move schema to usr anyway ...

        _ds_shutil_copytree(
            os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
            slapd['schema_dir'])
        os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
        os.chmod(slapd['schema_dir'], 0o770)

        # Copy in the collation
        srcfile = os.path.join(slapd['sysconf_dir'],
                               'dirsrv/config/slapd-collations.conf')
        dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
        shutil.copy(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # Copy in the certmap configuration
        srcfile = os.path.join(slapd['sysconf_dir'],
                               'dirsrv/config/certmap.conf')
        dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
        shutil.copy(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # If we are on the correct platform settings, systemd
        if general['systemd']:
            # Should create the symlink we need, but without starting it.
            result = subprocess.run(
                ["systemctl", "enable",
                 "dirsrv@%s" % slapd['instance_name']],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            args = ' '.join(ensure_list_str(result.args))
            stdout = ensure_str(result.stdout)
            stderr = ensure_str(result.stderr)
            # Systemd encodes some odd characters into its symlink output on newer versions, which
            # can trip up the logger.
            self.log.debug(
                f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode(
                    "utf-8"))

            # Setup tmpfiles_d
            tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd[
                'instance_name'] + ".conf"
            with open(tmpfile_d, "w") as TMPFILE_D:
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['run_dir'], slapd['user'], slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['lock_dir'].replace(
                        "slapd-" + slapd['instance_name'], ""), slapd['user'],
                    slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['lock_dir'], slapd['user'], slapd['group']))

        # Else we need to detect other init scripts?
        # WB: No, we just install and assume that docker will start us ...

        # Bind sockets to our type?

        # Create certdb in sysconfdir
        self.log.debug("ACTION: Creating certificate database in %s",
                       slapd['cert_dir'])

        # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
        # If at all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

        # Should I move this import? I think this prevents some recursion
        from lib389 import DirSrv
        ds_instance = DirSrv(self.verbose, containerised=self.containerised)
        if self.containerised:
            ds_instance.systemd_override = general['systemd']

        # By default SUSE does something extremely silly - it creates a hostname
        # that CAN'T be resolved by DNS. As a result this causes all installs to
        # fail. We need to guarantee that we only connect to localhost here, as
        # it's the only stable and guaranteed way to connect to the instance
        # at this point.
        #
        # Use ldapi which would prevent the need
        # to configure a temp root pw in the setup phase.
        args = {
            SER_HOST: "localhost",
            SER_PORT: slapd['port'],
            SER_SERVERID_PROP: slapd['instance_name'],
            SER_ROOT_DN: slapd['root_dn'],
            SER_ROOT_PW: self._raw_secure_password,
            SER_DEPLOYED_DIR: slapd['prefix'],
            SER_LDAPI_ENABLED: 'on',
            SER_LDAPI_SOCKET: slapd['ldapi'],
            SER_LDAPI_AUTOBIND: 'on'
        }

        ds_instance.allocate(args)
        # Does this work?
        assert_c(ds_instance.exists(),
                 "Instance failed to install, does not exist when expected")

        # Create a certificate database.
        tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
        if not tlsdb._db_exists():
            tlsdb.reinit()

        if slapd['self_sign_cert']:
            self.log.info("Create self-signed certificate database ...")
            etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
            ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
            ssca = NssSsl(dbpath=ssca_path)
            # If it doesn't exist, create a CA DB
            if not ssca._db_exists():
                ssca.reinit()
                ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # If CA is expired or will expire soon,
            # Reissue it and resign the existing certs that were signed by the cert previously
            elif ssca.rsa_ca_needs_renew():
                ca = ssca.renew_rsa_ca(
                    months=slapd['self_sign_cert_valid_months'])
                # Import CA to the existing instances except the one we install now (we import it later)
                for dir in os.listdir(etc_dirsrv_path):
                    if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                        tlsdb_inst = NssSsl(
                            dbpath=os.path.join(etc_dirsrv_path, dir))
                        tlsdb_inst.import_rsa_crt(ca)

            csr = tlsdb.create_rsa_key_and_csr(
                alt_names=[general['full_machine_name']])
            (ca, crt) = ssca.rsa_ca_sign_csr(csr)
            tlsdb.import_rsa_crt(ca, crt)
            if general['selinux']:
                # Set selinux port label
                selinux_label_port(slapd['secure_port'])

        # Do selinux fixups
        if general['selinux']:
            self.log.info("Perform SELinux labeling ...")
            selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                             'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                             'run_dir', 'schema_dir', 'tmp_dir')
            for path in selinux_paths:
                selinux_restorecon(slapd[path])

            selinux_label_port(slapd['port'])

        # Start the server
        # Make changes using the temp root
        self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
        self.log.debug(
            f"libfaketime installed ={'libfaketime' in sys.modules}")
        assert_c(
            not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
            "libfaketime python module is incompatible with ASAN build.")
        ds_instance.start(timeout=60)
        ds_instance.open()

        # In some cases we may want to change log settings
        # ds_instance.config.enable_log('audit')

        # Create the configs related to this version.
        base_config = get_config(general['defaults'])
        base_config_inst = base_config(ds_instance)
        base_config_inst.apply_config(install=True)

        # Setup TLS with the instance.

        # We *ALWAYS* set the secure port, even if security is off, because it breaks
        # tests with standalone.enable_tls if we do not. It's only when security is on
        # that we actually start listening on it.
        if not slapd['secure_port']:
            slapd['secure_port'] = "636"
        ds_instance.config.set('nsslapd-secureport',
                               '%s' % slapd['secure_port'])
        if slapd['self_sign_cert']:
            ds_instance.config.set('nsslapd-security', 'on')

        # Before we create any backends, create any extra default indexes that may be
        # dynamically provisioned, rather than from template-dse.ldif. Looking at you
        # entryUUID (requires rust enabled).
        #
        # Indexes defaults to default_index_dn
        indexes = Indexes(ds_instance)
        if ds_instance.ds_paths.rust_enabled:
            indexes.create(
                properties={
                    'cn': 'entryUUID',
                    'nsSystemIndex': 'false',
                    'nsIndexType': ['eq', 'pres'],
                })

        # Create the backends as listed
        # Load example data if needed.
        for backend in backends:
            self.log.info(
                f"Create database backend: {backend['nsslapd-suffix']} ...")
            is_sample_entries_in_props = "sample_entries" in backend
            create_suffix_entry_in_props = backend.pop('create_suffix_entry',
                                                       False)
            ds_instance.backends.create(properties=backend)
            if not is_sample_entries_in_props and create_suffix_entry_in_props:
                # Set basic ACIs
                c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                suffix_rdn_attr = backend['nsslapd-suffix'].split(
                    '=')[0].lower()
                if suffix_rdn_attr == 'dc':
                    domain = create_base_domain(ds_instance,
                                                backend['nsslapd-suffix'])
                    domain.add('aci', dc_aci)
                elif suffix_rdn_attr == 'o':
                    org = create_base_org(ds_instance,
                                          backend['nsslapd-suffix'])
                    org.add('aci', o_aci)
                elif suffix_rdn_attr == 'ou':
                    orgunit = create_base_orgunit(ds_instance,
                                                  backend['nsslapd-suffix'])
                    orgunit.add('aci', ou_aci)
                elif suffix_rdn_attr == 'cn':
                    cn = create_base_cn(ds_instance, backend['nsslapd-suffix'])
                    cn.add('aci', cn_aci)
                elif suffix_rdn_attr == 'c':
                    c = create_base_c(ds_instance, backend['nsslapd-suffix'])
                    c.add('aci', c_aci)
                else:
                    # Unsupported rdn
                    raise ValueError(
                        "Suffix RDN '{}' in '{}' is not supported.  Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'"
                        .format(suffix_rdn_attr, backend['nsslapd-suffix']))

        # Create all required sasl maps: if we have a single backend ...
        # our default maps are really really bad, and we should feel bad.
        # they basically only work with a single backend, and they'll break
        # GSSAPI in some cases too :(
        if len(backends) > 0:
            self.log.debug("Adding sasl maps for suffix %s" %
                           backend['nsslapd-suffix'])
            backend = backends[0]
            saslmappings = SaslMappings(ds_instance)
            saslmappings.create(
                properties={
                    'cn': 'rfc 2829 u syntax',
                    'nsSaslMapRegexString': '^u:\\(.*\\)',
                    'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                    'nsSaslMapFilterTemplate': '(uid=\\1)'
                })
            # I think this is for LDAPI
            saslmappings.create(
                properties={
                    'cn': 'uid mapping',
                    'nsSaslMapRegexString': '^[^:@]+$',
                    'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                    'nsSaslMapFilterTemplate': '(uid=&)'
                })
        else:
            self.log.debug("Skipping default SASL maps - no backend found!")

        self.log.info("Perform post-installation tasks ...")
        # Change the root password finally
        ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

        # We need to log the password when containerised
        if self.containerised:
            self.log.debug("Root DN password: {}".format(
                slapd['root_password']))

        # Complete.
        if general['start']:
            # Restart for changes to take effect - this could be removed later
            ds_instance.restart(post_open=False)
        else:
            # Just stop the instance now.
            ds_instance.stop()

        self.log.debug(" 🎉 Instance setup complete")
Example #5
    def _install_ds(self, general, slapd, backends):
        """
        Actually install the Ds from the dicts provided.

        You should never call this directly, as it bypasses assertions.
        """
        ######################## WARNING #############################
        # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
        # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
        # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
        #
        # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>


        ### This first section is about creating the *minimal* required paths and config to get
        # the directory server to start. After this point, all configuration is performed as
        # online changes.

        # Create dse.ldif with a temporary root password.
        # This is done first, because instances are found for removal and listing by detecting
        # the presence of their dse.ldif!
        # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
        # Variables are done with %KEY%.
        self.log.debug("ACTION: Creating dse.ldif")
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd['config_dir'], mode=0o770)
        except OSError:
            pass

        # Get suffix for some plugin defaults (if possible).
        # Annoyingly, for legacy compat the backend takes TWO key types,
        # and we now have to deal with that ....
        #
        # Create ds_suffix here, else it won't be in scope ....
        ds_suffix = ''
        if len(backends) > 0:
            ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

        dse = ""
        with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
            for line in template_dse.readlines():
                dse += line.replace('%', '{', 1).replace('%', '}', 1)

        with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
            file_dse.write(dse.format(
                schema_dir=slapd['schema_dir'],
                lock_dir=slapd['lock_dir'],
                tmp_dir=slapd['tmp_dir'],
                cert_dir=slapd['cert_dir'],
                ldif_dir=slapd['ldif_dir'],
                bak_dir=slapd['backup_dir'],
                run_dir=slapd['run_dir'],
                inst_dir=slapd['inst_dir'],
                log_dir=slapd['log_dir'],
                fqdn=general['full_machine_name'],
                ds_port=slapd['port'],
                ds_user=slapd['user'],
                rootdn=slapd['root_dn'],
                ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
                # This is because we never know the user's input root password as they can validly give
                # us a *hashed* input.
                ds_suffix=ds_suffix,
                config_dir=slapd['config_dir'],
                db_dir=slapd['db_dir'],
            ))

        # Create all the needed paths
        # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
        for path in ('backup_dir', 'cert_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
            self.log.debug("ACTION: creating %s", slapd[path])
            try:
                os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
                os.makedirs(slapd[path], mode=0o770)
            except OSError:
                pass
            os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

        # /var/lock/dirsrv needs special attention...
        parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
        os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

        ### Warning! We need to chown the directory above the db dir too for .restore to work.
        # See dblayer.c for more!
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

        # Copy correct data to the paths.
        # Copy in the schema
        #  This is a little fragile, make it better.
        # It won't matter when we move schema to usr anyway ...

        _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
        os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
        os.chmod(slapd['schema_dir'], 0o770)

        # Copy in the collation
        srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
        dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
        shutil.copy2(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # Copy in the certmap configuration
        srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
        dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
        shutil.copy2(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # If we are on the correct platform settings, systemd
        if general['systemd']:
            # Should create the symlink we need, but without starting it.
            subprocess.check_call(["systemctl",
                                   "enable",
                                   "dirsrv@%s" % slapd['instance_name']])

            # Setup tmpfiles_d
            tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
            with open(tmpfile_d, "w") as TMPFILE_D:
                TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['run_dir'], slapd['user'], slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'].replace("slapd-" + slapd['instance_name'], ""),
                                                           slapd['user'], slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'], slapd['user'], slapd['group']))

        # Else we need to detect other init scripts?
        # WB: No, we just install and assume that docker will start us ...

        # Bind sockets to our type?


        # Create certdb in sysconfdir
        self.log.debug("ACTION: Creating certificate database in %s", slapd['cert_dir'])

        # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
        # If at all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

        # Should I move this import? I think this prevents some recursion
        from lib389 import DirSrv
        ds_instance = DirSrv(self.verbose)
        if self.containerised:
            ds_instance.systemd = general['systemd']
        args = {
            SER_PORT: slapd['port'],
            SER_SERVERID_PROP: slapd['instance_name'],
            SER_ROOT_DN: slapd['root_dn'],
            SER_ROOT_PW: self._raw_secure_password,
            SER_DEPLOYED_DIR: slapd['prefix']
        }

        ds_instance.allocate(args)
        # Does this work?
        assert_c(ds_instance.exists(), "Instance failed to install, does not exist when expected")

        # Create a certificate database.
        tlsdb = NssSsl(dbpath=slapd['cert_dir'])
        if not tlsdb._db_exists():
            tlsdb.reinit()

        if slapd['self_sign_cert']:
            etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
            ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
            ssca = NssSsl(dbpath=ssca_path)
            # If it doesn't exist, create a CA DB
            if not ssca._db_exists():
                ssca.reinit()
                ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # If CA is expired or will expire soon,
            # Reissue it and resign the existing certs that were signed by the cert previously
            elif ssca.rsa_ca_needs_renew():
                ca = ssca.renew_rsa_ca(months=slapd['self_sign_cert_valid_months'])
                # Import CA to the existing instances except the one we install now (we import it later)
                for dir in os.listdir(etc_dirsrv_path):
                    if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                        tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
                        tlsdb_inst.import_rsa_crt(ca)

            csr = tlsdb.create_rsa_key_and_csr()
            (ca, crt) = ssca.rsa_ca_sign_csr(csr)
            tlsdb.import_rsa_crt(ca, crt)
            if general['selinux']:
                # Set selinux port label
                selinux_label_port(slapd['secure_port'])

        # Do selinux fixups
        if general['selinux']:
            selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir',
                             'lock_dir', 'log_dir', 'run_dir', 'schema_dir', 'tmp_dir')
            for path in selinux_paths:
                selinux_restorecon(slapd[path])

            selinux_label_port(slapd['port'])

        # Start the server
        # Make changes using the temp root
        ds_instance.start(timeout=60)
        ds_instance.open()

        # In some cases we may want to change log settings
        # ds_instance.config.enable_log('audit')

        # Create the configs related to this version.
        base_config = get_config(general['defaults'])
        base_config_inst = base_config(ds_instance)
        base_config_inst.apply_config(install=True)

        # Setup TLS with the instance.

        # We *ALWAYS* set the secure port, even if security is off, because it breaks
        # tests with standalone.enable_tls if we do not. It's only when security is on
        # that we actually start listening on it.
        if not slapd['secure_port']:
            slapd['secure_port'] = "636"
        ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
        if slapd['self_sign_cert']:
            ds_instance.config.set('nsslapd-security', 'on')

        # Create the backends as listed
        # Load example data if needed.
        for backend in backends:
            is_sample_entries_in_props = "sample_entries" in backend
            create_suffix_entry_in_props = backend.pop('create_suffix_entry', False)
            ds_instance.backends.create(properties=backend)
            if not is_sample_entries_in_props and create_suffix_entry_in_props:
                domain = create_base_domain(ds_instance, backend['nsslapd-suffix'])
                # Set basic ACI
                domain.add('aci', [
                    # Allow reading the base domain object
                    '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)',
                    # Allow reading the ou
                    '(targetattr="ou || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                ])

        # Initialise ldapi socket information. IPA expects this ....
        ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
        ds_instance.config.set('nsslapd-ldapifilepath', ldapi_path)
        ds_instance.config.set('nsslapd-ldapilisten', 'on')
        ds_instance.config.set('nsslapd-ldapiautobind', 'on')
        ds_instance.config.set('nsslapd-ldapimaprootdn', slapd['root_dn'])


        # Create all required sasl maps: if we have a single backend ...
        # our default maps are really really bad, and we should feel bad.
        # they basically only work with a single backend, and they'll break
        # GSSAPI in some cases too :(
        if len(backends) > 0:
            self.log.debug("Adding sasl maps for suffix %s" % backend['nsslapd-suffix'])
            backend = backends[0]
            saslmappings = SaslMappings(ds_instance)
            saslmappings.create(properties={
                'cn': 'rfc 2829 u syntax',
                'nsSaslMapRegexString': '^u:\\(.*\\)',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=\\1)'
            })
            # I think this is for LDAPI
            saslmappings.create(properties={
                'cn': 'uid mapping',
                'nsSaslMapRegexString': '^[^:@]+$',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=&)'
            })
        else:
            self.log.debug("Skipping default SASL maps - no backend found!")

        # Change the root password finally
        ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

        # We need to log the password when containerised
        if self.containerised:
            self.log.debug("Root DN password: {}".format(slapd['root_password']))

        # Complete.
        if general['start']:
            # Restart for changes to take effect - this could be removed later
            ds_instance.restart(post_open=False)
        else:
            # Just stop the instance now.
            ds_instance.stop()
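
The two SASL mappings created above translate SASL authorization identities into LDAP searches. Roughly, the 'rfc 2829 u syntax' map takes an identity such as u:wbrown and searches for (uid=wbrown) under the backend suffix. The sketch below mimics that with Python's re module; the identity and suffix are assumptions, and the directory server evaluates its own regex dialect rather than Python's.

import re

# Rough illustration of the 'rfc 2829 u syntax' SASL map shown above.
authzid = "u:wbrown"                      # assumed authorization identity
m = re.match(r'^u:(.*)', authzid)         # nsSaslMapRegexString: ^u:\(.*\)
if m:
    base_dn = "dc=example,dc=com"         # nsSaslMapBaseDNTemplate (assumed suffix)
    ldap_filter = "(uid={})".format(m.group(1))  # nsSaslMapFilterTemplate: (uid=\1)
    print(base_dn, ldap_filter)           # -> dc=example,dc=com (uid=wbrown)
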
Example #6
def _remove_ssca_db(topology):
    ssca = NssSsl(dbpath=topology[0].get_ssca_dir())
    if ssca._db_exists():
        return ssca.remove_db()
    else:
        return True
Example #7
File: setup.py  Project: nextoa/389-ds-base
    def _install_ds(self, general, slapd, backends):
        """
        Actually install the Ds from the dicts provided.

        You should never call this directly, as it bypasses assertions.
        """
        # register the instance to /etc/sysconfig
        # We do this first so that we can trick remove-ds.pl if needed.
        # There may be a way to create this from template like the dse.ldif ...
        initconfig = ""
        with open("%s/dirsrv/config/template-initconfig" % slapd['sysconf_dir']) as template_init:
            for line in template_init.readlines():
                initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_')
        try:
            os.makedirs("%s/sysconfig" % slapd['sysconf_dir'], mode=0o775)
        except FileExistsError:
            pass
        with open("%s/sysconfig/dirsrv-%s" % (slapd['sysconf_dir'], slapd['instance_name']), 'w') as f:
            f.write(initconfig.format(
                SERVER_DIR=slapd['lib_dir'],
                SERVERBIN_DIR=slapd['sbin_dir'],
                CONFIG_DIR=slapd['config_dir'],
                INST_DIR=slapd['inst_dir'],
                RUN_DIR=slapd['run_dir'],
                DS_ROOT='',
                PRODUCT_NAME='slapd',
            ))

        # Create all the needed paths
        # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir? schema_dir,
        for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
            if self.verbose:
                self.log.info("ACTION: creating %s" % slapd[path])
            try:
                os.makedirs(slapd[path], mode=0o775)
            except OSError:
                pass
            os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])
        ### Warning! We need to chown the directory above the db dir too for .restore to work.
        # See dblayer.c for more!
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

        # Copy correct data to the paths.
        # Copy in the schema
        #  This is a little fragile, make it better.
        # It won't matter when we move schema to usr anyway ...

        _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
        os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])

        # Copy in the collation
        srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
        dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
        shutil.copy2(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])

        # Copy in the certmap configuration
        srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
        dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
        shutil.copy2(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])

        # If we are on the correct platform settings, systemd
        if general['systemd'] and not self.containerised:
            # Should create the symlink we need, but without starting it.
            subprocess.check_call(["/usr/bin/systemctl",
                                    "enable",
                                    "dirsrv@%s" % slapd['instance_name']])
        # Else we need to detect other init scripts?

        # Bind sockets to our type?

        # Create certdb in sysconfdir
        if self.verbose:
            self.log.info("ACTION: Creating certificate database in %s" % slapd['cert_dir'])

        # Create dse.ldif with a temporary root password.
        # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
        # Variables are done with %KEY%.
        # You could cheat and read it in, do a replace of % to { and } then use format?
        if self.verbose:
            self.log.info("ACTION: Creating dse.ldif")
        dse = ""
        with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
            for line in template_dse.readlines():
                dse += line.replace('%', '{', 1).replace('%', '}', 1)

        with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
            file_dse.write(dse.format(
                schema_dir=slapd['schema_dir'],
                lock_dir=slapd['lock_dir'],
                tmp_dir=slapd['tmp_dir'],
                cert_dir=slapd['cert_dir'],
                ldif_dir=slapd['ldif_dir'],
                bak_dir=slapd['backup_dir'],
                run_dir=slapd['run_dir'],
                inst_dir="",
                log_dir=slapd['log_dir'],
                fqdn=general['full_machine_name'],
                ds_port=slapd['port'],
                ds_user=slapd['user'],
                rootdn=slapd['root_dn'],
                # ds_passwd=slapd['root_password'],
                ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
                ds_suffix='',
                config_dir=slapd['config_dir'],
                db_dir=slapd['db_dir'],
            ))

        # open the connection to the instance.

        # Should I move this import? I think this prevents some recursion
        from lib389 import DirSrv
        ds_instance = DirSrv(self.verbose)
        ds_instance.containerised = self.containerised
        args = {
            SER_PORT: slapd['port'],
            SER_SERVERID_PROP: slapd['instance_name'],
            SER_ROOT_DN: slapd['root_dn'],
            SER_ROOT_PW: self._raw_secure_password,
            SER_DEPLOYED_DIR: slapd['prefix']
        }

        ds_instance.allocate(args)
        # Does this work?
        assert_c(ds_instance.exists(), "Instance failed to install, does not exist when expected")


        # Create a certificate database.
        tlsdb = NssSsl(dbpath=slapd['cert_dir'])
        if not tlsdb._db_exists():
            tlsdb.reinit()

        if slapd['self_sign_cert']:
            # If it doesn't exist, create a cadb.
            ssca_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/ssca/')
            ssca = NssSsl(dbpath=ssca_path)
            if not ssca._db_exists():
                ssca.reinit()
                ssca.create_rsa_ca()

            csr = tlsdb.create_rsa_key_and_csr()
            (ca, crt) = ssca.rsa_ca_sign_csr(csr)
            tlsdb.import_rsa_crt(ca, crt)

        ## LAST CHANCE, FIX PERMISSIONS.
        # Selinux fixups?
        # Restorecon of paths?

        # Start the server
        ds_instance.start(timeout=60)
        ds_instance.open()

        # In some cases we may want to change log settings
        # ds_instance.config.enable_log('audit')

        # Create the configs related to this version.
        base_config = get_config(general['defaults'])
        base_config_inst = base_config(ds_instance)
        base_config_inst.apply_config(install=True)

        # Setup TLS with the instance.
        ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
        if slapd['self_sign_cert']:
            ds_instance.config.set('nsslapd-security', 'on')

        # Create the backends as listed
        # Load example data if needed.
        for backend in backends:
            ds_instance.backends.create(properties=backend)

        # Make changes using the temp root
        # Change the root password finally

        # Initialise ldapi socket information. IPA expects this ....
        ds_instance.config.set('nsslapd-ldapifilepath', ds_instance.get_ldapi_path())
        ds_instance.config.set('nsslapd-ldapilisten', 'on')

        # Complete.
        ds_instance.config.set('nsslapd-rootpw',
                               ensure_str(slapd['root_password']))

        if self.containerised:
            # In a container build we need to stop DirSrv at the end
            ds_instance.stop()
        else:
            # Restart for changes to take effect - this could be removed later
            ds_instance.restart(post_open=False)