Example #1
def instance_remove_all(log, args):
    """Remove all instances - clean sweep!
    """

    inst_names = get_instance_list(args.remove_all)
    if len(inst_names) > 0:
        answer = input(
            "Are you sure you want to remove all the Directory Server instances?  Enter \"Yes\" to continue: "
        )
        if answer != 'Yes':
            print("Aborted removal of all instances")
            return

        # Do it!
        list_inst = DirSrv(verbose=args.verbose)
        insts = list_inst.list(all=True, serverid=inst_names[0])
        for inst in insts:
            remove_inst = DirSrv(verbose=args.verbose)
            remove_inst.allocate(inst)
            try:
                log.info("Removing instance: slapd-" +
                         str(remove_inst.serverid))
                remove_ds_instance(remove_inst)
            except Exception as e:
                log.fatal('Failed to remove all instances: ' + str(e))
                sys.exit(1)
        log.info('All instances have been successfully removed')
    else:
        print("No instances to remove")
Example #2
def topology(request):
    """Create DS Deployment"""

    # Creating standalone instance ...
    if DEBUGGING:
        standalone = DirSrv(verbose=True)
    else:
        standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            standalone.stop(60)
        else:
            standalone.delete()

    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
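
A minimal sketch of a pytest test consuming this fixture. It assumes the usual pytest collection and that TopologyStandalone exposes the instance as .standalone, as the topology objects in the later examples (e.g. Example #5) do.

def test_standalone_is_running(topology):
    """Sanity check: the fixture should hand back a created, running instance."""
    standalone = topology.standalone  # attribute name is an assumption
    assert standalone.exists()
    assert standalone.status()
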
Example #3
def topology(request):
    # Master
    #
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {
        SER_HOST: HOST_MASTER,
        SER_PORT: PORT_MASTER,
        SER_SERVERID_PROP: SERVERID_MASTER
    }
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=ReplicaRole.MASTER,
                                     replicaId=REPLICAID_MASTER)

    # Consumer
    #
    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {
        SER_HOST: HOST_CONSUMER,
        SER_PORT: PORT_CONSUMER,
        SER_SERVERID_PROP: SERVERID_CONSUMER
    }
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Enable replication
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=ReplicaRole.CONSUMER)

    # Delete each instance in the end
    def fin():
        master.delete()
        consumer.delete()

    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
Example #4
def rootdn_bind(inst, uri=None, fail=False):
    """Helper function to test root DN bind
    """
    newinst = DirSrv(verbose=False)
    args = {SER_PORT: inst.port, SER_SERVERID_PROP: inst.serverid}
    newinst.allocate(args)
    newinst.open(uri=uri, connOnly=True)  # This binds as root dn
Example #5
def test_basic(topology_st, simple_allocate):
    """Check that all DS directories and systemd items were removed

    :id: 9e8bbcda-358d-4e9c-a38c-9b4c3b63308e
    :parametrized: yes
    """

    inst = topology_st.standalone

    # FreeIPA uses local_simple_allocate for the removal process
    if simple_allocate:
        inst = DirSrv(verbose=inst.verbose)
        inst.local_simple_allocate(topology_st.standalone.serverid)

    remove_ds_instance(inst)

    paths = [
        inst.ds_paths.backup_dir, inst.ds_paths.cert_dir,
        inst.ds_paths.config_dir, inst.ds_paths.db_dir,
        inst.get_changelog_dir(), inst.ds_paths.ldif_dir,
        inst.ds_paths.lock_dir, inst.ds_paths.log_dir
    ]
    for path in paths:
        assert not os.path.exists(path)

    try:
        subprocess.check_output(
            ['systemctl', 'is-enabled', 'dirsrv@{}'.format(inst.serverid)],
            encoding='utf-8')
    except subprocess.CalledProcessError as ex:
        assert "disabled" in ex.output
Example #6
    def setUp(self):
        #
        # Master
        #
        # Create the master instance
        master = DirSrv(verbose=False)
        master.log.debug("Master allocated")
        args = {
            SER_HOST: HOST_MASTER,
            SER_PORT: PORT_MASTER,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: SERVERID_MASTER
        }
        master.allocate(args)
        if master.exists():
            master.delete()
        master.create()
        master.open()

        # enable replication
        master.replica.enableReplication(suffix=SUFFIX,
                                         role=REPLICAROLE_MASTER,
                                         replicaId=REPLICAID_MASTER)
        self.master = master

        #
        # Consumer
        #
        # Create the consumer instance
        consumer = DirSrv(verbose=False)
        consumer.log.debug("Consumer allocated")
        args = {
            SER_HOST: HOST_CONSUMER,
            SER_PORT: PORT_CONSUMER,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: SERVERID_CONSUMER
        }
        consumer.allocate(args)
        if consumer.exists():
            consumer.delete()
        consumer.create()
        consumer.open()

        # enable replication
        consumer.replica.enableReplication(suffix=SUFFIX,
                                           role=REPLICAROLE_CONSUMER)
        self.consumer = consumer
Example #7
def setup():
    global conn
    conn = DirSrv(**config.auth)
    conn.verbose = True
    conn.added_entries = []
    conn.added_backends = set(['o=mockbe2'])
    conn.added_replicas = []
    harn_nolog()
Example #8
def getReport():
    # Capture data about stopped instances.
    # Return a report (i.e. a list of (sectionName, text) tuples).
    # Let's determine the list of instances
    report = []

    def addSection(name, text):
        report.append((name, text))

    instancesOK = []
    instancesKO = []
    for instdir in DirSrv().list(all=True):
        inst = DirSrv()
        inst.allocate(instdir)
        if inst.status():
            instancesOK.append(inst)
        else:
            instancesKO.append(inst)
    text = ""
    # Let's generate the report
    addSection("Running instances",
               loglist([i.getServerId() for i in instancesOK]))
    addSection("Stopped instances",
               loglist([i.getServerId() for i in instancesKO]))

    # Get core file information
    addSection("Core files", logcorefiles())

    # Get asan file information
    report.extend(logasanfiles())

    # Get error log information on stopped servers
    # By default we only log an extract of error log:
    #   Critical, Emergency and standard errors
    #   and the final "server stopped" info line (that denotes a clean stop)
    for inst in instancesKO:
        # Log extract of error log
        path = inst.ds_paths.error_log.format(instance_name=inst.getServerId())
        addSection(f"Extract of instance {inst.getServerId()} error log",
                   logErrors(path))
        # And dbscan -L output
        addSection(f"Database info for instance {inst.getServerId()}",
                   logDbscan(inst))

    return report
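
Since getReport() only returns a list of (sectionName, text) tuples, rendering it is trivial; a minimal sketch (the heading style is arbitrary):

def print_report(report):
    """Dump a report produced by getReport() as plain-text sections."""
    for name, text in report:
        print("===== {} =====".format(name))
        print(text)
        print()

# print_report(getReport())
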
Example #9
def setup():
    global conn
    try:
        conn = DirSrv(**config.auth)
        conn.verbose = True
        conn.added_entries = []
    except SERVER_DOWN as e:
        log.error("To run tests you need a working 389 instance %s" %
                  config.auth)
        raise e
Example #10
def connect_instance(ldapurl, binddn, verbose, starttls):
    dsargs = {SER_LDAP_URL: ldapurl, SER_ROOT_DN: binddn}
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    if not ds.can_autobind() and binddn is not None:
        dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " %
                                      (binddn, ldapurl))
    elif binddn is None:
        raise Exception("Must provide a binddn to connect with")
    ds.allocate(dsargs)
    ds.open(starttls=starttls)
    print("")
    return ds
Example #11
    def test_allocated_to_online(self, verbose):
        # Here the instance was already created, check we can connect to it
        # without creating it (especially without a serverid value)
        # Allocate the instance
        args = {
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID
        }
        self.instance.log.info("test_allocated_to_online: Create an instance")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        self.instance.allocate(args)
        self.instance.create()
        self.instance.open()
        assert self.instance.serverid is not None

        # The instance is created, allocate a new DirSrv
        self.instance.log.info("test_allocated_to_online: instance New")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        assert self.instance.state == DIRSRV_STATE_INIT

        args = {
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
        }
        self.instance.allocate(args)
        self.instance.log.info("test_allocated_to_online: instance Allocated")
        assert self.instance.serverid is None
        assert self.instance.state == DIRSRV_STATE_ALLOCATED

        self.instance.open()
        self.instance.log.info("test_allocated_to_online: instance online")
        assert self.instance.serverid is not None
        assert self.instance.serverid == self.instance.inst
        assert self.instance.state == DIRSRV_STATE_ONLINE
Example #12
def setup():
    # uses an existing 389 instance
    # add a suffix
    # add an agreement
    # This setup is quite verbose, but to test DirSrv methods we should
    # do things manually. A better solution would be to use an LDIF.
    global conn
    conn = DirSrv(**config.auth)
    conn.verbose = True
    conn.added_entries = []
    conn.added_backends = set(['o=mockbe1'])
    conn.added_replicas = []
    """
Example #13
    def setUp(self):
        instance = DirSrv(verbose=False)
        instance.log.debug("Instance allocated")
        args = {SER_HOST:          LOCALHOST,
                SER_PORT:          INSTANCE_PORT,
                SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
                SER_SERVERID_PROP: INSTANCE_SERVERID
                }
        instance.allocate(args)
        if instance.exists():
            instance.delete()
        instance.create()
        instance.open()
        self.instance = instance
Example #14
def test_user_search_performance():
    inst = DirSrv(verbose=True)
    inst.remote_simple_allocate(f"ldaps://{TARGET_HOST}", password="******")
    # Need a better way to set this.
    inst.host = TARGET_HOST
    inst.port = TARGET_PORT
    inst.open(reqcert=ldap.OPT_X_TLS_NEVER)
    assert_data_present(inst)
    r1 = _do_search_performance(inst, 1)
    # r2 = _do_search_performance(inst, 4)
    # r3 = _do_search_performance(inst, 6)
    # r4 = _do_search_performance(inst, 8)
    # r5 = _do_search_performance(inst, 12)
    r6 = _do_search_performance(inst, 16)
Example #15
def topology(request):
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {
        SER_HOST: HOST_MASTER,
        SER_PORT: PORT_MASTER,
        SER_SERVERID_PROP: SERVERID_MASTER
    }
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {
        SER_HOST: HOST_CONSUMER,
        SER_PORT: PORT_CONSUMER,
        SER_SERVERID_PROP: SERVERID_CONSUMER
    }
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Delete each instance in the end
    def fin():
        master.delete()
        consumer.delete()

    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
Example #16
def topology(request):
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    args = {SER_PORT: INSTANCE_PORT, SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()

    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #17
def instance_remove_all(log, args):
    """Remove all instances - clean sweep!
    """

    inst_names = get_instance_list()
    if len(inst_names) > 0:
        log.info("""
About to remove all Directory Server instances!
If this is not what you want, press ctrl-c now ...
        """)
        for i in range(1, 6):
            log.info('%s ...' % (6 - int(i)))
            time.sleep(1)

        # Do it!
        list_inst = DirSrv(verbose=args.verbose)
        insts = list_inst.list(all=True, serverid=inst_names[0])
        no_problems = True
        for inst in insts:
            remove_inst = DirSrv(verbose=args.verbose)
            remove_inst.allocate(inst)
            try:
                log.info("Removing instance: slapd-" +
                         str(remove_inst.serverid))
                remove_ds_instance(remove_inst)
            except Exception as e:
                log.error(
                    f'Failed to remove slapd-{remove_inst.serverid} - Error: {str(e)}'
                )
                no_problems = False
        if no_problems:
            log.info('All instances have been successfully removed')
        else:
            log.info('Some instances were not removed')
    else:
        print("No instances to remove")
Example #18
    def _remove_failed_install(self, serverid):
        """The install failed, remove the scraps
        :param serverid - The server ID of the instance
        """
        inst = DirSrv()

        # Allocate the instance based on name
        insts = []
        insts = inst.list(serverid=serverid)

        if len(insts) != 1:
            self.log.error("No such instance to remove {}".format(serverid))
            return
        inst.allocate(insts[0])
        remove_ds_instance(inst, force=True)
Example #19
def topology(request):

    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    args = {SER_PORT: INSTANCE_PORT, SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()

    # Create the setupDs
    lc = LogCapture()
    # Give it the right types.
    sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)

    # Get the dicts from Type2Base, as though they were from _validate_ds_2_config
    # IE get the defaults back just from Slapd2Base.collect
    # Override instance name, root password, port and secure port.

    general_options = General2Base(lc.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(lc.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    sds.create_from_args(general, slapd, {}, None)
    # Make sure we can connect
    instance.open()

    # Create the example backend with sample entries.
    instance.backends.create(properties={
        'cn': ['userRoot'],
        'nsslapd-suffix': ['dc=example,dc=com'],
    })

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()

    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #20
def topology(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            # SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #21
    def _offlineDirsrv(args):
        '''
            Function to allocate an offline DirSrv instance.
            This instance is not initialized with the Directory instance
            (__localinit__() and __add_brookers__() are not called)
            The properties set are:
                instance.host
                instance.port
                instance.serverid
                instance.inst
                instance.prefix
                instance.backup
        '''
        from lib389 import DirSrv
        instance = DirSrv(verbose=True)
        instance.allocate(args)

        return instance
Example #22
def connect_instance(dsrc_inst, verbose):
    dsargs = dsrc_inst['args']
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    if not ds.can_autobind() and dsrc_inst['binddn'] is not None:
        dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " %
                                      (dsrc_inst['binddn'], dsrc_inst['uri']))
    elif not ds.can_autobind() and dsrc_inst['binddn'] is None:
        raise Exception("Must provide a binddn to connect with")
    ds.allocate(dsargs)
    ds.open(saslmethod=dsrc_inst['saslmech'],
            certdir=dsrc_inst['tls_cacertdir'],
            reqcert=dsrc_inst['tls_reqcert'],
            usercert=dsrc_inst['tls_cert'],
            userkey=dsrc_inst['tls_key'],
            starttls=dsrc_inst['starttls'],
            connOnly=True)
    return ds
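
A minimal usage sketch for the function above. The dsrc_inst keys mirror exactly what the function reads; the URL, bind DN, and the assumption that the SER_* constants live in lib389.properties are placeholders, not a documented contract.

from lib389.properties import SER_LDAP_URL, SER_ROOT_DN

# Hypothetical connection description shaped like the dsrc_inst dict used above.
dsrc_inst = {
    'uri': 'ldap://localhost:389',
    'binddn': 'cn=Directory Manager',
    'args': {SER_LDAP_URL: 'ldap://localhost:389',
             SER_ROOT_DN: 'cn=Directory Manager'},
    'saslmech': None,
    'tls_cacertdir': None,
    'tls_cert': None,
    'tls_key': None,
    'tls_reqcert': None,
    'starttls': False,
}
# ds = connect_instance(dsrc_inst, verbose=False)
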
Example #23
def topology(request):
    # Create the realm
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    args = {SER_PORT: INSTANCE_PORT,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #24
def topology(request):
    instance = DirSrv(verbose=False)

    args = {
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_SERVERID_PROP: INSTANCE_SERVERID
    }
    instance.allocate(args)

    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists():
            instance.delete()

    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #25
def create_instance(config_attr):
    log.info('create_instance - Installs the instance and Sets the value of InstScriptsEnabled to true OR false.')

    log.info("Set up the instance and set the config_attr")
    instance_data = generate_ds_params(1, ReplicaRole.STANDALONE)
    # Create instance
    standalone = DirSrv(verbose=False)

    # Args for the instance
    args_instance[SER_HOST] = instance_data[SER_HOST]
    args_instance[SER_PORT] = instance_data[SER_PORT]
    args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_INST_SCRIPTS_ENABLED] = config_attr
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    return standalone
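
A minimal usage sketch, assuming config_attr takes the string values 'true' or 'false' as the log message above implies, and that the caller is responsible for cleanup:

# Hypothetical driver: build an instance with instance scripts enabled, then clean up.
standalone = create_instance('true')
try:
    assert standalone.exists()
finally:
    standalone.delete()
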
Example #26
def topology(request):
    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete each instance in the end
    def fin():
        #standalone.delete()
        pass
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #27
def topology(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    args = {
        SER_PORT: INSTANCE_PORT,
        # SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
        SER_SERVERID_PROP: INSTANCE_SERVERID
    }
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    standalone.config.set('nsslapd-accesslog-logbuffering', 'off')

    def fin():
        standalone.delete()

    request.addfinalizer(fin)
    # We have to wait for time to elapse for the access log to be flushed.

    return TopologyStandalone(standalone)
Example #28
def connect_instance(dsrc_inst, verbose, args):
    dsargs = dsrc_inst['args']
    if '//' not in dsargs['ldapurl']:
        # Connecting to the local instance
        dsargs['server-id'] = dsargs['ldapurl']
        # We have an instance name - generate url from dse.ldif
        ldapurl, certdir = get_ldapurl_from_serverid(dsargs['ldapurl'])
        if ldapurl is not None:
            dsargs['ldapurl'] = ldapurl
            if 'ldapi://' in ldapurl:
                dsargs['ldapi_enabled'] = 'on'
                dsargs['ldapi_socket'] = ldapurl.replace('ldapi://', '')
                dsargs['ldapi_autobind'] = 'on'
            elif 'ldaps://' in ldapurl:
                dsrc_inst['tls_cert'] = certdir
        else:
            # The instance name does not match any instances
            raise ValueError("Could not find configuration for instance: " + dsargs['ldapurl'])

    ds = DirSrv(verbose=verbose)
    # We do an empty allocate here to determine if we can autobind ... (really
    # we should actually be inspecting the URL ...)
    ds.allocate(dsargs)

    if args.pwdfile is not None or args.bindpw is not None or args.prompt is True:
        if args.pwdfile is not None:
            # Read password from file
            try:
                with open(args.pwdfile, "r") as f:
                    dsargs[SER_ROOT_PW] = f.readline().rstrip()
            except EnvironmentError as e:
                raise ValueError("Failed to open password file: " + str(e))
        elif args.bindpw is not None:
            # Password provided
            # This shouldn't be needed? dsrc already inherits the args ...
            dsargs[SER_ROOT_PW] = args.bindpw
        else:
            # No password or we chose to prompt
            dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(dsrc_inst['binddn'], dsrc_inst['uri']))
    elif not ds.can_autobind():
        # No LDAPI, prompt for password, and bind DN if necessary
        if dsrc_inst['binddn'] is None:
            dn = ""
            while dn == "":
                dn = input("Enter Bind DN: ")
            dsrc_inst['binddn'] = dn
        dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(dsrc_inst['binddn'], dsrc_inst['uri']))

    # Allocate is an awful interface that we should stop using, but for now
    # just directly map the dsrc_inst args in (remember, dsrc_inst DOES
    # overlay cli args into the map ...)
    dsargs[SER_ROOT_DN] = dsrc_inst['binddn']

    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    ds.open(saslmethod=dsrc_inst['saslmech'],
            certdir=dsrc_inst['tls_cacertdir'],
            reqcert=dsrc_inst['tls_reqcert'],
            usercert=dsrc_inst['tls_cert'],
            userkey=dsrc_inst['tls_key'],
            starttls=dsrc_inst['starttls'], connOnly=True)
    if ds.serverid is not None and ds.serverid.startswith("slapd-"):
        ds.serverid = ds.serverid.replace("slapd-", "", 1)
    return ds
Example #29
    def _install_ds(self, general, slapd, backends):
        """
        Actually install the Ds from the dicts provided.

        You should never call this directly, as it bypasses assertions.
        """
        ######################## WARNING #############################
        # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
        # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
        # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
        #
        # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

        ### This first section is about creating the *minimal* required paths and config to get
        # directory server to start: After this, we then perform all configuration as online
        # changes from after this point.

        # Create dse.ldif with a temporary root password.
        # This is done first, because instances are found for removal and listing by detecting
        # the presence of their dse.ldif!!!!
        # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
        # Variables are done with %KEY%.
        self.log.debug("ACTION: Creating dse.ldif")
        try:
            os.umask(
                0o007
            )  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd['config_dir'], mode=0o770)
        except OSError:
            pass

        # Get suffix for some plugin defaults (if possible)
        # annoyingly for legacy compat backend takes TWO key types
        # and we have to now deal with that ....
        #
        # Create ds_suffix here else it won't be in scope ....
        ds_suffix = ''
        if len(backends) > 0:
            ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

        dse = ""
        with open(
                os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                             'template-dse.ldif')) as template_dse:
            for line in template_dse.readlines():
                dse += line.replace('%', '{', 1).replace('%', '}', 1)

        # Check if we are in a container; if so, don't use /dev/shm for the db home dir,
        # as containers typically don't allocate enough space for /dev/shm and we don't
        # want to unexpectedly break the server after an upgrade.
        #
        # If we know we are in a container, we don't need to re-detect via systemd.
        # It actually turns out that adding systemd-detect-virt pulls in systemd, which
        # subsequently breaks containers starting, as instance.start then believes it
        # COULD check the ds status. The times we need to check for systemd are mainly
        # in other environments that use systemd natively in their containers.
        container_result = 1
        if not self.containerised:
            container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                              stdout=subprocess.PIPE)
        if self.containerised or container_result.returncode == 0:
            # In a container, set the db_home_dir to the db path
            self.log.debug(
                "Container detected setting db home directory to db directory."
            )
            slapd['db_home_dir'] = slapd['db_dir']

        with open(os.path.join(slapd['config_dir'], 'dse.ldif'),
                  'w') as file_dse:
            dse_fmt = dse.format(
                schema_dir=slapd['schema_dir'],
                lock_dir=slapd['lock_dir'],
                tmp_dir=slapd['tmp_dir'],
                cert_dir=slapd['cert_dir'],
                ldif_dir=slapd['ldif_dir'],
                bak_dir=slapd['backup_dir'],
                run_dir=slapd['run_dir'],
                inst_dir=slapd['inst_dir'],
                log_dir=slapd['log_dir'],
                fqdn=general['full_machine_name'],
                ds_port=slapd['port'],
                ds_user=slapd['user'],
                rootdn=slapd['root_dn'],
                instance_name=slapd['instance_name'],
                ds_passwd=self.
                _secure_password,  # We set our own password here, so we can connect and mod.
                # This is because we never know the users input root password as they can validly give
                # us a *hashed* input.
                ds_suffix=ds_suffix,
                config_dir=slapd['config_dir'],
                db_dir=slapd['db_dir'],
                db_home_dir=slapd['db_home_dir'],
                db_lib=slapd['db_lib'],
                ldapi_enabled="on",
                ldapi=slapd['ldapi'],
                ldapi_autobind="on",
            )
            file_dse.write(dse_fmt)

        self.log.info("Create file system structures ...")
        # Create all the needed paths
        # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
        for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir',
                     'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
            self.log.debug("ACTION: creating %s", slapd[path])
            try:
                os.umask(
                    0o007
                )  # For parent dirs that get created -> sets 770 for perms
                os.makedirs(slapd[path], mode=0o770)
            except OSError:
                pass
            os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

        # /var/lock/dirsrv needs special attention...
        parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
        os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

        ### Warning! We need to own the directory under db too for .restore to work.
        # During a restore, the db dir is deleted and recreated, which is why we need
        # to own it for a restore.
        #
        # However, in a container, we can't always guarantee this due to how the volumes
        # work and are mounted. Specifically, if we have an anonymous volume we will
        # NEVER be able to own it, but in a true deployment it is reasonable to expect
        # we DO own it. Thus why we skip it in this specific context
        if not self.containerised:
            db_parent = os.path.join(slapd['db_dir'], '..')
            os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

        # Copy correct data to the paths.
        # Copy in the schema
        #  This is a little fragile, make it better.
        # It won't matter when we move schema to usr anyway ...

        _ds_shutil_copytree(
            os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
            slapd['schema_dir'])
        os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
        os.chmod(slapd['schema_dir'], 0o770)

        # Copy in the collation
        srcfile = os.path.join(slapd['sysconf_dir'],
                               'dirsrv/config/slapd-collations.conf')
        dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
        shutil.copy(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # Copy in the certmap configuration
        srcfile = os.path.join(slapd['sysconf_dir'],
                               'dirsrv/config/certmap.conf')
        dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
        shutil.copy(srcfile, dstfile)
        os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
        os.chmod(dstfile, 0o440)

        # If we are on the correct platform settings, systemd
        if general['systemd']:
            # Should create the symlink we need, but without starting it.
            result = subprocess.run(
                ["systemctl", "enable",
                 "dirsrv@%s" % slapd['instance_name']],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            args = ' '.join(ensure_list_str(result.args))
            stdout = ensure_str(result.stdout)
            stderr = ensure_str(result.stderr)
            # Systemd encodes some odd characters into its symlink output on newer versions which
            # can trip up the logger.
            self.log.debug(
                f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode(
                    "utf-8"))

            # Setup tmpfiles_d
            tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd[
                'instance_name'] + ".conf"
            with open(tmpfile_d, "w") as TMPFILE_D:
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['run_dir'], slapd['user'], slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['lock_dir'].replace(
                        "slapd-" + slapd['instance_name'], ""), slapd['user'],
                    slapd['group']))
                TMPFILE_D.write("d {} 0770 {} {}\n".format(
                    slapd['lock_dir'], slapd['user'], slapd['group']))

        # Else we need to detect other init scripts?
        # WB: No, we just install and assume that docker will start us ...

        # Bind sockets to our type?

        # Create certdb in sysconfidir
        self.log.debug("ACTION: Creating certificate database is %s",
                       slapd['cert_dir'])

        # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
        # If at all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

        # Should I move this import? I think this prevents some recursion
        from lib389 import DirSrv
        ds_instance = DirSrv(self.verbose, containerised=self.containerised)
        if self.containerised:
            ds_instance.systemd_override = general['systemd']

        # By default SUSE does something extremely silly - it creates a hostname
        # that CANT be resolved by DNS. As a result this causes all installs to
        # fail. We need to guarantee that we only connect to localhost here, as
        # it's the only stable and guaranteed way to connect to the instance
        # at this point.
        #
        # Use ldapi which would prevent the need
        # to configure a temp root pw in the setup phase.
        args = {
            SER_HOST: "localhost",
            SER_PORT: slapd['port'],
            SER_SERVERID_PROP: slapd['instance_name'],
            SER_ROOT_DN: slapd['root_dn'],
            SER_ROOT_PW: self._raw_secure_password,
            SER_DEPLOYED_DIR: slapd['prefix'],
            SER_LDAPI_ENABLED: 'on',
            SER_LDAPI_SOCKET: slapd['ldapi'],
            SER_LDAPI_AUTOBIND: 'on'
        }

        ds_instance.allocate(args)
        # Does this work?
        assert_c(ds_instance.exists(),
                 "Instance failed to install, does not exist when expected")

        # Create a certificate database.
        tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
        if not tlsdb._db_exists():
            tlsdb.reinit()

        if slapd['self_sign_cert']:
            self.log.info("Create self-signed certificate database ...")
            etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
            ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
            ssca = NssSsl(dbpath=ssca_path)
            # If it doesn't exist, create a CA DB
            if not ssca._db_exists():
                ssca.reinit()
                ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # If CA is expired or will expire soon,
            # Reissue it and resign the existing certs that were signed by the cert previously
            elif ssca.rsa_ca_needs_renew():
                ca = ssca.renew_rsa_ca(
                    months=slapd['self_sign_cert_valid_months'])
                # Import CA to the existing instances except the one we install now (we import it later)
                for dir in os.listdir(etc_dirsrv_path):
                    if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                        tlsdb_inst = NssSsl(
                            dbpath=os.path.join(etc_dirsrv_path, dir))
                        tlsdb_inst.import_rsa_crt(ca)

            csr = tlsdb.create_rsa_key_and_csr(
                alt_names=[general['full_machine_name']])
            (ca, crt) = ssca.rsa_ca_sign_csr(csr)
            tlsdb.import_rsa_crt(ca, crt)
            if general['selinux']:
                # Set selinux port label
                selinux_label_port(slapd['secure_port'])

        # Do selinux fixups
        if general['selinux']:
            self.log.info("Perform SELinux labeling ...")
            selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                             'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                             'run_dir', 'schema_dir', 'tmp_dir')
            for path in selinux_paths:
                selinux_restorecon(slapd[path])

            selinux_label_port(slapd['port'])

        # Start the server
        # Make changes using the temp root
        self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
        self.log.debug(
            f"libfaketime installed ={'libfaketime' in sys.modules}")
        assert_c(
            not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
            "libfaketime python module is incompatible with ASAN build.")
        ds_instance.start(timeout=60)
        ds_instance.open()

        # In some cases we may want to change log settings
        # ds_instance.config.enable_log('audit')

        # Create the configs related to this version.
        base_config = get_config(general['defaults'])
        base_config_inst = base_config(ds_instance)
        base_config_inst.apply_config(install=True)

        # Setup TLS with the instance.

        # We *ALWAYS* set secure port, even if security is off, because it breaks
        # tests with standalone.enable_tls if we do not. It's only when security; on
        # that we actually start listening on it.
        if not slapd['secure_port']:
            slapd['secure_port'] = "636"
        ds_instance.config.set('nsslapd-secureport',
                               '%s' % slapd['secure_port'])
        if slapd['self_sign_cert']:
            ds_instance.config.set('nsslapd-security', 'on')

        # Before we create any backends, create any extra default indexes that may be
        # dynamically provisioned, rather than from template-dse.ldif. Looking at you
        # entryUUID (requires rust enabled).
        #
        # Indexes defaults to default_index_dn
        indexes = Indexes(ds_instance)
        if ds_instance.ds_paths.rust_enabled:
            indexes.create(
                properties={
                    'cn': 'entryUUID',
                    'nsSystemIndex': 'false',
                    'nsIndexType': ['eq', 'pres'],
                })

        # Create the backends as listed
        # Load example data if needed.
        for backend in backends:
            self.log.info(
                f"Create database backend: {backend['nsslapd-suffix']} ...")
            is_sample_entries_in_props = "sample_entries" in backend
            create_suffix_entry_in_props = backend.pop('create_suffix_entry',
                                                       False)
            ds_instance.backends.create(properties=backend)
            if not is_sample_entries_in_props and create_suffix_entry_in_props:
                # Set basic ACIs
                c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
                suffix_rdn_attr = backend['nsslapd-suffix'].split(
                    '=')[0].lower()
                if suffix_rdn_attr == 'dc':
                    domain = create_base_domain(ds_instance,
                                                backend['nsslapd-suffix'])
                    domain.add('aci', dc_aci)
                elif suffix_rdn_attr == 'o':
                    org = create_base_org(ds_instance,
                                          backend['nsslapd-suffix'])
                    org.add('aci', o_aci)
                elif suffix_rdn_attr == 'ou':
                    orgunit = create_base_orgunit(ds_instance,
                                                  backend['nsslapd-suffix'])
                    orgunit.add('aci', ou_aci)
                elif suffix_rdn_attr == 'cn':
                    cn = create_base_cn(ds_instance, backend['nsslapd-suffix'])
                    cn.add('aci', cn_aci)
                elif suffix_rdn_attr == 'c':
                    c = create_base_c(ds_instance, backend['nsslapd-suffix'])
                    c.add('aci', c_aci)
                else:
                    # Unsupported rdn
                    raise ValueError(
                        "Suffix RDN '{}' in '{}' is not supported.  Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'"
                        .format(suffix_rdn_attr, backend['nsslapd-suffix']))

        # Create all required sasl maps: if we have a single backend ...
        # our default maps are really really bad, and we should feel bad.
        # they basically only work with a single backend, and they'll break
        # GSSAPI in some cases too :(
        if len(backends) > 0:
            backend = backends[0]
            self.log.debug("Adding sasl maps for suffix %s" %
                           backend['nsslapd-suffix'])
            saslmappings = SaslMappings(ds_instance)
            saslmappings.create(
                properties={
                    'cn': 'rfc 2829 u syntax',
                    'nsSaslMapRegexString': '^u:\\(.*\\)',
                    'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                    'nsSaslMapFilterTemplate': '(uid=\\1)'
                })
            # I think this is for LDAPI
            saslmappings.create(
                properties={
                    'cn': 'uid mapping',
                    'nsSaslMapRegexString': '^[^:@]+$',
                    'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                    'nsSaslMapFilterTemplate': '(uid=&)'
                })
        else:
            self.log.debug("Skipping default SASL maps - no backend found!")

        self.log.info("Perform post-installation tasks ...")
        # Change the root password finally
        ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

        # We need to log the password when containerised
        if self.containerised:
            self.log.debug("Root DN password: {}".format(
                slapd['root_password']))

        # Complete.
        if general['start']:
            # Restart for changes to take effect - this could be removed later
            ds_instance.restart(post_open=False)
        else:
            # Just stop the instance now.
            ds_instance.stop()

        self.log.debug(" 🎉 Instance setup complete")
Example #30
    def _prepare_ds(self, general, slapd, backends):
        self.log.info("Validate installation settings ...")
        assert_c(general['defaults'] is not None,
                 "Configuration defaults in section [general] not found")
        self.log.debug("PASSED: using config settings %s" %
                       general['defaults'])
        # Validate our arguments.
        assert_c(slapd['user'] is not None,
                 "Configuration user in section [slapd] not found")
        # check the user exists
        assert_c(pwd.getpwnam(slapd['user']),
                 "user %s not found on system" % slapd['user'])
        slapd['user_uid'] = pwd.getpwnam(slapd['user']).pw_uid
        assert_c(slapd['group'] is not None,
                 "Configuration group in section [slapd] not found")
        assert_c(grp.getgrnam(slapd['group']),
                 "group %s not found on system" % slapd['group'])
        slapd['group_gid'] = grp.getgrnam(slapd['group']).gr_gid
        # check this group exists
        # Check that we are running as this user / group, or that we are root.
        assert_c(
            os.geteuid() == 0 or getpass.getuser() == slapd['user'],
            "Not running as user root or %s, may not have permission to continue"
            % slapd['user'])

        self.log.debug("PASSED: user / group checking")

        assert_c(
            general['full_machine_name'] is not None,
            "Configuration full_machine_name in section [general] not found")
        assert_c(
            general['strict_host_checking'] is not None,
            "Configuration strict_host_checking in section [general] not found"
        )
        if general['strict_host_checking'] is True:
            # Check it resolves with dns
            assert_c(
                socket.gethostbyname(general['full_machine_name']),
                "Strict hostname check failed. Check your DNS records for %s" %
                general['full_machine_name'])
            self.log.debug("PASSED: Hostname strict checking")

        assert_c(slapd['prefix'] is not None,
                 "Configuration prefix in section [slapd] not found")
        if (slapd['prefix'] != ""):
            assert_c(os.path.exists(slapd['prefix']),
                     "Prefix location '%s' not found" % slapd['prefix'])
        self.log.debug("PASSED: prefix checking")

        # We need to know the prefix before we can do the instance checks
        assert_c(slapd['instance_name'] is not None,
                 "Configuration instance_name in section [slapd] not found")
        assert_c(
            len(slapd['instance_name']) <= 80,
            "Server identifier should not be longer than 80 symbols")
        assert_c(all(ord(c) < 128 for c in slapd['instance_name']),
                 "Server identifier can not contain non ascii characters")
        assert_c(' ' not in slapd['instance_name'],
                 "Server identifier can not contain a space")
        assert_c(
            slapd['instance_name'] != 'admin',
            "Server identifier \"admin\" is reserved, please choose a different identifier"
        )

        # Check that valid characters are used
        safe = re.compile(r'^[:\w@_-]+$').search
        assert_c(
            bool(safe(slapd['instance_name'])),
            "Server identifier has invalid characters, please choose a different value"
        )

        # Check if the instance exists or not.
        # Should I move this import? I think this prevents some recursion
        from lib389 import DirSrv
        ds = DirSrv(verbose=self.verbose)
        ds.containerised = self.containerised
        ds.prefix = slapd['prefix']
        insts = ds.list(serverid=slapd['instance_name'])
        assert_c(
            len(insts) == 0, "Another instance named '%s' may already exist" %
            slapd['instance_name'])

        self.log.debug("PASSED: instance checking")

        assert_c(slapd['root_dn'] is not None,
                 "Configuration root_dn in section [slapd] not found")
        # Assert this is a valid DN
        assert_c(is_a_dn(slapd['root_dn']),
                 "root_dn in section [slapd] is not a well formed LDAP DN")
        assert_c(
            slapd['root_password'] is not None
            and slapd['root_password'] != '',
            "Configuration attribute 'root_password' in section [slapd] not found"
        )
        if len(slapd['root_password']) < 8:
            raise ValueError(
                "root_password must be at least 8 characters long")

        # Check if pre-hashed or not.
        # !!!!!!!!!!!!!!

        # Right now, the way that rootpw works on ns-slapd, it force hashes the pw
        # see https://fedorahosted.org/389/ticket/48859
        if not re.match('^([A-Z0-9]+).*$', slapd['root_password']):
            # We need to hash it. Call pwdhash-bin.
            # slapd['root_password'] = password_hash(slapd['root_password'], prefix=slapd['prefix'])
            pass
        else:
            pass

        # Create a random string
        # Hash it.
        # This will be our temporary rootdn password so that we can do
        # live mods and setup rather than static ldif manipulations.
        self._raw_secure_password = password_generate()
        self._secure_password = password_hash(self._raw_secure_password,
                                              bin_dir=slapd['bin_dir'])

        self.log.debug("INFO: temp root password set to %s" %
                       self._raw_secure_password)
        self.log.debug("PASSED: root user checking")

        assert_c(slapd['port'] is not None,
                 "Configuration port in section [slapd] not found")

        if self.containerised:
            if slapd['port'] <= 1024:
                self.log.warning(
                    "WARNING: slapd port %s may not work without NET_BIND_SERVICE in containers"
                    % slapd['port'])
            if slapd['secure_port'] <= 1024:
                self.log.warning(
                    "WARNING: slapd secure_port %s may not work without NET_BIND_SERVICE in containers"
                    % slapd['secure_port'])
        assert_c(
            socket_check_open('::1', slapd['port']) is False,
            "port %s is already in use, or missing NET_BIND_SERVICE" %
            slapd['port'])
        # We enable secure port by default.
        assert_c(slapd['secure_port'] is not None,
                 "Configuration secure_port in section [slapd] not found")
        assert_c(
            socket_check_open('::1', slapd['secure_port']) is False,
            "secure_port %s is already in use, or missing NET_BIND_SERVICE" %
            slapd['secure_port'])
        self.log.debug("PASSED: network avaliability checking")

        # Make assertions of the paths?

        # Make assertions of the backends?
        # First fix some compat shenanigans. I hate legacy ...
        for be in backends:
            for k in BACKEND_PROPNAME_TO_ATTRNAME:
                if k in be:
                    be[BACKEND_PROPNAME_TO_ATTRNAME[k]] = be[k]
                    del (be[k])
        for be in backends:
            assert_c('nsslapd-suffix' in be)
            assert_c('cn' in be)