Example #1
def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw,
                                     is_encrypted):
    """Check if unhashed#user#password attribute value is encrypted or not"""

    if ds_supports_new_changelog():
        log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
        dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog')
    else:
        changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir),
                                       DEFAULT_CHANGELOG_DB)
        for dbfile in os.listdir(changelog_dbdir):
            if dbfile.endswith('.db'):
                changelog_dbfile = os.path.join(changelog_dbdir, dbfile)
                log.info(
                    'Changelog dbfile exists: {}'.format(changelog_dbfile))
        log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
        dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile)

    count = 0
    for entry in dbscanOut.split(b'dbid: '):
        if ensure_bytes('operation: {}'.format(change_type)) in entry and\
           ensure_bytes(ATTRIBUTE) in entry and ensure_bytes(user_dn.lower()) in entry.lower():
            count += 1
            user_pw_attr = ensure_bytes('{}: {}'.format(ATTRIBUTE, user_pw))
            if is_encrypted:
                assert user_pw_attr not in entry, 'Changelog entry contains clear text password'
            else:
                assert user_pw_attr in entry, 'Changelog entry does not contain clear text password'
    assert count, 'Operation type and DN of the entry not matched in changelog'
Example #2
def _check_unhashed_userpw(inst, user_dn, is_present=False):
    """Check if unhashed#user#password attribute is present or not in the changelog"""
    unhashed_pwd_attribute = 'unhashed#user#password'

    if ds_supports_new_changelog():
        dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog')
    else:
        changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir),
                                       DEFAULT_CHANGELOG_DB)
        for dbfile in os.listdir(changelog_dbdir):
            if dbfile.endswith('.db'):
                changelog_dbfile = os.path.join(changelog_dbdir, dbfile)
                log.info(
                    'Changelog dbfile exists: {}'.format(changelog_dbfile))
        log.info('Running dbscan -f to check {} attr'.format(
            unhashed_pwd_attribute))
        dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile)

    for entry in dbscanOut.split(b'dbid: '):
        if ensure_bytes('operation: modify') in entry and ensure_bytes(
                user_dn) in entry and ensure_bytes('userPassword') in entry:
            if is_present:
                assert ensure_bytes(unhashed_pwd_attribute) in entry
            else:
                assert ensure_bytes(unhashed_pwd_attribute) not in entry
Example #3
    def changes(self, agmnt_dn):
        """Get a number of changes sent by this agreement.

        :param agmtdn: agreement dn
        :type agmtdn: str

        :returns: Number of changes
        :raises: NoSuchEntryError - if agreement entry with changes
                  attribute is not found
        """

        retval = 0
        try:
            ent = self.conn.getEntry(ensure_str(agmnt_dn), ldap.SCOPE_BASE,
                                     "(objectclass=*)",
                                     [RA_PROPNAME_TO_ATTRNAME[RA_CHANGES]])
        except:
            raise NoSuchEntryError("Error reading status from agreement",
                                   agmnt_dn)

        if ent.nsds5replicaChangesSentSinceStartup:
            val = ent.nsds5replicaChangesSentSinceStartup
            items = val.split(ensure_bytes(' '))
            if len(items) == 1:
                retval = int(items[0])
            else:
                for item in items:
                    ary = item.split(ensure_bytes(":"))
                    if ary and len(ary) > 1:
                        retval = retval + int(ary[1].split(
                            ensure_bytes("/"))[0])
        return retval
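
For reference, a minimal standalone sketch of the parsing above, run against fabricated sample values of nsds5replicaChangesSentSinceStartup; the per-replica "rid:sent/skipped" shape is an assumption for illustration, not something this code alone confirms.

def _sum_changes_sent(val):
    # Sum the "sent" counters from a nsds5replicaChangesSentSinceStartup value.
    total = 0
    items = val.split(b' ')
    if len(items) == 1:
        total = int(items[0])
    else:
        for item in items:
            ary = item.split(b':')
            if len(ary) > 1:
                # assumed per-item shape: b'<rid>:<sent>/<skipped>'
                total += int(ary[1].split(b'/')[0])
    return total

assert _sum_changes_sent(b'42') == 42            # single counter form
assert _sum_changes_sent(b'1:10/0 2:5/1') == 15  # fabricated per-replica form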
Example #4
    def runUpgrade(prefix, online=True):
        '''
        Run "setup-ds.pl --update"  We simply pass in one DirSrv isntance, and
        this will update all the instances that are in this prefix.  For the
        update to work we must fix/adjust the permissions of the scripts in:

            /prefix/lib[64]/dirsrv/slapd-INSTANCE/
        '''
        if ds_is_older('1.4.0'):
            libdir = os.path.join(_ds_paths.lib_dir, 'dirsrv')

            # Gather all the instances so we can adjust their permissions,
            # otherwise the setup script hits "permission denied" (see below)
            servers = []
            path = os.path.join(_ds_paths.sysconf_dir, 'dirsrv')
            for files in os.listdir(path):
                if files.startswith(
                        'slapd-') and not files.endswith('.removed'):
                    servers.append(os.path.join(libdir, files))

            if len(servers) == 0:
                # This should not happen
                log.fatal('runUpgrade: no servers found!')
                assert False
            '''
            The setup script calls things like /lib/dirsrv/slapd-instance/db2bak,
            etc, and when we run the setup perl script it gets permission denied
            as the default permissions are 750.  Adjust the permissions to 755.
            '''
            for instance in servers:
                for files in os.listdir(instance):
                    os.chmod(os.path.join(instance, files), 0o755)

            # Run the "upgrade"
            try:
                prog = os.path.join(_ds_paths.sbin_dir, PATH_SETUP_DS)
                process = subprocess.Popen([prog, '--update'],
                                           shell=False,
                                           stdin=subprocess.PIPE)
                # Answer the interactive questions, as "--update" currently does
                # not work with INF files
                process.stdin.write(b'yes\n')
                if (online):
                    process.stdin.write(b'online\n')
                    for x in servers:
                        process.stdin.write(ensure_bytes(DN_DM + '\n'))
                        process.stdin.write(ensure_bytes(PW_DM + '\n'))
                else:
                    process.stdin.write(b'offline\n')
                process.stdin.close()
                process.wait()
                if process.returncode != 0:
                    log.fatal('runUpgrade failed!  Error: %s ' %
                              process.returncode)
                    assert (False)
            except:
                log.fatal('runUpgrade failed!')
                raise
        else:
            pass
Example #5
def add_and_check(topo, plugin, attr, val, isvalid):
    """
    Helper function to add/replace attr: val and check the added value
    """
    if isvalid:
        log.info('Test %s: %s -- valid' % (attr, val))
        try:
            topo.ms["supplier1"].modify_s(
                plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))])
        except ldap.LDAPError as e:
            log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin +
                      ': error {}'.format(get_ldap_error_msg(e, 'desc')))
            assert False
    else:
        log.info('Test %s: %s -- invalid' % (attr, val))
        if plugin == CHANGELOG:
            try:
                topo.ms["supplier1"].modify_s(
                    plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))])
            except ldap.LDAPError as e:
                log.error('Expectedly failed to add ' + attr + ': ' + val +
                          ' to ' + plugin +
                          ': error {}'.format(get_ldap_error_msg(e, 'desc')))
        else:
            try:
                topo.ms["supplier1"].modify_s(
                    plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))])
            except ldap.LDAPError as e:
                log.error('Failed to add ' + attr + ': ' + val + ' to ' +
                          plugin +
                          ': error {}'.format(get_ldap_error_msg(e, 'desc')))

    try:
        entries = topo.ms["supplier1"].search_s(plugin, ldap.SCOPE_BASE,
                                                FILTER, [attr])
        if isvalid:
            if not entries[0].hasValue(attr, val):
                log.fatal('%s does not have expected (%s: %s)' %
                          (plugin, attr, val))
                assert False
        else:
            if plugin == CHANGELOG:
                if entries[0].hasValue(attr, val):
                    log.fatal('%s has unexpected (%s: %s)' %
                              (plugin, attr, val))
                    assert False
            else:
                if not entries[0].hasValue(attr, val):
                    log.fatal('%s does not have expected (%s: %s)' %
                              (plugin, attr, val))
                    assert False
    except ldap.LDAPError as e:
        log.fatal('Unable to search for entry %s: error %s' %
                  (plugin, get_ldap_error_msg(e, 'desc')))
        assert False
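
A hypothetical call of this helper, assuming the CHANGELOG DN and topo fixture from the surrounding test module; the attribute and duration values are only illustrative.

add_and_check(topo, CHANGELOG, 'nsslapd-changelogmaxage', '30d', True)    # expected to be accepted
add_and_check(topo, CHANGELOG, 'nsslapd-changelogmaxage', '-1d', False)   # expected to be rejected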
Example #6
def test_valid_operations_are_permitted(topo, setup, reset_logs):
    """Verify that valid operations are  permitted

    :id: bd4f83f6-fe9e-11e8-88f4-8c16451d917b
    :setup: Standalone
    :steps:
        1. Verify that valid operations are permitted
    :expectedresults:
        1. Should succeed.
    """
    assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on')
    assert topo.standalone.config.set(
        'nsslapd-disk-monitoring-logging-critical', 'on')
    assert topo.standalone.config.set('nsslapd-errorlog-level', '8')
    topo.standalone.restart()
    # Trying to delete nsslapd-disk-monitoring-threshold
    assert topo.standalone.modify_s(
        'cn=config',
        [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')])
    # Trying to add another value to nsslapd-disk-monitoring-threshold (check that it is not multivalued)
    topo.standalone.config.add('nsslapd-disk-monitoring-threshold', '2000001')
    # Trying to delete nsslapd-disk-monitoring
    assert topo.standalone.modify_s(
        'cn=config',
        [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring',
          ensure_bytes(
              str(
                  topo.standalone.search_s(
                      'cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)',
                      ['nsslapd-disk-monitoring'
                       ])[0]).split(' ')[2].split('\n\n')[0]))])
    # Trying to add another value to nsslapd-disk-monitoring
    topo.standalone.config.add('nsslapd-disk-monitoring', 'off')
    # Trying to delete nsslapd-disk-monitoring-grace-period
    assert topo.standalone.modify_s(
        'cn=config',
        [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')])
    # Trying to add another value to nsslapd-disk-monitoring-grace-period
    topo.standalone.config.add('nsslapd-disk-monitoring-grace-period', '61')
    # Trying to delete nsslapd-disk-monitoring-logging-critical
    assert topo.standalone.modify_s(
        'cn=config',
        [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical',
          ensure_bytes(
              str(
                  topo.standalone.search_s(
                      'cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)',
                      ['nsslapd-disk-monitoring-logging-critical'
                       ])[0]).split(' ')[2].split('\n\n')[0]))])
    # Trying to add another value to nsslapd-disk-monitoring-logging-critical
    assert topo.standalone.config.set(
        'nsslapd-disk-monitoring-logging-critical', 'on')
Example #7
 def _lint_check_tls_version(self):
     tls_min = self.get_attr_val('sslVersionMin')
     if tls_min is not None and tls_min < ensure_bytes('TLS1.1'):
         report = copy.deepcopy(DSELE0001)
         report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
         report['check'] = "encryption:check_tls_version"
         yield report
Example #8
def changelog_init(topo):
    """ changlog dir is not configuarable, just
    enable cn=Retro Changelog Plugin,cn=plugins,cn=config
    """
    log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')

    # bind as directory manager
    topo.ms["supplier1"].log.info("Bind as %s" % DN_DM)
    topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD)

    if not ds_supports_new_changelog():
        try:
            changelogdir = os.path.join(
                os.path.dirname(topo.ms["supplier1"].dbdir), 'changelog')
            topo.ms["supplier1"].modify_s(
                CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir',
                             ensure_bytes(changelogdir))])
        except ldap.LDAPError as e:
            log.error('Failed to modify ' + CHANGELOG +
                      ': error {}'.format(get_ldap_error_msg(e, 'desc')))
            assert False

    try:
        topo.ms["supplier1"].modify_s(
            RETROCHANGELOG,
            [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')])
    except ldap.LDAPError as e:
        log.error('Failed to enable ' + RETROCHANGELOG +
                  ': error {}'.format(get_ldap_error_msg(e, 'desc')))
        assert False

    # restart the server
    topo.ms["supplier1"].restart(timeout=10)
Example #9
def test_global_vs_local(topo, passw_policy, test_user, user_pasw):
    """Passwords rejected if its similar to uid, cn, sn, givenname, ou and mail attributes

    :id: dfd6cf5d-8bcd-4895-a691-a43ad9ec1be8
    :setup: Standalone instance
    :steps:
        1. Configure global password policy with PasswordCheckSyntax set to off
        2. Add users with cn, sn, uid, mail, givenname and userPassword attributes
        3. Replace userPassword similar to cn, sn, uid, givenname, ou and mail attributes
    :expectedresults:
        1. Disabling the local policy should PASS.
        2. Add users should PASS.
        3. Resetting userPasswords similar to cn, sn, uid, givenname, ou and mail attributes should PASS.
    """

    log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to off')
    topo.standalone.config.set('nsslapd-pwpolicy-local', 'off')

    conn = test_user.bind(PASSWORD)
    log.info('Replace userPassword attribute with {}'.format(user_pasw))
    try:
        try:
            conn.modify_s(test_user.dn, [(ldap.MOD_REPLACE, 'userPassword', ensure_bytes(user_pasw))])
        except ldap.LDAPError as e:
            log.fatal('Failed to replace userPassword: error {}'.format(e.message['desc']))
            raise e
    finally:
        conn.unbind_s()
        test_user.set('userPassword', PASSWORD)
Example #10
def test_trivial_passw_check(topo, passw_policy, test_user, user_pasw):
    """PasswordCheckSyntax attribute fails to validate cn, sn, uid, givenname, ou and mail attributes

    :id: bf9fe1ef-56cb-46a3-a6f8-5530398a06dc
    :setup: Standalone instance.
    :steps:
        1. Configure local password policy with PasswordCheckSyntax set to on.
        2. Add users with cn, sn, uid, givenname, mail and userPassword attributes.
        3. Configure subtree password policy for ou=people subtree.
        4. Reset userPassword with trivial values like cn, sn, uid, givenname, ou and mail attributes.
    :expectedresults:
        1. Enabling PasswordCheckSyntax should PASS.
        2. Add users should PASS.
        3. Configure subtree password policy should PASS.
        4. Resetting userPassword to cn, sn, uid and mail should be rejected.
    """

    conn = test_user.bind(PASSWORD)
    try:
        log.info('Replace userPassword attribute with {}'.format(user_pasw))
        with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo:
            conn.modify_s(test_user.dn, [(ldap.MOD_REPLACE, 'userPassword', ensure_bytes(user_pasw))])
            log.fatal('Failed: Userpassword with {} is accepted'.format(user_pasw))
        assert 'password based off of user entry' in str(excinfo.value)
    finally:
        conn.unbind_s()
        test_user.set('userPassword', PASSWORD)
Example #11
    def schedule(self, agmtdn=None, interval=ALWAYS):
        """Schedule the replication agreement

        :param agmtdn: DN of the replica agreement
        :type agmtdn: str
        :param interval: - 'HHMM-HHMM D+' With D=[0123456]+
                          - Agreement.ALWAYS
                          - Agreement.NEVER
        :type interval: str

        :returns: None
        :raises: - InvalidArgumentError - if agmtdn is missing
                 - ValueError - if interval is not valid
                 - ldap.NO_SUCH_OBJECT - if agmtdn does not exist
        """
        if not agmtdn:
            raise InvalidArgumentError("agreement DN is missing")

        # check the validity of the interval
        if interval != Agreement.ALWAYS and interval != Agreement.NEVER:
            self._check_interval(interval)

        # Check if the replica agreement exists
        try:
            self.conn.getEntry(agmtdn, ldap.SCOPE_BASE)
        except ldap.NO_SUCH_OBJECT:
            raise

        # update it
        self.log.info("Schedule replication agreement %s" % agmtdn)
        mod = [(ldap.MOD_REPLACE, 'nsds5replicaupdateschedule',
                [ensure_bytes(interval)])]
        self.conn.modify_s(agmtdn, mod)
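
A hypothetical usage sketch; agmt stands for an Agreement helper bound to a live supplier and agmt_dn for an existing agreement DN, neither of which is defined here.

agmt.schedule(agmt_dn, Agreement.ALWAYS)       # replicate continuously
agmt.schedule(agmt_dn, "0800-2200 12345")      # weekdays between 08:00 and 22:00
try:
    agmt.schedule(agmt_dn, "2500-1234 6420")   # invalid hour
except ValueError:
    pass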
Example #12
    def getRawAci(self):
        """This method will rebuild an aci from the contents of the acidata
        dict found on the object.

        :returns: An aci attribute string.
        """

        # Rebuild the aci from the .acidata.
        rawaci = ''
        # For each key in the outer segment
        # Add a (key = val);. Depending on key format val:
        for key in self._keys:
            for value_dict in self.acidata[key]:
                rawaci += '(%s %s)' % (key, self._format_term(key, value_dict))
        # Now create the v3.0 aci part
        rawaci += "(version 3.0; "
        # This could be neater ...
        rawaci += 'acl "%s";' % self.acidata['acl'][0]['values'][0]
        for key in ['allow', 'deny']:
            if len(self.acidata[key]) > 0:
                rawaci += '%s (' % key
                for value in self.acidata[key][0]['values'][:-1]:
                    rawaci += '%s, ' % value
                rawaci += '%s)' % self.acidata[key][0]['values'][-1]
                rawaci += ('(%s);' % self.acidata["%s_raw_bindrules" %
                                                  key][0]['values'][-1])
        rawaci += ")"
        return ensure_bytes(rawaci)
Example #13
    def present(self, attr, value=None):
        """Assert that some attr, or some attr / value exist on the entry.

        :param attr: an attribute name
        :type attr: str
        :param value: an attribute value
        :type value: str

        :returns: True if attr is present
        """

        if self._instance.state != DIRSRV_STATE_ONLINE:
            raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE")
        self._log.debug("%s present(%r) %s" % (self._dn, attr, value))

        self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
                                        serverctrls=self._server_controls, clientctrls=self._client_controls,
                                        escapehatch='i am sure')[0]
        values = self.get_attr_vals_bytes(attr)
        self._log.debug("%s contains %s" % (self._dn, values))

        if value is None:
            # We are just checking if SOMETHING is present ....
            return len(values) > 0
        else:
            # Check if a value really does exist.
            return ensure_bytes(value).lower() in [x.lower() for x in values]
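
A hypothetical usage sketch, assuming user is a DSLdapObject (for example a UserAccount) on an ONLINE instance.

assert user.present('objectClass')           # at least one value is present
assert user.present('objectClass', 'top')    # a specific value, compared case-insensitively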
Example #14
    def delete(self):
        """Deletes the backend, it's mapping tree and all related indices.
        This can be changed with the self._protected flag!

        :raises: - UnwillingToPerform - if backend is protected
                 - UnwillingToPerform - if nsslapd-state is not 'backend'
        """

        if self._protected:
            raise ldap.UNWILLING_TO_PERFORM("This is a protected backend!")
        # First check if the mapping tree has our suffix still.
        # suffix = self.get_attr_val('nsslapd-suffix')
        bename = self.get_attr_val_utf8('cn')
        try:
            mt = self._mts.get(selector=bename)
            # Assert the type is "backend"
            # Are these the right types....?
            if mt.get_attr_val('nsslapd-state').lower() != ensure_bytes('backend'):
                raise ldap.UNWILLING_TO_PERFORM('Can not delete the mapping tree, not for a backend! You may need to delete this backend via cn=config .... ;_; ')
            # Delete our mapping tree if it exists.
            mt.delete()
        except ldap.NO_SUCH_OBJECT:
            # Righto, it's already gone! Do nothing ...
            pass
        # Delete all our related indices
        self._instance.index.delete_all(bename)

        # Now remove our children, this is all ldbm config

        configs = self._instance.search_s(self._dn, ldap.SCOPE_ONELEVEL)
        for c in configs:
            self._instance.delete_branch_s(c.dn, ldap.SCOPE_SUBTREE)
        # The super will actually delete ourselves.
        super(Backend, self).delete()
Example #15
    def apply_mods(self, mods):
        """Perform modification operation using several mods at once

        @param mods - list of tuples:  [(action, key, value),]
        @raise ValueError - if a provided mod op is invalid
        @raise LDAPError
        """
        mod_list = []
        for mod in mods:
            if len(mod) < 2:
                # Error
                raise ValueError('Not enough arguments in the mod op')
            elif len(mod) == 2:  # no action
                action = ldap.MOD_REPLACE
                key, value = mod
            elif len(mod) == 3:
                action, key, value = mod
                if action not in (ldap.MOD_REPLACE,
                                  ldap.MOD_ADD,
                                  ldap.MOD_DELETE):
                    raise ValueError('Invalid mod action(%s)' % str(action))
            else:
                # Error too many items
                raise ValueError('Too many arguments in the mod op')

            if isinstance(value, list):
                value = ensure_list_bytes(value)
            else:
                value = [ensure_bytes(value)]

            mod_list.append((action, key, value))
        return self._instance.modify_s(self._dn, mod_list)
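
A hypothetical usage sketch; entry stands for a DSLdapObject bound to a live instance. Two-tuples default to MOD_REPLACE, three-tuples name the operation explicitly.

import ldap

entry.apply_mods([
    ('description', 'rebuilt by apply_mods'),                    # MOD_REPLACE implied
    (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
    (ldap.MOD_DELETE, 'seeAlso', 'cn=stale,dc=example,dc=com'),
])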
Example #16
def test_threshold_is_reached_to_half(topo, setup, reset_logs):
    """RHDS not shutting down when disk monitoring threshold is reached to half.

    :id: b2d3665e-fe9e-11e8-b9c0-8c16451d917b
    :setup: Standalone
    :steps:
        1. Verify that there is not endless loop of error messages
    :expectedresults:
        1. Should succeed
    """

    assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on')
    assert topo.standalone.config.set(
        'nsslapd-disk-monitoring-logging-critical', 'on')
    assert topo.standalone.config.set('nsslapd-errorlog-level', '8')
    assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold',
                                      ensure_bytes(THRESHOLD_BYTES))
    topo.standalone.restart()
    subprocess.call([
        'dd', 'if=/dev/zero',
        'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M',
        'count={}'.format(HALF_THR_FILL_SIZE)
    ])
    # Verify that there is not endless loop of error messages
    _witherrorlog(topo,
                  "temporarily setting error loglevel to the default level",
                  10)
    with open(topo.standalone.errlog, 'r') as study:
        study = study.read()
    assert len(
        re.findall("temporarily setting error loglevel to the default level",
                   study)) == 1
    os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir))
Example #17
    def _lint_mappingtree(self):
        """Backend lint

        This should check for:
        * missing mapping tree entries for the backend
        * missing indices if we are local and have log access?
        """

        # Check for the missing mapping tree.
        suffix = self.get_attr_val_utf8('nsslapd-suffix')
        bename = self.get_attr_val_bytes('cn')
        try:
            mt = self._mts.get(suffix)
            if mt.get_attr_val_bytes(
                    'nsslapd-backend') != bename and mt.get_attr_val(
                        'nsslapd-state') != ensure_bytes('backend'):
                raise ldap.NO_SUCH_OBJECT(
                    "We have a matching suffix, but not a backend or correct database name."
                )
        except ldap.NO_SUCH_OBJECT:
            result = DSBLE0001
            result['items'] = [
                bename,
            ]
            return result
        return None
Example #18
    def set(self, key, value, action=ldap.MOD_REPLACE):
        """Perform a specified action on a key with value

        :param key: an attribute name
        :type key: str
        :param value: an attribute value
        :type value: str
        :param action: - ldap.MOD_REPLACE - by default
                        - ldap.MOD_ADD
                        - ldap.MOD_DELETE
        :type action: int

        :returns: result of modify_s operation
        :raises: ValueError - if instance is not online
        """

        if value is None or len(value) < 512:
            self._log.debug("%s set(%r, %r)" % (self._dn, key, value))
        else:
            self._log.debug("%s set(%r, value too large)" % (self._dn, key))
        if self._instance.state != DIRSRV_STATE_ONLINE:
            raise ValueError(
                "Invalid state. Cannot set properties on instance that is not ONLINE."
            )

        if isinstance(value, list):
            # value = map(lambda x: ensure_bytes(x), value)
            value = ensure_list_bytes(value)
        elif value is not None:
            value = [ensure_bytes(value)]

        return self._instance.modify_ext_s(self._dn, [(action, key, value)],
                                           serverctrls=self._server_controls,
                                           clientctrls=self._client_controls)
Example #19
 def _lint_hr_timestamp(self):
     hr_timestamp = self.get_attr_val('nsslapd-logging-hr-timestamps-enabled')
     if ensure_bytes('on') != hr_timestamp:
         report = copy.deepcopy(DSCLE0001)
         report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
         report['check'] = "config:hr_timestamp"
         yield report
Example #20
def test_schedule(topology):
    """Test the schedule behaviour with valid and invalid values"""

    topology.master.log.info("\n\n###########\n## SCHEDULE\n#########")
    ents = topology.master.agreement.list(suffix=SUFFIX,
                                          consumer_host=topology.consumer.host,
                                          consumer_port=topology.consumer.port)
    assert len(ents) == 1

    topology.master.agreement.schedule(ents[0].dn, Agreement.ALWAYS)
    ents = topology.master.agreement.list(suffix=SUFFIX,
                                          consumer_host=topology.consumer.host,
                                          consumer_port=topology.consumer.port)
    assert len(ents) == 1
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_SCHEDULE]) == \
        ensure_bytes(Agreement.ALWAYS)

    topology.master.agreement.schedule(ents[0].dn, Agreement.NEVER)
    ents = topology.master.agreement.list(suffix=SUFFIX,
                                          consumer_host=topology.consumer.host,
                                          consumer_port=topology.consumer.port)
    assert len(ents) == 1
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_SCHEDULE]) == \
        ensure_bytes(Agreement.NEVER)

    CUSTOM_SCHEDULE = "0000-1234 6420"
    topology.master.agreement.schedule(ents[0].dn, CUSTOM_SCHEDULE)
    ents = topology.master.agreement.list(suffix=SUFFIX,
                                          consumer_host=topology.consumer.host,
                                          consumer_port=topology.consumer.port)
    assert len(ents) == 1
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_SCHEDULE]) == \
        ensure_bytes(CUSTOM_SCHEDULE)

    CUSTOM_SCHEDULES = (
        "2500-1234 6420",  # Invalid HOUR schedule
        "0000-2534 6420",  # ^^
        "1300-1234 6420",  # Starting HOUR after ending HOUR
        "0062-1234 6420",  # Invalid MIN schedule
        "0000-1362 6420",  # ^^
        "0000-1234 6-420",  # Invalid DAYS schedule
        "0000-1362 64209",  # ^^
        "0000-1362 01234560")  # ^^

    for CUSTOM_SCHEDULE in CUSTOM_SCHEDULES:
        with pytest.raises(ValueError):
            topology.master.agreement.schedule(ents[0].dn, CUSTOM_SCHEDULE)
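
For illustration, a minimal standalone checker for the 'HHMM-HHMM D+' syntax that the invalid cases above probe; it is only a sketch of the documented constraint, not the validation Agreement._check_interval or the server actually performs.

import re

def _valid_schedule(schedule):
    m = re.match(r'^(\d{4})-(\d{4}) ([0-6]{1,7})$', schedule)
    if not m:
        return False
    start, end, days = m.groups()
    if int(start[:2]) > 23 or int(end[:2]) > 23:   # hours
        return False
    if int(start[2:]) > 59 or int(end[2:]) > 59:   # minutes
        return False
    if int(start) >= int(end):                     # start must precede end
        return False
    return len(set(days)) == len(days)             # no repeated days

assert _valid_schedule("0000-1234 6420")
assert not _valid_schedule("2500-1234 6420")
assert not _valid_schedule("0000-1362 01234560")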
Example #21
def set_value(master, attr, val):
    """
    Helper function to add/replace attr: val on the changelog config entry
    """
    try:
        master.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))])
    except ldap.LDAPError as e:
        log.error('Failed to add ' + attr + ': ' + val + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
        assert False
Example #22
    def _validate(self, rdn, properties, basedn):
        """
        Used to validate a create request.
        This way, it can be overridden without affecting
        the create types.

        It also checks that all the values in _must_attributes exist
        in some form in the dictionary.

        It has the useful trick of returning the dn, so subtypes
        can use extra properties to construct their dn here.
        """
        if properties is None:
            raise ldap.UNWILLING_TO_PERFORM('Invalid request to create. Properties cannot be None')
        if type(properties) != dict:
            raise ldap.UNWILLING_TO_PERFORM("properties must be a dictionary")

        # I think this needs to be made case insensitive
        # How will this work with the dictionary?
        for attr in self._must_attributes:
            if properties.get(attr, None) is None:
                raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr)

        # Make sure the naming attribute is present
        if properties.get(self._rdn_attribute, None) is None and rdn is None:
            raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None or rdn provided' % self._rdn_attribute)
        
        # This change here, means we can pre-load a full dn to _dn, or we can
        # accept based on the rdn
        tdn = self._dn

        if tdn is None:
            if basedn is None:
                raise ldap.UNWILLING_TO_PERFORM('Invalid request to create. basedn cannot be None')

            if properties.get(self._rdn_attribute, None) is not None:
                # Favour the value in the properties dictionary
                v = properties.get(self._rdn_attribute)
                if isinstance(v, list):
                    rdn = ensure_str(v[0])
                else:
                    rdn = ensure_str(v)

                tdn = '%s=%s,%s' % (self._rdn_attribute, rdn, basedn)

        # We may need to map over the data in the properties dict to satisfy python-ldap
        str_props = {}
        for k, v in properties.items():
            if isinstance(v, list):
                # str_props[k] = map(lambda v1: ensure_bytes(v1), v)
                str_props[k] = ensure_list_bytes(v)
            else:
                str_props[k] = ensure_bytes(v)
        #
        # Do we need to do extra dn validation here?
        return (tdn, str_props)
Example #23
    def get_attr_val_bytes(self, key, use_json=False):
        """Get a single attribute value from the entry in bytes type

        :param key: An attribute name
        :type key: str
        :returns: A single bytes value
        :raises: ValueError - if instance is offline
        """

        return ensure_bytes(self.get_attr_val(key))
Example #24
def test_list(topology):
    """List the replica agreement on a suffix => 1
    Add a RA
    List the replica agreements on that suffix again => 2
    List a specific RA

    PREREQUISITE: a replica for SUFFIX and a replica agreement already exist
    """

    topology.master.log.info("\n\n###########\n## LIST\n#############\n")
    ents = topology.master.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]) == \
        ensure_bytes(topology.consumer.host)
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]) == \
        ensure_bytes(str(topology.consumer.port))

    # Create a second RA to check .list returns 2 RA
    properties = {
        RA_NAME:
        r'meTo_%s:%d' % (topology.consumer.host, SECOND_AGMT_TEST_PORT),
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    topology.master.agreement.create(suffix=SUFFIX,
                                     host=topology.consumer.host,
                                     port=SECOND_AGMT_TEST_PORT,
                                     properties=properties)
    ents = topology.master.agreement.list(suffix=SUFFIX)
    assert len(ents) == 2

    # Check we can .list a specific RA
    ents = topology.master.agreement.list(suffix=SUFFIX,
                                          consumer_host=topology.consumer.host,
                                          consumer_port=topology.consumer.port)
    assert len(ents) == 1
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]) == \
        ensure_bytes(topology.consumer.host)
    assert ents[0].getValue(RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]) == \
        ensure_bytes(str(topology.consumer.port))
Example #25
def test_setProperties(topology):
    """Set properties to the agreement and check, if it was successful"""

    topology.supplier.log.info("\n\n###########\n### SETPROPERTIES\n##########")
    ents = topology.supplier.agreement.list(suffix=SUFFIX,
                                            consumer_host=topology.consumer.host,
                                            consumer_port=topology.consumer.port)
    assert len(ents) == 1
    test_schedule = "1234-2345 12345"
    test_desc = "test_desc"
    topology.supplier.agreement.setProperties(
        agmnt_dn=ents[0].dn, properties={RA_SCHEDULE: test_schedule,
                                         RA_DESCRIPTION: test_desc})
    properties = topology.supplier.agreement.getProperties(
        agmnt_dn=ents[0].dn, properties=[RA_SCHEDULE, RA_DESCRIPTION])
    assert len(properties) == 2
    assert properties[RA_SCHEDULE][0] == ensure_bytes(test_schedule)
    assert properties[RA_DESCRIPTION][0] == ensure_bytes(test_desc)

    # Set RA Schedule back to "always"
    topology.supplier.agreement.schedule(ents[0].dn, Agreement.ALWAYS)
Example #26
def test_negagtive_parameterize(topo, setup, reset_logs, test_input, expected):
    """
    Verify that invalid operations are not permitted
    :id: b88efbf8-fe9e-11e8-8499-8c16451d917b
    :setup: Standalone
    :steps:
        1. Verify that invalid operations are not permitted.
    :expectedresults:
        1. Should not succeed.
    """
    with pytest.raises(Exception):
        topo.standalone.config.set(test_input, ensure_bytes(expected))
Example #27
def test_access_log_rotation(topology):
    """
    Check we can parse rotated logs as well as the active log.
    """

    # Artificially rotate the log.
    lpath = topology.standalone.ds_access_log._get_log_path()
    shutil.copyfile(lpath, lpath + ensure_bytes('.20160515-104822'))
    # check we have the right number of lines.
    access_lines = topology.standalone.ds_access_log.readlines_archive()
    assert (len(access_lines) > 0)
    access_lines = topology.standalone.ds_access_log.match_archive('.*fd=.*')
    assert (len(access_lines) > 0)
Example #28
    def hasValue(self, name, val=None):
        """True if the given attribute is present and has the given value

            TODO: list comparison preserves order: should I use a set?
        """
        if not self.hasAttr(name):
            return False
        if not val:
            return True
        if isinstance(val, list):
            return val == self.data.get(name)
        if isinstance(val, tuple):
            return list(val) == self.data.get(name)
        return ensure_bytes(val) in self.data.get(name)
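
A hypothetical usage sketch; ent is assumed to be an Entry returned by conn.getEntry(), with values stored as byte strings under Python 3.

if ent.hasValue('objectclass'):            # attribute present with at least one value
    print('entry has an objectclass')
if ent.hasValue('objectclass', 'top'):     # str val is converted to bytes; comparison is exact
    print('entry is of class top')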
Example #29
    def set(self, key, value, action=ldap.MOD_REPLACE):
        self._log.debug("%s set(%r, %r)" % (self._dn, key, value))
        if self._instance.state != DIRSRV_STATE_ONLINE:
            raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")

        if isinstance(value, list):
            # value = map(lambda x: ensure_bytes(x), value)
            value = ensure_list_bytes(value)
        else:
            value = [ensure_bytes(value)]

        if self._batch:
            pass
        else:
            return self._instance.modify_s(self._dn, [(action, key, value)])
Example #30
def test_threshold_to_overflow_value(topo, setup, reset_logs):
    """
    Overflow in nsslapd-disk-monitoring-threshold
    :id: ad60ab3c-fe9e-11e8-88dc-8c16451d917b
    :setup: Standalone
    :steps:
        1. Setting nsslapd-disk-monitoring-threshold to overflow_value
    :expectedresults:
        1. Should succeed
    """
    overflow_value = '3000000000'
    # Setting nsslapd-disk-monitoring-threshold to overflow_value
    assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value))
    assert overflow_value == re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str(
        topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)',
                                 ['nsslapd-disk-monitoring-threshold'])))[0].split(' ')[1]
Example #31
def setupthesystem(topo):
    """
    This function is part of the setup fixture; it sets up the environment for this test.
    """
    global TOTAL_SIZE, USED_SIZE, AVAIL_SIZE, HALF_THR_FILL_SIZE, FULL_THR_FILL_SIZE
    topo.standalone.start()
    topo.standalone.config.set('nsslapd-disk-monitoring-grace-period', '1')
    topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
    topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES))
    TOTAL_SIZE = int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[2])*4096/1024/1024
    AVAIL_SIZE = round(int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[3]) * 4096 / 1024 / 1024)
    USED_SIZE = TOTAL_SIZE - AVAIL_SIZE
    HALF_THR_FILL_SIZE = TOTAL_SIZE - float(THRESHOLD) + 5 - USED_SIZE
    FULL_THR_FILL_SIZE = TOTAL_SIZE - 0.5 * float(THRESHOLD) + 5 - USED_SIZE
    HALF_THR_FILL_SIZE = round(HALF_THR_FILL_SIZE)
    FULL_THR_FILL_SIZE = round(FULL_THR_FILL_SIZE)
    topo.standalone.restart()
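
The regex over str(os.statvfs(...)) above picks out the third and fourth numbers of the result repr, which are f_blocks and f_bfree; here is a minimal standalone sketch of the same size arithmetic with the named fields (and f_frsize instead of the hard-coded 4096).

import os

def disk_sizes_mb(path):
    # Total, free and used space (MiB) of the filesystem holding `path`.
    st = os.statvfs(path)
    total = st.f_blocks * st.f_frsize / (1024 * 1024)
    free = round(st.f_bfree * st.f_frsize / (1024 * 1024))
    return total, free, total - free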
Example #32
 def toTupleList(self):
     """
     Convert the attrs and values to a list of 2-tuples.  The first
     element of the tuple is the attribute name.  The second element
     is either a single value or a list of values.
     """
     # For python3, we have to make sure EVERYTHING is a byte string.
     # Else everything EXPLODES
     lt = list(self.data.items())
     if MAJOR >= 3:
         ltnew = []
         for l in lt:
             vals = []
             for v in l[1]:
                 vals.append(ensure_bytes(v))
             ltnew.append((l[0], vals))
         lt = ltnew
     return lt
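
A standalone illustration of the same conversion on a plain dict, assuming the attribute -> list-of-values layout that self.data holds; ensure_bytes is approximated here by encoding str values.

def to_tuple_list(data):
    # Encode every str value to bytes, leave existing bytes untouched.
    return [(attr, [v.encode() if isinstance(v, str) else v for v in vals])
            for attr, vals in data.items()]

assert to_tuple_list({'cn': ['demo'], 'objectClass': ['top', b'person']}) == \
    [('cn', [b'demo']), ('objectClass', [b'top', b'person'])]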
Example #33
    def readlines_archive(self):
        """
        Returns an array of all the lines in all logs, including rotated logs
        and compressed logs. (gzip)
        Will likely be very slow. Try using match instead.

        @return - an array of all the lines in all logs
        """
        lines = []
        for log in self._get_all_log_paths():
            # Open the log
            if log.endswith(ensure_bytes('.gz')):
                with gzip.open(log, 'r') as lf:
                    lines += lf.readlines()
            else:
                with open(log, 'r') as lf:
                    lines += lf.readlines()
        return lines
Example #34
    def readlines_archive(self):
        """
        Returns an array of all the lines in all logs, including rotated logs
        and compressed logs. (gzip)
        Will likely be very slow. Try using match instead.

        @return - an array of all the lines in all logs
        """
        lines = []
        for log in self._get_all_log_paths():
            # Open the log
            if log.endswith(ensure_bytes('.gz')):
                with gzip.open(log, 'r') as lf:
                    lines += lf.readlines()
            else:
                with open(log, 'r') as lf:
                    lines += lf.readlines()
        return lines
Example #35
    def fin():
        m1.start()
        m2.start()
        testuser.delete()
        m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
        repl.wait_for_replication(m1, m2)

        # on M2 restore a default 99user.ldif
        m2.stop()
        os.remove(m2.schemadir + "/99user.ldif")
        schema_filename = (m2.schemadir + "/99user.ldif")
        try:
            with open(schema_filename, 'w') as schema_file:
                schema_file.write("dn: cn=schema\n")
            os.chmod(schema_filename, 0o777)
        except OSError as e:
            log.fatal("Failed to update schema file: " +
                      "{} Error: {}".format(schema_filename, str(e)))
        m2.start()
Example #36
 def match_archive(self, pattern):
     """Search all the log files, including "zipped" logs
     @param pattern - a regex pattern
     @return - results of the pattern matching
     """
     results = []
     prog = re.compile(pattern)
     for log in self._get_all_log_paths():
         if log.endswith(ensure_bytes('.gz')):
             with gzip.open(log, 'r') as lf:
                 for line in lf:
                     mres = prog.match(line)
                     if mres:
                         results.append(line)
         else:
             with open(log, 'r') as lf:
                 for line in lf:
                     mres = prog.match(line)
                     if mres:
                         results.append(line)
     return results
Example #37
 def runInfProg(prog, content, verbose, prefix=None):
     """run a program that takes an .inf style file on stdin"""
     cmd = [prog]
     if verbose:
         cmd.append('-ddd')
     else:
         cmd.extend(['-l', '/dev/null'])
     cmd.extend(['-s', '-f', '-'])
     log.debug("running: %s " % cmd)
     if HASPOPEN:
         pipe = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
         child_stdin = pipe.stdin
         child_stdout = pipe.stdout
     else:
         pipe = popen2.Popen4(cmd)
         child_stdin = pipe.tochild
         child_stdout = pipe.fromchild
     child_stdin.write(ensure_bytes(content))
     child_stdin.close()
     if verbose:
         log.debug("PID %s" % pipe.pid)
     while pipe.poll() is None:
         (rr, wr, xr) = select.select([child_stdout], [], [], 1.0)
         if rr and len(rr) > 0:
             line = rr[0].readline()
             if not line:
                 break
             if verbose:
                 sys.stdout.write(ensure_str(line))
         elif verbose:
             print("timed out waiting to read from pid %s : %s " % (pipe.pid, cmd))
     child_stdout.close()
     exitCode = pipe.wait()
     # if verbose:
     log.debug("%s returned exit code %s" % (prog, exitCode))
     return exitCode